From 43040a4051ac6cfc13ad905c550a03d582b80633 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:39:19 +0100 Subject: [PATCH 001/147] Bump ruff from 0.6.8 to 0.6.9 (#17794) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index bf30fbbe15c6..79bf395401e1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2277,29 +2277,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.8" +version = "0.6.9" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.8-py3-none-linux_armv6l.whl", hash = "sha256:77944bca110ff0a43b768f05a529fecd0706aac7bcce36d7f1eeb4cbfca5f0f2"}, - {file = "ruff-0.6.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27b87e1801e786cd6ede4ada3faa5e254ce774de835e6723fd94551464c56b8c"}, - {file = "ruff-0.6.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd48f945da2a6334f1793d7f701725a76ba93bf3d73c36f6b21fb04d5338dcf5"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:677e03c00f37c66cea033274295a983c7c546edea5043d0c798833adf4cf4c6f"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9f1476236b3eacfacfc0f66aa9e6cd39f2a624cb73ea99189556015f27c0bdeb"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f5a2f17c7d32991169195d52a04c95b256378bbf0de8cb98478351eb70d526f"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5fd0d4b7b1457c49e435ee1e437900ced9b35cb8dc5178921dfb7d98d65a08d0"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8034b19b993e9601f2ddf2c517451e17a6ab5cdb1c13fdff50c1442a7171d87"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cfb227b932ba8ef6e56c9f875d987973cd5e35bc5d05f5abf045af78ad8e098"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef0411eccfc3909269fed47c61ffebdcb84a04504bafa6b6df9b85c27e813b0"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:007dee844738c3d2e6c24ab5bc7d43c99ba3e1943bd2d95d598582e9c1b27750"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ce60058d3cdd8490e5e5471ef086b3f1e90ab872b548814e35930e21d848c9ce"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1085c455d1b3fdb8021ad534379c60353b81ba079712bce7a900e834859182fa"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:70edf6a93b19481affd287d696d9e311388d808671bc209fb8907b46a8c3af44"}, - {file = "ruff-0.6.8-py3-none-win32.whl", hash = "sha256:792213f7be25316f9b46b854df80a77e0da87ec66691e8f012f887b4a671ab5a"}, - {file = "ruff-0.6.8-py3-none-win_amd64.whl", hash = "sha256:ec0517dc0f37cad14a5319ba7bba6e7e339d03fbf967a6d69b0907d61be7a263"}, - {file = "ruff-0.6.8-py3-none-win_arm64.whl", hash = "sha256:8d3bb2e3fbb9875172119021a13eed38849e762499e3cfde9588e4b4d70968dc"}, - {file = "ruff-0.6.8.tar.gz", hash = "sha256:a5bf44b1aa0adaf6d9d20f86162b34f7c593bfedabc51239953e446aefc8ce18"}, + {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = 
"sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, + {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, + {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, + {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, + {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, + {file = "ruff-0.6.9-py3-none-win32.whl", hash = "sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, + {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, + {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, + {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, ] [[package]] @@ -3114,4 +3114,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "304d03b74d2886def69ae44ce5afaed21318db9f09aae91281e0f182e1660ffd" +content-hash = "c8a22f901970b2f851151e731532757fd3acf7ba02930952636d2e6c5c9c0c90" diff --git a/pyproject.toml b/pyproject.toml index 0246407f183b..0e4f4c840676 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. 
-ruff = "0.6.8" +ruff = "0.6.9" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 475e192cbef9a3ea7bc19eaa10de84322e7813a1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:40:12 +0100 Subject: [PATCH 002/147] Bump tomli from 2.0.1 to 2.0.2 (#17796) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index 79bf395401e1..929248ff8d3e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2535,13 +2535,13 @@ twisted = ["twisted"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] From 165f4ca776239e1ed88a28b547345cd129a1d8ce Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:41:03 +0100 Subject: [PATCH 003/147] Bump sentry-sdk from 2.14.0 to 2.15.0 (#17795) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 929248ff8d3e..4f051694659e 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2334,13 +2334,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.14.0" +version = "2.15.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.14.0-py2.py3-none-any.whl", hash = "sha256:b8bc3dc51d06590df1291b7519b85c75e2ced4f28d9ea655b6d54033503b5bf4"}, - {file = "sentry_sdk-2.14.0.tar.gz", hash = "sha256:1e0e2eaf6dad918c7d1e0edac868a7bf20017b177f242cefe2a6bcd47955961d"}, + {file = "sentry_sdk-2.15.0-py2.py3-none-any.whl", hash = "sha256:8fb0d1a4e1a640172f31502e4503543765a1fe8a9209779134a4ac52d4677303"}, + {file = "sentry_sdk-2.15.0.tar.gz", hash = "sha256:a599e7d3400787d6f43327b973e55a087b931ba2c592a7a7afa691f8eb5e75e2"}, ] [package.dependencies] From 1bb528ee440c5f5c46c96a493c4a8a8a790a00f2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:41:27 +0100 Subject: [PATCH 004/147] Bump phonenumbers from 8.13.46 to 8.13.47 (#17797) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 4f051694659e..caee5974c5d3 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1447,13 +1447,13 @@ dev = ["jinja2"] [[package]] name = "phonenumbers" -version = "8.13.46" +version = "8.13.47" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.46-py2.py3-none-any.whl", hash = "sha256:519422d407af066fdbf98e179ea2e214487060f26526d67871f817eefbbb2134"}, - {file = "phonenumbers-8.13.46.tar.gz", hash = "sha256:94bf18ba9725bb6868d29473b13f78ef01e2585c5cb561ec0200be7676e77452"}, + {file = "phonenumbers-8.13.47-py2.py3-none-any.whl", hash = "sha256:5d3c0142ef7055ca5551884352e3b6b93bfe002a0bc95b8eaba39b0e2184541b"}, + {file = "phonenumbers-8.13.47.tar.gz", hash = "sha256:53c5e7c6d431cafe4efdd44956078404ae9bc8b0eacc47be3105d3ccc88aaffa"}, ] [[package]] From f40641c29b36ac42f22d1976ec02c38d8602afc6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:56:15 +0100 Subject: [PATCH 005/147] Bump sigstore/cosign-installer from 3.6.0 to 3.7.0 (#17798) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1a97809a26b0..ebf866e3d5f8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -30,7 +30,7 @@ jobs: run: docker buildx inspect - name: Install Cosign - uses: sigstore/cosign-installer@v3.6.0 + uses: sigstore/cosign-installer@v3.7.0 - name: Checkout repository uses: actions/checkout@v4 From bdcc9fa388262ece29f06973d601a99d3fee7be7 Mon Sep 17 00:00:00 2001 From: Andrew Ferrazzutti Date: Tue, 8 Oct 2024 10:05:36 -0400 Subject: [PATCH 006/147] Fix incorrectly documented config path argument (#17802) --- changelog.d/17802.doc | 1 + docs/workers.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17802.doc diff --git a/changelog.d/17802.doc b/changelog.d/17802.doc new file mode 100644 index 000000000000..72e653d3c4a4 --- /dev/null +++ b/changelog.d/17802.doc @@ -0,0 +1 @@ +Correct documentation to refer to the `--config-path` argument instead of `--config-file`. diff --git a/docs/workers.md b/docs/workers.md index 51b22fef9bfe..0116c455bc72 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -177,11 +177,11 @@ The following applies to Synapse installations that have been installed from sou You can start the main Synapse process with Poetry by running the following command: ```console -poetry run synapse_homeserver --config-file [your homeserver.yaml] +poetry run synapse_homeserver --config-path [your homeserver.yaml] ``` For worker setups, you can run the following command ```console -poetry run synapse_worker --config-file [your homeserver.yaml] --config-file [your worker.yaml] +poetry run synapse_worker --config-path [your homeserver.yaml] --config-path [your worker.yaml] ``` ## Available worker applications From 60aebdb27edfe00f43b7e95bc60a730408bc3084 Mon Sep 17 00:00:00 2001 From: Martin Weinelt Date: Tue, 8 Oct 2024 19:32:25 +0200 Subject: [PATCH 007/147] Fix saving of non-RGB thumbnails as PNG (#17736) --- changelog.d/17736.bugfix | 1 + synapse/media/thumbnailer.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17736.bugfix diff --git a/changelog.d/17736.bugfix b/changelog.d/17736.bugfix new file mode 100644 index 000000000000..0d3fd06962ce --- /dev/null +++ b/changelog.d/17736.bugfix @@ -0,0 +1 @@ +Fix saving of PNG thumbnails, when the original image is in the CMYK color space. 
diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py index ee1118a53a22..3845067835a6 100644 --- a/synapse/media/thumbnailer.py +++ b/synapse/media/thumbnailer.py @@ -206,7 +206,7 @@ def crop(self, width: int, height: int, output_type: str) -> BytesIO: def _encode_image(self, output_image: Image.Image, output_type: str) -> BytesIO: output_bytes_io = BytesIO() fmt = self.FORMATS[output_type] - if fmt == "JPEG": + if fmt == "JPEG" or fmt == "PNG" and output_image.mode == "CMYK": output_image = output_image.convert("RGB") output_image.save(output_bytes_io, fmt, quality=80) return output_bytes_io From 05576f0b4b4a1ba5e756cbd1f0a064751f233d83 Mon Sep 17 00:00:00 2001 From: Nathan Date: Wed, 9 Oct 2024 14:21:08 +0200 Subject: [PATCH 008/147] Added display_name_claim in jwt_config which sets the user's display name upon registration (#17708) --- changelog.d/17708.feature | 1 + .../configuration/config_documentation.md | 3 +++ synapse/config/jwt.py | 2 ++ synapse/handlers/jwt.py | 16 +++++++++--- synapse/rest/client/login.py | 9 +++++-- tests/rest/client/test_login.py | 25 +++++++++++++++++++ 6 files changed, 50 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17708.feature diff --git a/changelog.d/17708.feature b/changelog.d/17708.feature new file mode 100644 index 000000000000..90ec810f50da --- /dev/null +++ b/changelog.d/17708.feature @@ -0,0 +1 @@ +Added the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload. \ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 29f3528c7e11..1de2f688656f 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3722,6 +3722,8 @@ Additional sub-options for this setting include: Required if `enabled` is set to true. * `subject_claim`: Name of the claim containing a unique identifier for the user. Optional, defaults to `sub`. +* `display_name_claim`: Name of the claim containing the display name for the user. Optional. + If provided, the display name will be set to the value of this claim upon first login. * `issuer`: The issuer to validate the "iss" claim against. Optional. If provided the "iss" claim will be required and validated for all JSON web tokens. * `audiences`: A list of audiences to validate the "aud" claim against. Optional. @@ -3736,6 +3738,7 @@ jwt_config: secret: "provided-by-your-issuer" algorithm: "provided-by-your-issuer" subject_claim: "name_of_claim" + display_name_claim: "name_of_claim" issuer: "provided-by-your-issuer" audiences: - "provided-by-your-issuer" diff --git a/synapse/config/jwt.py b/synapse/config/jwt.py index b41f2dc08f34..5c76551f3340 100644 --- a/synapse/config/jwt.py +++ b/synapse/config/jwt.py @@ -38,6 +38,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.jwt_algorithm = jwt_config["algorithm"] self.jwt_subject_claim = jwt_config.get("subject_claim", "sub") + self.jwt_display_name_claim = jwt_config.get("display_name_claim") # The issuer and audiences are optional, if provided, it is asserted # that the claims exist on the JWT. 
@@ -49,5 +50,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: self.jwt_secret = None self.jwt_algorithm = None self.jwt_subject_claim = None + self.jwt_display_name_claim = None self.jwt_issuer = None self.jwt_audiences = None diff --git a/synapse/handlers/jwt.py b/synapse/handlers/jwt.py index 5fa7a305add2..400f3a59aa1c 100644 --- a/synapse/handlers/jwt.py +++ b/synapse/handlers/jwt.py @@ -18,7 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import TYPE_CHECKING +from typing import TYPE_CHECKING, Optional, Tuple from authlib.jose import JsonWebToken, JWTClaims from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError @@ -36,11 +36,12 @@ def __init__(self, hs: "HomeServer"): self.jwt_secret = hs.config.jwt.jwt_secret self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim + self.jwt_display_name_claim = hs.config.jwt.jwt_display_name_claim self.jwt_algorithm = hs.config.jwt.jwt_algorithm self.jwt_issuer = hs.config.jwt.jwt_issuer self.jwt_audiences = hs.config.jwt.jwt_audiences - def validate_login(self, login_submission: JsonDict) -> str: + def validate_login(self, login_submission: JsonDict) -> Tuple[str, Optional[str]]: """ Authenticates the user for the /login API @@ -49,7 +50,8 @@ def validate_login(self, login_submission: JsonDict) -> str: (including 'type' and other relevant fields) Returns: - The user ID that is logging in. + A tuple of (user_id, display_name) of the user that is logging in. + If the JWT does not contain a display name, the second element of the tuple will be None. Raises: LoginError if there was an authentication problem. @@ -109,4 +111,10 @@ def validate_login(self, login_submission: JsonDict) -> str: if user is None: raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN) - return UserID(user, self.hs.hostname).to_string() + default_display_name = None + if self.jwt_display_name_claim: + display_name_claim = claims.get(self.jwt_display_name_claim) + if display_name_claim is not None: + default_display_name = display_name_claim + + return UserID(user, self.hs.hostname).to_string(), default_display_name diff --git a/synapse/rest/client/login.py b/synapse/rest/client/login.py index 03b1e7edc496..3271b02d40e5 100644 --- a/synapse/rest/client/login.py +++ b/synapse/rest/client/login.py @@ -363,6 +363,7 @@ async def _complete_login( login_submission: JsonDict, callback: Optional[Callable[[LoginResponse], Awaitable[None]]] = None, create_non_existent_users: bool = False, + default_display_name: Optional[str] = None, ratelimit: bool = True, auth_provider_id: Optional[str] = None, should_issue_refresh_token: bool = False, @@ -410,7 +411,8 @@ async def _complete_login( canonical_uid = await self.auth_handler.check_user_exists(user_id) if not canonical_uid: canonical_uid = await self.registration_handler.register_user( - localpart=UserID.from_string(user_id).localpart + localpart=UserID.from_string(user_id).localpart, + default_display_name=default_display_name, ) user_id = canonical_uid @@ -546,11 +548,14 @@ async def _do_jwt_login( Returns: The body of the JSON response. 
""" - user_id = self.hs.get_jwt_handler().validate_login(login_submission) + user_id, default_display_name = self.hs.get_jwt_handler().validate_login( + login_submission + ) return await self._complete_login( user_id, login_submission, create_non_existent_users=True, + default_display_name=default_display_name, should_issue_refresh_token=should_issue_refresh_token, request_info=request_info, ) diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py index 2b1e44381b67..cbd6d8d4bf8c 100644 --- a/tests/rest/client/test_login.py +++ b/tests/rest/client/test_login.py @@ -1047,6 +1047,7 @@ class JWTTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets_for_client_rest_resource, login.register_servlets, + profile.register_servlets, ] jwt_secret = "secret" @@ -1202,6 +1203,30 @@ def test_login_custom_sub(self) -> None: self.assertEqual(channel.code, 200, msg=channel.result) self.assertEqual(channel.json_body["user_id"], "@frog:test") + @override_config( + {"jwt_config": {**base_config, "display_name_claim": "display_name"}} + ) + def test_login_custom_display_name(self) -> None: + """Test setting a custom display name.""" + localpart = "pinkie" + user_id = f"@{localpart}:test" + display_name = "Pinkie Pie" + + # Perform the login, specifying a custom display name. + channel = self.jwt_login({"sub": localpart, "display_name": display_name}) + self.assertEqual(channel.code, 200, msg=channel.result) + self.assertEqual(channel.json_body["user_id"], user_id) + + # Fetch the user's display name and check that it was set correctly. + access_token = channel.json_body["access_token"] + channel = self.make_request( + "GET", + f"/_matrix/client/v3/profile/{user_id}/displayname", + access_token=access_token, + ) + self.assertEqual(channel.code, 200, msg=channel.result) + self.assertEqual(channel.json_body["displayname"], display_name) + def test_login_no_token(self) -> None: params = {"type": "org.matrix.login.jwt"} channel = self.make_request(b"POST", LOGIN_URL, params) From f6a3e5e1c235884b0e550bceba55cd9311def2d0 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 10 Oct 2024 09:59:01 +0100 Subject: [PATCH 009/147] Fix release script to check GH token (#17803) The current logic didn't work. --- changelog.d/17803.misc | 1 + scripts-dev/release.py | 36 +++++++++++++++++++++++------------- 2 files changed, 24 insertions(+), 13 deletions(-) create mode 100644 changelog.d/17803.misc diff --git a/changelog.d/17803.misc b/changelog.d/17803.misc new file mode 100644 index 000000000000..a267df8b83cd --- /dev/null +++ b/changelog.d/17803.misc @@ -0,0 +1 @@ +Test github token before running release script steps. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index 44356242679c..b14b61c7053f 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -40,7 +40,7 @@ import git from click.exceptions import ClickException from git import GitCommandError, Repo -from github import Github +from github import BadCredentialsException, Github from packaging import version @@ -323,10 +323,8 @@ def tag(gh_token: Optional[str]) -> None: def _tag(gh_token: Optional[str]) -> None: """Tags the release and generates a draft GitHub release""" - if gh_token: - # Test that the GH Token is valid before continuing. - gh = Github(gh_token) - gh.get_user() + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) # Make sure we're in a git repo. 
repo = get_repo_and_check_clean_checkout() @@ -469,10 +467,8 @@ def upload(gh_token: Optional[str]) -> None: def _upload(gh_token: Optional[str]) -> None: """Upload release to pypi.""" - if gh_token: - # Test that the GH Token is valid before continuing. - gh = Github(gh_token) - gh.get_user() + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) current_version = get_package_version() tag_name = f"v{current_version}" @@ -569,10 +565,8 @@ def wait_for_actions(gh_token: Optional[str]) -> None: def _wait_for_actions(gh_token: Optional[str]) -> None: - if gh_token: - # Test that the GH Token is valid before continuing. - gh = Github(gh_token) - gh.get_user() + # Test that the GH Token is valid before continuing. + check_valid_gh_token(gh_token) # Find out the version and tag name. current_version = get_package_version() @@ -806,6 +800,22 @@ def get_repo_and_check_clean_checkout( return repo +def check_valid_gh_token(gh_token: Optional[str]) -> None: + """Check that a github token is valid, if supplied""" + + if not gh_token: + # No github token supplied, so nothing to do. + return + + try: + gh = Github(gh_token) + + # We need to lookup name to trigger a request. + _name = gh.get_user().name + except BadCredentialsException as e: + raise click.ClickException(f"Github credentials are bad: {e}") + + def find_ref(repo: git.Repo, ref_name: str) -> Optional[git.HEAD]: """Find the branch/ref, looking first locally then in the remote.""" if ref_name in repo.references: From 451a9dc7b9284a5d0988318970e15ad41b34491f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 14 Oct 2024 11:31:49 +0100 Subject: [PATCH 010/147] Clarify when 3PID invite module callbacks are called (#17627) Co-authored-by: Eric Eastwood --- changelog.d/17627.doc | 1 + docs/modules/spam_checker_callbacks.md | 9 ++++++--- 2 files changed, 7 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17627.doc diff --git a/changelog.d/17627.doc b/changelog.d/17627.doc new file mode 100644 index 000000000000..487a0aea0dac --- /dev/null +++ b/changelog.d/17627.doc @@ -0,0 +1 @@ +Clarify when the `user_may_invite` and `user_may_send_3pid_invite` module callbacks are called. \ No newline at end of file diff --git a/docs/modules/spam_checker_callbacks.md b/docs/modules/spam_checker_callbacks.md index ffdfe6082e1b..ec306d81abf3 100644 --- a/docs/modules/spam_checker_callbacks.md +++ b/docs/modules/spam_checker_callbacks.md @@ -76,8 +76,9 @@ _Changed in Synapse v1.62.0: `synapse.module_api.NOT_SPAM` and `synapse.module_a async def user_may_invite(inviter: str, invitee: str, room_id: str) -> Union["synapse.module_api.NOT_SPAM", "synapse.module_api.errors.Codes", bool] ``` -Called when processing an invitation. Both inviter and invitee are -represented by their Matrix user ID (e.g. `@alice:example.com`). +Called when processing an invitation, both when one is created locally or when +receiving an invite over federation. Both inviter and invitee are represented by +their Matrix user ID (e.g. `@alice:example.com`). The callback must return one of: @@ -112,7 +113,9 @@ async def user_may_send_3pid_invite( ``` Called when processing an invitation using a third-party identifier (also called a 3PID, -e.g. an email address or a phone number). +e.g. an email address or a phone number). It is only called when a 3PID invite is created +locally - not when one is received in a room over federation. 
If the 3PID is already associated +with a Matrix ID, the spam check will go through the `user_may_invite` callback instead. The inviter is represented by their Matrix user ID (e.g. `@alice:example.com`), and the invitee is represented by its medium (e.g. "email") and its address From 24975eca4dc32b2e8158256faccf30c8b1346a5e Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 14 Oct 2024 11:34:33 +0100 Subject: [PATCH 011/147] Build debian packages for new Ubuntu versions (#17824) c.f. https://wiki.ubuntu.com/Releases for the currently supported Ubuntu releases. Note: this removes support for 23.04 and 23.10, which are EOL. Fixes #17811 --- changelog.d/17824.misc | 1 + scripts-dev/build_debian_packages.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17824.misc diff --git a/changelog.d/17824.misc b/changelog.d/17824.misc new file mode 100644 index 000000000000..22574f00ec39 --- /dev/null +++ b/changelog.d/17824.misc @@ -0,0 +1 @@ +Build debian packages for new Ubuntu versions, and stop building for no longer supported versions. diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index de2a1345448c..88c84194009c 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -32,8 +32,8 @@ "debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24) "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04) - "ubuntu:lunar", # 23.04 (EOL 2024-01) (our EOL forced by Python 3.11 is 2027-10-24) - "ubuntu:mantic", # 23.10 (EOL 2024-07) (our EOL forced by Python 3.11 is 2027-10-24) + "ubuntu:noble", # 24.04 LTS (EOL 2029-06) + "ubuntu:oracular", # 24.10 (EOL 2025-07) "debian:trixie", # (EOL not specified yet) ) From 1266138b66b44f1d322f79c5e727238bbbf1e919 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 14 Oct 2024 13:26:12 +0100 Subject: [PATCH 012/147] Bump sentry-sdk from 2.15.0 to 2.16.0 (#17829) --- poetry.lock | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index caee5974c5d3..3ad449b35747 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2334,13 +2334,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.15.0" +version = "2.16.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.15.0-py2.py3-none-any.whl", hash = "sha256:8fb0d1a4e1a640172f31502e4503543765a1fe8a9209779134a4ac52d4677303"}, - {file = "sentry_sdk-2.15.0.tar.gz", hash = "sha256:a599e7d3400787d6f43327b973e55a087b931ba2c592a7a7afa691f8eb5e75e2"}, + {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"}, + {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"}, ] [package.dependencies] @@ -2363,6 +2363,7 @@ falcon = ["falcon (>=1.4)"] fastapi = ["fastapi (>=0.79.0)"] flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"] grpcio = ["grpcio (>=1.21.1)", "protobuf (>=3.8.0)"] +http2 = ["httpcore[http2] (==1.*)"] httpx = ["httpx (>=0.16.0)"] huey = ["huey (>=2)"] huggingface-hub = ["huggingface-hub (>=0.22)"] From 5dd6157972616b8764df96c32a54c6372acf86a5 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" 
<49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 14 Oct 2024 13:26:23 +0100
Subject: [PATCH 013/147] Bump types-setuptools from 75.1.0.20240917 to
 75.1.0.20241014 (#17828)

---
 poetry.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 3ad449b35747..d6132716b18b 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2834,13 +2834,13 @@ urllib3 = ">=2"
 
 [[package]]
 name = "types-setuptools"
-version = "75.1.0.20240917"
+version = "75.1.0.20241014"
 description = "Typing stubs for setuptools"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "types-setuptools-75.1.0.20240917.tar.gz", hash = "sha256:12f12a165e7ed383f31def705e5c0fa1c26215dd466b0af34bd042f7d5331f55"},
-    {file = "types_setuptools-75.1.0.20240917-py3-none-any.whl", hash = "sha256:06f78307e68d1bbde6938072c57b81cf8a99bc84bd6dc7e4c5014730b097dc0c"},
+    {file = "types-setuptools-75.1.0.20241014.tar.gz", hash = "sha256:29b0560a8d4b4a91174be085847002c69abfcb048e20b33fc663005aedf56804"},
+    {file = "types_setuptools-75.1.0.20241014-py3-none-any.whl", hash = "sha256:caab58366741fb99673d0138b6e2d760717f154cfb981b74fea5e8de40f0b703"},
 ]
 
 [[package]]

From ae6179b3826ecd0770dc6b13388715e0344c8b70 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 14 Oct 2024 13:26:40 +0100
Subject: [PATCH 014/147] Bump mypy-zope from 1.0.5 to 1.0.7 (#17827)

---
 poetry.lock | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index d6132716b18b..9fd95ff44752 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1377,16 +1377,17 @@ files = [
 
 [[package]]
 name = "mypy-zope"
-version = "1.0.5"
+version = "1.0.7"
 description = "Plugin for mypy to support zope interfaces"
 optional = false
 python-versions = "*"
 files = [
-    {file = "mypy_zope-1.0.5.tar.gz", hash = "sha256:2440406d49c0e1199c1cd819c92a2c4957de65579c6abc8a081c927f4bdc8d49"},
+    {file = "mypy_zope-1.0.7-py3-none-any.whl", hash = "sha256:f19de249574319d81083b15f8a022c6b15583582f23340a860922141f1b651ca"},
+    {file = "mypy_zope-1.0.7.tar.gz", hash = "sha256:32a79ce78647c0bea61e7e0c0eb1233fcb97bb94e8950cca73f17d3419c602f7"},
 ]
 
 [package.dependencies]
-mypy = ">=1.0.0,<1.11.0"
+mypy = ">=1.0.0,<1.12.0"
 "zope.interface" = "*"
 "zope.schema" = "*"

From d025b5ab508020740501ac8cca0da2fd99e89cee Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 14 Oct 2024 13:31:22 +0100
Subject: [PATCH 015/147] Correctly handle changes to required state config in
 sliding sync (#17785)

Fixes https://github.com/element-hq/synapse/issues/17698

This handles `required_state` changes by checking if new state has been
added to the config, and if so fetching and returning that from the
current state.

This also takes care to ensure that, given a state entry S that is added,
removed and then re-added, we do *not* send S down a second time if there
have been no changes to S in the current state. This is fine for Rust SDK
(as it just remembers all state), but we might decide not to do this
behaviour in the MSC. If we decide to always send down S then it's easy
enough to rip out all the code.
--------- Co-authored-by: Eric Eastwood --- changelog.d/17785.bugfix | 1 + changelog.d/17805.bugfix | 1 + synapse/handlers/sliding_sync/__init__.py | 234 +++++- .../storage/databases/main/sliding_sync.py | 4 +- synapse/types/state.py | 7 + tests/handlers/test_sliding_sync.py | 694 +++++++++++++++++- .../sliding_sync/test_rooms_required_state.py | 261 +++++++ 7 files changed, 1188 insertions(+), 14 deletions(-) create mode 100644 changelog.d/17785.bugfix create mode 100644 changelog.d/17805.bugfix diff --git a/changelog.d/17785.bugfix b/changelog.d/17785.bugfix new file mode 100644 index 000000000000..df2898f54e36 --- /dev/null +++ b/changelog.d/17785.bugfix @@ -0,0 +1 @@ +Fix bug with sliding sync where the server would not return state that was added to the `required_state` config. diff --git a/changelog.d/17805.bugfix b/changelog.d/17805.bugfix new file mode 100644 index 000000000000..df2898f54e36 --- /dev/null +++ b/changelog.d/17805.bugfix @@ -0,0 +1 @@ +Fix bug with sliding sync where the server would not return state that was added to the `required_state` config. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 8c12cea8ebf1..39dba4ff988a 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -14,7 +14,7 @@ import logging from itertools import chain -from typing import TYPE_CHECKING, Dict, List, Mapping, Optional, Set, Tuple +from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple from prometheus_client import Histogram from typing_extensions import assert_never @@ -522,6 +522,8 @@ async def get_room_sync_data( state_reset_out_of_room = True + prev_room_sync_config = previous_connection_state.room_configs.get(room_id) + # Determine whether we should limit the timeline to the token range. # # We should return historical messages (before token range) in the @@ -550,7 +552,6 @@ async def get_room_sync_data( # or `limited` mean for clients that interpret them correctly. In future this # behavior is almost certainly going to change. # - # TODO: Also handle changes to `required_state` from_bound = None initial = True ignore_timeline_bound = False @@ -571,7 +572,6 @@ async def get_room_sync_data( log_kv({"sliding_sync.room_status": room_status}) - prev_room_sync_config = previous_connection_state.room_configs.get(room_id) if prev_room_sync_config is not None: # Check if the timeline limit has increased, if so ignore the # timeline bound and record the change (see "XXX: Odd behavior" @@ -582,8 +582,6 @@ async def get_room_sync_data( ): ignore_timeline_bound = True - # TODO: Check for changes in `required_state`` - log_kv( { "sliding_sync.from_bound": from_bound, @@ -997,6 +995,10 @@ async def get_room_sync_data( include_others=required_state_filter.include_others, ) + # The required state map to store in the room sync config, if it has + # changed. + changed_required_state_map: Optional[Mapping[str, AbstractSet[str]]] = None + # We can return all of the state that was requested if this was the first # time we've sent the room down this connection. room_state: StateMap[EventBase] = {} @@ -1010,6 +1012,29 @@ async def get_room_sync_data( else: assert from_bound is not None + if prev_room_sync_config is not None: + # Check if there are any changes to the required state config + # that we need to handle. 
+                changed_required_state_map, added_state_filter = (
+                    _required_state_changes(
+                        user.to_string(),
+                        previous_room_config=prev_room_sync_config,
+                        room_sync_config=room_sync_config,
+                        state_deltas=room_state_delta_id_map,
+                    )
+                )
+
+                if added_state_filter:
+                    # Some state entries got added, so we pull out the current
+                    # state for them. If we don't do this we'd only send down new deltas.
+                    state_ids = await self.get_current_state_ids_at(
+                        room_id=room_id,
+                        room_membership_for_user_at_to_token=room_membership_for_user_at_to_token,
+                        state_filter=added_state_filter,
+                        to_token=to_token,
+                    )
+                    room_state_delta_id_map.update(state_ids)
+
             events = await self.store.get_events(
                 state_filter.filter_state(room_state_delta_id_map).values()
             )
@@ -1108,10 +1133,13 @@ async def get_room_sync_data(
             # sensible order again.
             bump_stamp = 0

-        unstable_expanded_timeline = False
-        prev_room_sync_config = previous_connection_state.room_configs.get(room_id)
+        room_sync_required_state_map_to_persist = room_sync_config.required_state_map
+        if changed_required_state_map:
+            room_sync_required_state_map_to_persist = changed_required_state_map
+
         # Record the `room_sync_config` if we're `ignore_timeline_bound` (which means
         # that the `timeline_limit` has increased)
+        unstable_expanded_timeline = False
         if ignore_timeline_bound:
             # FIXME: We signal the fact that we're sending down more events to
             # the client by setting `unstable_expanded_timeline` to true (see
@@ -1120,7 +1148,7 @@ async def get_room_sync_data(

             new_connection_state.room_configs[room_id] = RoomSyncConfig(
                 timeline_limit=room_sync_config.timeline_limit,
-                required_state_map=room_sync_config.required_state_map,
+                required_state_map=room_sync_required_state_map_to_persist,
             )
         elif prev_room_sync_config is not None:
             # If the result is `limited` then we need to record that the
@@ -1149,10 +1177,14 @@ async def get_room_sync_data(
             ):
                 new_connection_state.room_configs[room_id] = RoomSyncConfig(
                     timeline_limit=room_sync_config.timeline_limit,
-                    required_state_map=room_sync_config.required_state_map,
+                    required_state_map=room_sync_required_state_map_to_persist,
                 )

-            # TODO: Record changes in required_state.
+        elif changed_required_state_map is not None:
+            new_connection_state.room_configs[room_id] = RoomSyncConfig(
+                timeline_limit=room_sync_config.timeline_limit,
+                required_state_map=room_sync_required_state_map_to_persist,
+            )
         else:
             new_connection_state.room_configs[room_id] = room_sync_config
@@ -1285,3 +1317,185 @@ async def _get_bump_stamp(
             return new_bump_event_pos.stream

     return None
+
+
+def _required_state_changes(
+    user_id: str,
+    *,
+    previous_room_config: "RoomSyncConfig",
+    room_sync_config: RoomSyncConfig,
+    state_deltas: StateMap[str],
+) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]:
+    """Calculates the changes between the required state room config from the
+    previous requests compared with the current request.
+
+    This does two things. First, it calculates if we need to update the room
+    config due to changes to required state. Secondly, it works out which state
+    entries we need to pull from current state and return due to the state entry
+    now appearing in the required state when it previously wasn't (on top of the
+    state deltas).
+
+    This function tries to handle the case where a state entry is added,
+    removed and then added again to the required state. In that case we only
+    want to re-send that entry down sync if it has changed.
+
+    Returns:
+        A 2-tuple of updated required state config (or None if there is no update)
+        and the state filter to use to fetch extra current state that we need to
+        return.
+    """
+
+    prev_required_state_map = previous_room_config.required_state_map
+    request_required_state_map = room_sync_config.required_state_map
+
+    if prev_required_state_map == request_required_state_map:
+        # There has been no change. Return immediately.
+        return None, StateFilter.none()
+
+    prev_wildcard = prev_required_state_map.get(StateValues.WILDCARD, set())
+    request_wildcard = request_required_state_map.get(StateValues.WILDCARD, set())
+
+    # If we were previously fetching everything ("*", "*"), always update the effective
+    # room required state config to match the request. And since we were previously
+    # already fetching everything, we don't have to fetch anything now that they've
+    # narrowed.
+    if StateValues.WILDCARD in prev_wildcard:
+        return request_required_state_map, StateFilter.none()
+
+    # If an event type wildcard has been added or removed we don't try and do
+    # anything fancy, and instead always update the effective room required
+    # state config to match the request.
+    if request_wildcard - prev_wildcard:
+        # Some keys were added, so we need to fetch everything
+        return request_required_state_map, StateFilter.all()
+    if prev_wildcard - request_wildcard:
+        # Keys were only removed, so we don't have to fetch everything.
+        return request_required_state_map, StateFilter.none()
+
+    # Contains updates to the required state map compared with the previous room
+    # config. This has the same format as `RoomSyncConfig.required_state`
+    changes: Dict[str, AbstractSet[str]] = {}
+
+    # The set of types/state keys that we need to fetch and return to the
+    # client. Passed to `StateFilter.from_types(...)`
+    added: List[Tuple[str, Optional[str]]] = []
+
+    # First we calculate what, if anything, has been *added*.
+    for event_type in (
+        prev_required_state_map.keys() | request_required_state_map.keys()
+    ):
+        old_state_keys = prev_required_state_map.get(event_type, set())
+        request_state_keys = request_required_state_map.get(event_type, set())
+
+        if old_state_keys == request_state_keys:
+            # No change to this type
+            continue
+
+        if not request_state_keys - old_state_keys:
+            # Nothing *added*, so we skip. Removals happen below.
+            continue
+
+        # Always update changes to include the newly added keys
+        changes[event_type] = request_state_keys
+
+        if StateValues.WILDCARD in old_state_keys:
+            # We were previously fetching everything for this type, so we don't need to
+            # fetch anything new.
+            continue
+
+        # Record the new state keys to fetch for this type.
+        if StateValues.WILDCARD in request_state_keys:
+            # If we have added a wildcard then we always just fetch everything.
+            added.append((event_type, None))
+        else:
+            for state_key in request_state_keys - old_state_keys:
+                if state_key == StateValues.ME:
+                    added.append((event_type, user_id))
+                elif state_key == StateValues.LAZY:
+                    # We handle lazy loading separately (outside this function),
+                    # so don't need to explicitly add anything here.
+                    #
+                    # LAZY values should also be ignored for event types that are
+                    # not membership.
+                    pass
+                else:
+                    added.append((event_type, state_key))
+
+    added_state_filter = StateFilter.from_types(added)
+
+    # Convert the list of state deltas to map from type to state_keys that have
+    # changed.
+    changed_types_to_state_keys: Dict[str, Set[str]] = {}
+    for event_type, state_key in state_deltas:
+        changed_types_to_state_keys.setdefault(event_type, set()).add(state_key)
+
+    # Figure out what changes we need to apply to the effective required state
+    # config.
+    for event_type, changed_state_keys in changed_types_to_state_keys.items():
+        old_state_keys = prev_required_state_map.get(event_type, set())
+        request_state_keys = request_required_state_map.get(event_type, set())
+
+        if old_state_keys == request_state_keys:
+            # No change.
+            continue
+
+        if request_state_keys - old_state_keys:
+            # We've expanded the set of state keys, so we just clobber the
+            # current set with the new set.
+            #
+            # We could also ensure that we keep entries where the state hasn't
+            # changed, but are no longer in the requested required state, but
+            # that's a sufficient edge case that we can ignore (as it's only a
+            # performance optimization).
+            changes[event_type] = request_state_keys
+            continue
+
+        old_state_key_wildcard = StateValues.WILDCARD in old_state_keys
+        request_state_key_wildcard = StateValues.WILDCARD in request_state_keys
+
+        if old_state_key_wildcard != request_state_key_wildcard:
+            # If a state_key wildcard has been added or removed, we always update the
+            # effective room required state config to match the request.
+            changes[event_type] = request_state_keys
+            continue
+
+        if event_type == EventTypes.Member:
+            old_state_key_lazy = StateValues.LAZY in old_state_keys
+            request_state_key_lazy = StateValues.LAZY in request_state_keys
+
+            if old_state_key_lazy != request_state_key_lazy:
+                # If a "$LAZY" has been added or removed we always update the effective room
+                # required state config to match the request.
+                changes[event_type] = request_state_keys
+                continue
+
+        # Handle "$ME" values by adding "$ME" if the state key matches the user
+        # ID.
+        if user_id in changed_state_keys:
+            changed_state_keys.add(StateValues.ME)
+
+        # At this point there are no wildcards and no additions to the set of
+        # state keys requested, only deletions.
+        #
+        # We only remove state keys from the effective state if they've been
+        # removed from the request *and* the state has changed. This ensures
+        # that if a client removes and then re-adds a state key, we only send
+        # down the associated current state event if it's changed (rather than
+        # sending down the same event twice).
+        invalidated = (old_state_keys - request_state_keys) & changed_state_keys
+        if invalidated:
+            changes[event_type] = old_state_keys - invalidated
+
+    if changes:
+        # Update the required state config based on the changes.
+        new_required_state_map = dict(prev_required_state_map)
+        for event_type, state_keys in changes.items():
+            if state_keys:
+                new_required_state_map[event_type] = state_keys
+            else:
+                # Remove entries with empty state keys.
+ new_required_state_map.pop(event_type, None) + + return new_required_state_map, added_state_filter + else: + return None, added_state_filter diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index f2df37fec15c..7b357c1ffea4 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -386,8 +386,8 @@ def _get_and_clear_connection_positions_txn( required_state_map: Dict[int, Dict[str, Set[str]]] = {} for row in rows: state = required_state_map[row[0]] = {} - for event_type, state_keys in db_to_json(row[1]): - state[event_type] = set(state_keys) + for event_type, state_key in db_to_json(row[1]): + state.setdefault(event_type, set()).add(state_key) # Get all the room configs, looking up the required state from the map # above. diff --git a/synapse/types/state.py b/synapse/types/state.py index 1141c4b5c1f5..67d1c3fe9722 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -616,6 +616,13 @@ def __contains__(self, key: Any) -> bool: return False + def __bool__(self) -> bool: + """Returns true if this state filter will match any state, or false if + this is the empty filter""" + if self.include_others: + return True + return bool(self.types) + _ALL_STATE_FILTER = StateFilter(types=immutabledict(), include_others=True) _ALL_NON_MEMBER_STATE_FILTER = StateFilter( diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index e2c7a94ce29e..9a68d1dd958c 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -18,9 +18,10 @@ # # import logging -from typing import AbstractSet, Dict, Optional, Tuple +from typing import AbstractSet, Dict, Mapping, Optional, Set, Tuple from unittest.mock import patch +import attr from parameterized import parameterized from twisted.test.proto_helpers import MemoryReactor @@ -35,15 +36,18 @@ RoomsForUserType, RoomSyncConfig, StateValues, + _required_state_changes, ) from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer from synapse.storage.util.id_generators import MultiWriterIdGenerator -from synapse.types import JsonDict, StreamToken, UserID +from synapse.types import JsonDict, StateMap, StreamToken, UserID from synapse.types.handlers.sliding_sync import SlidingSyncConfig +from synapse.types.state import StateFilter from synapse.util import Clock +from tests import unittest from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.unittest import HomeserverTestCase, TestCase @@ -3213,3 +3217,689 @@ def test_default_bump_event_types(self) -> None: # We only care about the *latest* event in the room. 
[room_id1, room_id2], ) + + +@attr.s(slots=True, auto_attribs=True, frozen=True) +class RequiredStateChangesTestParameters: + previous_required_state_map: Dict[str, Set[str]] + request_required_state_map: Dict[str, Set[str]] + state_deltas: StateMap[str] + expected_with_state_deltas: Tuple[ + Optional[Mapping[str, AbstractSet[str]]], StateFilter + ] + expected_without_state_deltas: Tuple[ + Optional[Mapping[str, AbstractSet[str]]], StateFilter + ] + + +class RequiredStateChangesTestCase(unittest.TestCase): + """Test cases for `_required_state_changes`""" + + @parameterized.expand( + [ + ( + "simple_no_change", + """Test no change to required state""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type1", "state_key"): "$event_id"}, + # No changes + expected_with_state_deltas=(None, StateFilter.none()), + expected_without_state_deltas=(None, StateFilter.none()), + ), + ), + ( + "simple_add_type", + """Test adding a type to the config""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {"state_key"}, "type2": {"state_key"}}, + # We should see the new type added + StateFilter.from_types([("type2", "state_key")]), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {"state_key"}}, + StateFilter.from_types([("type2", "state_key")]), + ), + ), + ), + ( + "simple_add_type_from_nothing", + """Test adding a type to the config when previously requesting nothing""", + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {"state_key"}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {"state_key"}, "type2": {"state_key"}}, + # We should see the new types added + StateFilter.from_types( + [("type1", "state_key"), ("type2", "state_key")] + ), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {"state_key"}}, + StateFilter.from_types( + [("type1", "state_key"), ("type2", "state_key")] + ), + ), + ), + ), + ( + "simple_add_state_key", + """Test adding a state key to the config""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type": {"state_key1"}}, + request_required_state_map={"type": {"state_key1", "state_key2"}}, + state_deltas={("type", "state_key2"): "$event_id"}, + expected_with_state_deltas=( + # We've added a key so we should persist the changed required state + # config. 
+                        {"type": {"state_key1", "state_key2"}},
+                        # We should see the new state_keys added
+                        StateFilter.from_types([("type", "state_key2")]),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type": {"state_key1", "state_key2"}},
+                        StateFilter.from_types([("type", "state_key2")]),
+                    ),
+                ),
+            ),
+            (
+                "simple_remove_type",
+                """
+                Test that removing a type from the config when there is a matching
+                state delta does cause the persisted required state config to change
+
+                Test that removing a type from the config when there are no matching
+                state deltas does *not* cause the persisted required state config to
+                change
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {"state_key"},
+                    },
+                    request_required_state_map={"type1": {"state_key"}},
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `type2` since there's been a change to that state,
+                        # (persist the change to required state). That way next time,
+                        # they request `type2`, we see that we haven't sent it before
+                        # and send the new state. (we should still keep track that we've
+                        # sent `type1` before).
+                        {"type1": {"state_key"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `type2` is no longer requested but since that state hasn't
+                        # changed, nothing should change (we should still keep track
+                        # that we've sent `type2` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "simple_remove_type_to_nothing",
+                """
+                Test removing a type from the config and no longer requesting any state
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={
+                        "type1": {"state_key"},
+                        "type2": {"state_key"},
+                    },
+                    request_required_state_map={},
+                    state_deltas={("type2", "state_key"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `type2` since there's been a change to that state,
+                        # (persist the change to required state). That way next time,
+                        # they request `type2`, we see that we haven't sent it before
+                        # and send the new state. (we should still keep track that we've
+                        # sent `type1` before).
+                        {"type1": {"state_key"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `type2` is no longer requested but since that state hasn't
+                        # changed, nothing should change (we should still keep track
+                        # that we've sent `type2` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "simple_remove_state_key",
+                """
+                Test removing a state_key from the config
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type": {"state_key1", "state_key2"}},
+                    request_required_state_map={"type": {"state_key1"}},
+                    state_deltas={("type", "state_key2"): "$event_id"},
+                    expected_with_state_deltas=(
+                        # Remove `(type, state_key2)` since there's been a change
+                        # to that state (persist the change to required state).
+                        # That way next time, they request `(type, state_key2)`, we see
+                        # that we haven't sent it before and send the new state. (we
+                        # should still keep track that we've sent `(type, state_key1)`
+                        # before).
+                        {"type": {"state_key1"}},
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                    expected_without_state_deltas=(
+                        # `(type, state_key2)` is no longer requested but since that
+                        # state hasn't changed, nothing should change (we should still
+                        # keep track that we've sent `(type, state_key1)` and `(type,
+                        # state_key2)` before).
+                        None,
+                        # We don't need to request anything more if they are requesting
+                        # less state now
+                        StateFilter.none(),
+                    ),
+                ),
+            ),
+            (
+                "type_wildcards_add",
+                """
+                Test that adding a wildcard type causes the persisted required state
+                config to change and we request everything.
+
+                If an event type wildcard has been added or removed we don't try and do
+                anything fancy, and instead always update the effective room required
+                state config to match the request.
+                """,
+                RequiredStateChangesTestParameters(
+                    previous_required_state_map={"type1": {"state_key2"}},
+                    request_required_state_map={
+                        "type1": {"state_key2"},
+                        StateValues.WILDCARD: {"state_key"},
+                    },
+                    state_deltas={
+                        ("other_type", "state_key"): "$event_id",
+                    },
+                    # We've added a wildcard, so we persist the change and request everything
+                    expected_with_state_deltas=(
+                        {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
+                        StateFilter.all(),
+                    ),
+                    expected_without_state_deltas=(
+                        {"type1": {"state_key2"}, StateValues.WILDCARD: {"state_key"}},
+                        StateFilter.all(),
+                    ),
+                ),
+            ),
+            (
+                "type_wildcards_remove",
+                """
+                Test that removing a wildcard type causes the persisted required state
+                config to change and request nothing.
+
+                If an event type wildcard has been added or removed we don't try and do
+                anything fancy, and instead always update the effective room required
+                state config to match the request.
+ """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key2"}, + StateValues.WILDCARD: {"state_key"}, + }, + request_required_state_map={"type1": {"state_key2"}}, + state_deltas={ + ("other_type", "state_key"): "$event_id", + }, + # We've removed a type wildcard, so we persist the change but don't request anything + expected_with_state_deltas=( + {"type1": {"state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + {"type1": {"state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_wildcards_add", + """Test adding a wildcard state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"state_key"}}, + request_required_state_map={ + "type1": {"state_key"}, + "type2": {StateValues.WILDCARD}, + }, + state_deltas={("type2", "state_key"): "$event_id"}, + # We've added a wildcard state_key, so we persist the change and + # request all of the state for that type + expected_with_state_deltas=( + {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}}, + StateFilter.from_types([("type2", None)]), + ), + expected_without_state_deltas=( + {"type1": {"state_key"}, "type2": {StateValues.WILDCARD}}, + StateFilter.from_types([("type2", None)]), + ), + ), + ), + ( + "state_key_wildcards_remove", + """Test removing a wildcard state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key"}, + "type2": {StateValues.WILDCARD}, + }, + request_required_state_map={"type1": {"state_key"}}, + state_deltas={("type2", "state_key"): "$event_id"}, + # We've removed a state_key wildcard, so we persist the change and + # request nothing + expected_with_state_deltas=( + {"type1": {"state_key"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + # We've removed a state_key wildcard but there have been no matching + # state changes, so no changes needed, just persist the + # `request_required_state_map` as-is. + expected_without_state_deltas=( + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_remove_some", + """ + Test that removing state keys work when only some of the state keys have + changed + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={"type1": {"state_key1"}}, + state_deltas={("type1", "state_key3"): "$event_id"}, + expected_with_state_deltas=( + # We've removed some state keys from the type, but only state_key3 was + # changed so only that one should be removed. 
+ {"type1": {"state_key1", "state_key2"}}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # No changes needed, just persist the + # `request_required_state_map` as-is + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_me_add", + """ + Test adding state keys work when using "$ME" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={"type1": {StateValues.ME}}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {StateValues.ME}}, + # We should see the new state_keys added + StateFilter.from_types([("type1", "@user:test")]), + ), + expected_without_state_deltas=( + {"type1": {StateValues.ME}}, + StateFilter.from_types([("type1", "@user:test")]), + ), + ), + ), + ( + "state_key_me_remove", + """ + Test removing state keys work when using "$ME" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {StateValues.ME}}, + request_required_state_map={}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type1` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type1`, we see that we haven't sent it before + # and send the new state. (if we were tracking that we sent any + # other state, we should still keep track that). + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type1` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type1` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_user_id_add", + """ + Test adding state keys work when using your own user ID + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={"type1": {"@user:test"}}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # We've added a type so we should persist the changed required state + # config. + {"type1": {"@user:test"}}, + # We should see the new state_keys added + StateFilter.from_types([("type1", "@user:test")]), + ), + expected_without_state_deltas=( + {"type1": {"@user:test"}}, + StateFilter.from_types([("type1", "@user:test")]), + ), + ), + ), + ( + "state_key_me_remove", + """ + Test removing state keys work when using your own user ID + """, + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {"@user:test"}}, + request_required_state_map={}, + state_deltas={("type1", "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `type1` since there's been a change to that state, + # (persist the change to required state). That way next time, + # they request `type1`, we see that we haven't sent it before + # and send the new state. (if we were tracking that we sent any + # other state, we should still keep track that). 
+ {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `type1` is no longer requested but since that state hasn't + # changed, nothing should change (we should still keep track + # that we've sent `type1` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_add", + """ + Test that adding state keys works when using "$LAZY" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={}, + request_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + state_deltas={(EventTypes.Member, "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # If a "$LAZY" has been added or removed we always update the + # required state to what was requested for simplicity. + {EventTypes.Member: {StateValues.LAZY}}, + StateFilter.none(), + ), + expected_without_state_deltas=( + {EventTypes.Member: {StateValues.LAZY}}, + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_remove", + """ + Test that removing state keys works when using "$LAZY" + """, + RequiredStateChangesTestParameters( + previous_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + request_required_state_map={}, + state_deltas={(EventTypes.Member, "@user:test"): "$event_id"}, + expected_with_state_deltas=( + # If a "$LAZY" has been added or removed we always update the + # required state to what was requested for simplicity. + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `EventTypes.Member` is no longer requested but since that + # state hasn't changed, nothing should change (we should still + # keep track that we've sent `EventTypes.Member` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "type_wildcard_with_state_key_wildcard_to_explicit_state_keys", + """ + Test switching from a wildcard ("*", "*") to explicit state keys + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + StateValues.WILDCARD: {StateValues.WILDCARD} + }, + request_required_state_map={ + StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If we were previously fetching everything ("*", "*"), always update the effective + # room required state config to match the request. And since we were previously + # already fetching everything, we don't have to fetch anything now that they've + # narrowed.
+ expected_with_state_deltas=( + { + StateValues.WILDCARD: { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + expected_without_state_deltas=( + { + StateValues.WILDCARD: { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + ), + ), + ( + "type_wildcard_with_explicit_state_keys_to_wildcard_state_key", + """ + Test switching from explicit to wildcard state keys ("*", "*") + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + StateValues.WILDCARD: {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={ + StateValues.WILDCARD: {StateValues.WILDCARD} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # We've added a wildcard, so we persist the change and request everything + expected_with_state_deltas=( + {StateValues.WILDCARD: {StateValues.WILDCARD}}, + StateFilter.all(), + ), + expected_without_state_deltas=( + {StateValues.WILDCARD: {StateValues.WILDCARD}}, + StateFilter.all(), + ), + ), + ), + ( + "state_key_wildcard_to_explicit_state_keys", + """Test switching from a wildcard to explicit state keys with a concrete type""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type1": {StateValues.WILDCARD}}, + request_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If a state_key wildcard has been added or removed, we always + # update the effective room required state config to match the + # request. And since we were previously already fetching + # everything, we don't have to fetch anything now that they've + # narrowed. + expected_with_state_deltas=( + { + "type1": { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + expected_without_state_deltas=( + { + "type1": { + "state_key1", + "state_key2", + "state_key3", + } + }, + StateFilter.none(), + ), + ), + ), + ( + "state_key_explicit_state_keys_to_wildcard", + """Test switching from explicit state keys to a wildcard with a concrete type""", + RequiredStateChangesTestParameters( + previous_required_state_map={ + "type1": {"state_key1", "state_key2", "state_key3"} + }, + request_required_state_map={"type1": {StateValues.WILDCARD}}, + state_deltas={("type1", "state_key1"): "$event_id"}, + # If a state_key wildcard has been added or removed, we always + # update the effective room required state config to match the + # request. And we need to request all of the state for that type + # because we previously only sent down a few keys.
+ expected_with_state_deltas=( + {"type1": {StateValues.WILDCARD}}, + StateFilter.from_types([("type1", None)]), + ), + expected_without_state_deltas=( + {"type1": {StateValues.WILDCARD}}, + StateFilter.from_types([("type1", None)]), + ), + ), + ), + ] + ) + def test_xxx( + self, + _test_label: str, + _test_description: str, + test_parameters: RequiredStateChangesTestParameters, + ) -> None: + # Without `state_deltas` + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + previous_room_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.previous_required_state_map, + ), + room_sync_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.request_required_state_map, + ), + state_deltas={}, + ) + + self.assertEqual( + changed_required_state_map, + test_parameters.expected_without_state_deltas[0], + "changed_required_state_map does not match (without state_deltas)", + ) + self.assertEqual( + added_state_filter, + test_parameters.expected_without_state_deltas[1], + "added_state_filter does not match (without state_deltas)", + ) + + # With `state_deltas` + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + previous_room_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.previous_required_state_map, + ), + room_sync_config=RoomSyncConfig( + timeline_limit=0, + required_state_map=test_parameters.request_required_state_map, + ), + state_deltas=test_parameters.state_deltas, + ) + + self.assertEqual( + changed_required_state_map, + test_parameters.expected_with_state_deltas[0], + "changed_required_state_map does not match (with state_deltas)", + ) + self.assertEqual( + added_state_filter, + test_parameters.expected_with_state_deltas[1], + "added_state_filter does not match (with state_deltas)", + ) diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 91ac6c5a0ee7..7da51d4954a8 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -862,3 +862,264 @@ def test_rooms_required_state_partial_state(self) -> None: exact=True, message=f"Expected only fully-stated rooms to show up for test_key={list_key}.", ) + + def test_rooms_required_state_expand(self) -> None: + """Test that when we expand the required state argument we get the + expanded state, and not just the changes to the newly expanded types.""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync.
+ self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # We should not see any state changes. + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + def test_rooms_required_state_expand_retract_expand(self) -> None: + """Test that when expanding, retracting and then expanding the required + state, we get the changes that happened.""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Update the room name + self.helper.send_state( + room_id1, "m.room.name", {"name": "Bar"}, state_key="", tok=user1_tok + ) + + # Update the sliding sync requests to exclude the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see the updated room name in state (though it will be in + # the timeline). + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the *new* room name, even though there haven't been any + # changes. 
+ state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + def test_rooms_required_state_expand_deduplicate(self) -> None: + """Test that when expanding, retracting and then expanding the required + state, we don't get the state down again if it hasn't changed""" + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + + # Create a room with a room name. + room_id1 = self.helper.create_room_as( + user1_id, tok=user1_tok, extra_content={"name": "Foo"} + ) + + # Only request the state event to begin with + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 1, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Create, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should see the room name, even though there haven't been any + # changes. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Name, "")], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to exclude the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see any state updates + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user1_tok) + + # Update the sliding sync requests to include the room name again + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Name, ""], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + # We should not see the room name again, as we have already sent that + # down. + self.assertIsNone(response_body["rooms"][room_id1].get("required_state")) From 5d47138b4677658da2b52d24f9e85b4742f6dc1d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 14 Oct 2024 13:34:55 +0100 Subject: [PATCH 016/147] Fix typo in `target_cache_memory_usage` docs (#17825) --- changelog.d/17825.doc | 1 + docs/usage/configuration/config_documentation.md | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17825.doc diff --git a/changelog.d/17825.doc b/changelog.d/17825.doc new file mode 100644 index 000000000000..ee4366741758 --- /dev/null +++ b/changelog.d/17825.doc @@ -0,0 +1 @@ +Fix typo in `target_cache_memory_usage` docs. 
\ No newline at end of file diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 1de2f688656f..47e3ef12870c 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1434,7 +1434,7 @@ number of entries that can be stored. Please see the [Config Conventions](#config-conventions) for information on how to specify memory size and cache expiry durations. * `max_cache_memory_usage` sets a ceiling on how much memory the cache can use before caches begin to be continuously evicted. - They will continue to be evicted until the memory usage drops below the `target_memory_usage`, set in + They will continue to be evicted until the memory usage drops below the `target_cache_memory_usage`, set in the setting below, or until the `min_cache_ttl` is hit. There is no default value for this option. * `target_cache_memory_usage` sets a rough target for the desired memory usage of the caches. There is no default value for this option. From adda2a4613cb67d4329681e5c5eb7867a17e021d Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 14 Oct 2024 07:47:35 -0500 Subject: [PATCH 017/147] Sliding Sync: Slight optimization when fetching state for the room (`get_events_as_list(...)`) (#17718) Spawning from @kegsay [pointing out](https://matrix.to/#/!cnVVNLKqgUzNTOFQkz:matrix.org/$ExOO7J8uPUQSyH-9Uxc_QCa8jlXX9uK4VRtkSC0EI3o?via=element.io&via=matrix.org&via=jki.re) that the Sliding Sync endpoint doesn't handle a large room with a lot of state well on initial sync (requesting all state via `required_state: [ ["*","*"] ]`) (it just takes forever). After investigating further, the slow part is just `get_events_as_list(...)` fetching all of the current state IDs out for the room (which can be 100k+ events for rooms with a lot of membership). This is just a slow thing in Synapse in general and the same thing happens in Sync v2 or the `/state` endpoint. --- The only idea I had to improve things was to use `batch_iter` to only try fetching a fixed amount at a time instead of working with large maps, lists, and sets. This doesn't seem to have much effect though. There is already a `batch_iter(event_ids, 200)` in `_fetch_event_rows(...)` for when we actually have to touch the database and that's inside a queue to deduplicate work. I did notice one slight optimization to use `get_events_as_list(...)` directly instead of `get_events(...)`. `get_events(...)` just turns the result from `get_events_as_list(...)` into a dict and since we're just iterating over the events, we don't need the dict/map. --- changelog.d/17718.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 8 ++- .../storage/databases/main/events_worker.py | 41 ++++++++++++++-- .../databases/main/test_events_worker.py | 49 ++++++++++++++++++- 4 files changed, 89 insertions(+), 10 deletions(-) create mode 100644 changelog.d/17718.misc diff --git a/changelog.d/17718.misc b/changelog.d/17718.misc new file mode 100644 index 000000000000..ea73a03f537c --- /dev/null +++ b/changelog.d/17718.misc @@ -0,0 +1 @@ +Slight optimization when fetching state/events for Sliding Sync.
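To make the dict-versus-list point above concrete, here is a minimal, self-contained sketch; the synchronous signatures and the in-memory `EVENT_DB` are toy stand-ins for the real async `EventsWorkerStore` methods, not Synapse's actual API:

```python
# Sketch (assumed/toy signatures): get_events() is get_events_as_list()
# plus an extra event_id -> event map, which a caller that only iterates
# over the events never needs.
from typing import Dict, List

EVENT_DB: Dict[str, dict] = {
    "$create": {"event_id": "$create", "type": "m.room.create", "state_key": ""},
    "$name": {"event_id": "$name", "type": "m.room.name", "state_key": ""},
}

def get_events_as_list(event_ids: List[str]) -> List[dict]:
    # Return the events we know about, skipping any missing IDs.
    return [EVENT_DB[eid] for eid in event_ids if eid in EVENT_DB]

def get_events(event_ids: List[str]) -> Dict[str, dict]:
    # The dict variant pays for building the intermediate map.
    return {ev["event_id"]: ev for ev in get_events_as_list(event_ids)}

# A caller that only iterates can skip the map entirely:
state_map = {
    (ev["type"], ev["state_key"]): ev
    for ev in get_events_as_list(["$create", "$name"])
}
```

This mirrors the shape of the change to `get_current_state_at` in the hunk that follows.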
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 39dba4ff988a..a1a6728fb934 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -452,13 +452,11 @@ async def get_current_state_at( to_token=to_token, ) - event_map = await self.store.get_events(list(state_ids.values())) + events = await self.store.get_events_as_list(list(state_ids.values())) state_map = {} - for key, event_id in state_ids.items(): - event = event_map.get(event_id) - if event: - state_map[key] = event + for event in events: + state_map[(event.type, event.state_key)] = event return state_map diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index c029228422d6..403407068c5f 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -61,7 +61,13 @@ current_context, make_deferred_yieldable, ) -from synapse.logging.opentracing import start_active_span, tag_args, trace +from synapse.logging.opentracing import ( + SynapseTags, + set_tag, + start_active_span, + tag_args, + trace, +) from synapse.metrics.background_process_metrics import ( run_as_background_process, wrap_as_background_process, @@ -525,6 +531,7 @@ async def get_event( return event + @trace async def get_events( self, event_ids: Collection[str], @@ -556,6 +563,11 @@ async def get_events( Returns: A mapping from event_id to event. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + events = await self.get_events_as_list( event_ids, redact_behaviour=redact_behaviour, @@ -603,6 +615,10 @@ async def get_events_as_list( Note that the returned list may be smaller than the list of event IDs if not all events could be fetched. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) if not event_ids: return [] @@ -723,10 +739,11 @@ async def get_events_as_list( return events + @trace @cancellable async def get_unredacted_events_from_cache_or_db( self, - event_ids: Iterable[str], + event_ids: Collection[str], allow_rejected: bool = False, ) -> Dict[str, EventCacheEntry]: """Fetch a bunch of events from the cache or the database. @@ -748,6 +765,11 @@ async def get_unredacted_events_from_cache_or_db( Returns: map from event id to result """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + # Shortcut: check if we have any events in the *in memory* cache - this function # may be called repeatedly for the same event so at this point we cannot reach # out to any external cache for performance reasons. The external cache is @@ -936,7 +958,7 @@ async def _get_events_from_cache( events, update_metrics=update_metrics ) - missing_event_ids = (e for e in events if e not in event_map) + missing_event_ids = [e for e in events if e not in event_map] event_map.update( await self._get_events_from_external_cache( events=missing_event_ids, @@ -946,8 +968,9 @@ async def _get_events_from_cache( return event_map + @trace async def _get_events_from_external_cache( - self, events: Iterable[str], update_metrics: bool = True + self, events: Collection[str], update_metrics: bool = True ) -> Dict[str, EventCacheEntry]: """Fetch events from any configured external cache. 
@@ -957,6 +980,10 @@ async def _get_events_from_external_cache( events: list of event_ids to fetch update_metrics: Whether to update the cache hit ratio metrics """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "events.length", + str(len(events)), + ) event_map = {} for event_id in events: @@ -1222,6 +1249,7 @@ def fire_errback(exc: Exception) -> None: with PreserveLoggingContext(): self.hs.get_reactor().callFromThread(fire_errback, e) + @trace async def _get_events_from_db( self, event_ids: Collection[str] ) -> Dict[str, EventCacheEntry]: @@ -1240,6 +1268,11 @@ async def _get_events_from_db( map from event id to result. May return extra events which weren't asked for. """ + set_tag( + SynapseTags.FUNC_ARG_PREFIX + "event_ids.length", + str(len(event_ids)), + ) + fetched_event_ids: Set[str] = set() fetched_events: Dict[str, _EventRow] = {} diff --git a/tests/storage/databases/main/test_events_worker.py b/tests/storage/databases/main/test_events_worker.py index fd1f5e7fd5e4..104d141a72ea 100644 --- a/tests/storage/databases/main/test_events_worker.py +++ b/tests/storage/databases/main/test_events_worker.py @@ -20,7 +20,7 @@ # import json from contextlib import contextmanager -from typing import Generator, List, Tuple +from typing import Generator, List, Set, Tuple from unittest import mock from twisted.enterprise.adbapi import ConnectionPool @@ -295,6 +295,53 @@ def test_dedupe(self) -> None: self.assertEqual(ctx.get_resource_usage().evt_db_fetch_count, 1) +class GetEventsTestCase(unittest.HomeserverTestCase): + """Test `get_events(...)`/`get_events_as_list(...)`""" + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.store: EventsWorkerStore = hs.get_datastores().main + + def test_get_lots_of_messages(self) -> None: + """Sanity check that `get_events(...)`/`get_events_as_list(...)` works""" + num_events = 100 + + user_id = self.register_user("user", "pass") + user_tok = self.login(user_id, "pass") + + room_id = self.helper.create_room_as(user_id, tok=user_tok) + + event_ids: Set[str] = set() + for i in range(num_events): + event = self.get_success( + inject_event( + self.hs, + room_id=room_id, + type="m.room.message", + sender=user_id, + content={ + "body": f"foo{i}", + "msgtype": "m.text", + }, + ) + ) + event_ids.add(event.event_id) + + # Sanity check that we actually created the events + self.assertEqual(len(event_ids), num_events) + + # This is the function under test + fetched_event_map = self.get_success(self.store.get_events(event_ids)) + + # Sanity check that we got the events back + self.assertIncludes(fetched_event_map.keys(), event_ids, exact=True) + + class DatabaseOutageTestCase(unittest.HomeserverTestCase): """Test event fetching during a database outage.""" From c5b379de660927f64bf0952c1c6ce5a497ad6aea Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 14 Oct 2024 13:49:43 +0100 Subject: [PATCH 018/147] Enable the `.org.matrix.msc4028.encrypted_event` push rule by default (#17826) Clients will still only see this rule if the corresponding experimental feature, `msc4028_push_encrypted_events`, is also enabled. This aligns the implementation with MSC4028, specifically [this section](https://github.com/matrix-org/matrix-spec-proposals/blob/giomfo/push_encrypted_events/proposals/4028-push-all-encrypted-events-except-for-muted-rooms.md#unstable-prefix). 
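As a rough sketch of the gating described here (the `msc4028_push_encrypted_events` key matches the experimental flag named above, but `client_sees_msc4028_rule` is a hypothetical helper, not Synapse's actual API):

```python
# The .org.matrix.msc4028.encrypted_event rule is now enabled by default,
# but clients are only shown it at all when the experimental feature flag
# is switched on in the `experimental_features` config section.
def client_sees_msc4028_rule(experimental_features: dict) -> bool:
    return experimental_features.get("msc4028_push_encrypted_events", False)

assert client_sees_msc4028_rule({"msc4028_push_encrypted_events": True})
assert not client_sees_msc4028_rule({})
```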
See https://github.com/element-hq/synapse/issues/16846 for context. --- changelog.d/17826.misc | 1 + rust/src/push/base_rules.rs | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17826.misc diff --git a/changelog.d/17826.misc b/changelog.d/17826.misc new file mode 100644 index 000000000000..9148c96a0d4e --- /dev/null +++ b/changelog.d/17826.misc @@ -0,0 +1 @@ +Enable the `.org.matrix.msc4028.encrypted_event` push rule by default in accordance with [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028). Note that the corresponding experimental feature must still be switched on for this push rule to have any effect. \ No newline at end of file diff --git a/rust/src/push/base_rules.rs b/rust/src/push/base_rules.rs index 74f02d600138..e0832ada1c7a 100644 --- a/rust/src/push/base_rules.rs +++ b/rust/src/push/base_rules.rs @@ -81,7 +81,7 @@ pub const BASE_APPEND_OVERRIDE_RULES: &[PushRule] = &[ ))]), actions: Cow::Borrowed(&[Action::Notify]), default: true, - default_enabled: false, + default_enabled: true, }, PushRule { rule_id: Cow::Borrowed("global/override/.m.rule.suppress_notices"), From 11bc9a1b3ae21bd2041bf7cc4b93dd70a8c3b93e Mon Sep 17 00:00:00 2001 From: Tulir Asokan Date: Mon, 14 Oct 2024 15:24:28 +0200 Subject: [PATCH 019/147] Implement MSC4210: Remove legacy mentions (#17783) --- changelog.d/17783.feature | 1 + rust/benches/evaluator.rs | 5 +++++ rust/src/push/evaluator.rs | 13 +++++++++++-- rust/src/push/mod.rs | 11 +++++++++++ synapse/config/experimental.py | 3 +++ synapse/push/bulk_push_rule_evaluator.py | 1 + synapse/storage/databases/main/push_rule.py | 1 + synapse/synapse_rust/push.pyi | 2 ++ tests/push/test_push_rule_evaluator.py | 2 ++ 9 files changed, 37 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17783.feature diff --git a/changelog.d/17783.feature b/changelog.d/17783.feature new file mode 100644 index 000000000000..ce8c2164185c --- /dev/null +++ b/changelog.d/17783.feature @@ -0,0 +1 @@ +Implement [MSC4210](https://github.com/matrix-org/matrix-spec-proposals/pull/4210): Remove legacy mentions. Contributed by @tulir @ Beeper. diff --git a/rust/benches/evaluator.rs b/rust/benches/evaluator.rs index 4fea035b961f..28537e187ecf 100644 --- a/rust/benches/evaluator.rs +++ b/rust/benches/evaluator.rs @@ -60,6 +60,7 @@ fn bench_match_exact(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -105,6 +106,7 @@ fn bench_match_word(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -150,6 +152,7 @@ fn bench_match_word_miss(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -195,6 +198,7 @@ fn bench_eval_message(b: &mut Bencher) { true, vec![], false, + false, ) .unwrap(); @@ -205,6 +209,7 @@ fn bench_eval_message(b: &mut Bencher) { false, false, false, + false, ); b.iter(|| eval.run(&rules, Some("bob"), Some("person"))); diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 2f4b6d47bb2a..0d436a1d7b54 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -105,6 +105,9 @@ pub struct PushRuleEvaluator { /// If MSC3931 (room version feature flags) is enabled. Usually controlled by the same /// flag as MSC1767 (extensible events core). msc3931_enabled: bool, + + // If MSC4210 (remove legacy mentions) is enabled. 
+ msc4210_enabled: bool, } #[pymethods] @@ -122,6 +125,7 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, + msc4210_enabled, ))] pub fn py_new( flattened_keys: BTreeMap, @@ -133,6 +137,7 @@ impl PushRuleEvaluator { related_event_match_enabled: bool, room_version_feature_flags: Vec, msc3931_enabled: bool, + msc4210_enabled: bool, ) -> Result { let body = match flattened_keys.get("content.body") { Some(JsonValue::Value(SimpleJsonValue::Str(s))) => s.clone().into_owned(), @@ -150,6 +155,7 @@ impl PushRuleEvaluator { related_event_match_enabled, room_version_feature_flags, msc3931_enabled, + msc4210_enabled, }) } @@ -176,7 +182,8 @@ impl PushRuleEvaluator { // For backwards-compatibility the legacy mention rules are disabled // if the event contains the 'm.mentions' property. - if self.has_mentions + // Additionally, MSC4210 always disables the legacy rules. + if (self.has_mentions || self.msc4210_enabled) && (rule_id == "global/override/.m.rule.contains_display_name" || rule_id == "global/content/.m.rule.contains_user_name" || rule_id == "global/override/.m.rule.roomnotif") @@ -526,6 +533,7 @@ fn push_rule_evaluator() { true, vec![], true, + false, ) .unwrap(); @@ -555,6 +563,7 @@ fn test_requires_room_version_supports_condition() { false, flags, true, + false, ) .unwrap(); @@ -582,7 +591,7 @@ fn test_requires_room_version_supports_condition() { }; let rules = PushRules::new(vec![custom_rule]); result = evaluator.run( - &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false), + &FilteredPushRules::py_new(rules, BTreeMap::new(), true, false, true, false, false), None, None, ); diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index 2a452b69a3ca..ef8ed150d421 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -534,6 +534,7 @@ pub struct FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, } #[pymethods] @@ -546,6 +547,7 @@ impl FilteredPushRules { msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, ) -> Self { Self { push_rules, @@ -554,6 +556,7 @@ impl FilteredPushRules { msc3381_polls_enabled, msc3664_enabled, msc4028_push_encrypted_events, + msc4210_enabled, } } @@ -596,6 +599,14 @@ impl FilteredPushRules { return false; } + if self.msc4210_enabled + && (rule.rule_id == "global/override/.m.rule.contains_display_name" + || rule.rule_id == "global/content/.m.rule.contains_user_name" + || rule.rule_id == "global/override/.m.rule.roomnotif") + { + return false; + } + true }) .map(|r| { diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 99185db93d4d..fd14db0d0244 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -447,3 +447,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC4151: Report room API (Client-Server API) self.msc4151_enabled: bool = experimental.get("msc4151_enabled", False) + + # MSC4210: Remove legacy mentions + self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False) diff --git a/synapse/push/bulk_push_rule_evaluator.py b/synapse/push/bulk_push_rule_evaluator.py index 679cbe9afa0f..9c0592a90266 100644 --- a/synapse/push/bulk_push_rule_evaluator.py +++ b/synapse/push/bulk_push_rule_evaluator.py @@ -436,6 +436,7 @@ async def _action_for_event_by_user( self._related_event_match_enabled, event.room_version.msc3931_push_features, 
self.hs.config.experimental.msc1767_enabled, # MSC3931 flag + self.hs.config.experimental.msc4210_enabled, ) for uid, rules in rules_by_user.items(): diff --git a/synapse/storage/databases/main/push_rule.py b/synapse/storage/databases/main/push_rule.py index bbdde177116b..86c87f78bf11 100644 --- a/synapse/storage/databases/main/push_rule.py +++ b/synapse/storage/databases/main/push_rule.py @@ -109,6 +109,7 @@ def _load_rules( msc3664_enabled=experimental_config.msc3664_enabled, msc3381_polls_enabled=experimental_config.msc3381_polls_enabled, msc4028_push_encrypted_events=experimental_config.msc4028_push_encrypted_events, + msc4210_enabled=experimental_config.msc4210_enabled, ) return filtered_rules diff --git a/synapse/synapse_rust/push.pyi b/synapse/synapse_rust/push.pyi index 27a974e1bbee..3f317c328894 100644 --- a/synapse/synapse_rust/push.pyi +++ b/synapse/synapse_rust/push.pyi @@ -48,6 +48,7 @@ class FilteredPushRules: msc3381_polls_enabled: bool, msc3664_enabled: bool, msc4028_push_encrypted_events: bool, + msc4210_enabled: bool, ): ... def rules(self) -> Collection[Tuple[PushRule, bool]]: ... @@ -65,6 +66,7 @@ class PushRuleEvaluator: related_event_match_enabled: bool, room_version_feature_flags: Tuple[str, ...], msc3931_enabled: bool, + msc4210_enabled: bool, ): ... def run( self, diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py index 420fbea99828..c1f8e18bd92d 100644 --- a/tests/push/test_push_rule_evaluator.py +++ b/tests/push/test_push_rule_evaluator.py @@ -149,6 +149,7 @@ def _get_evaluator( content: JsonMapping, *, related_events: Optional[JsonDict] = None, + msc4210: bool = False, ) -> PushRuleEvaluator: event = FrozenEvent( { @@ -174,6 +175,7 @@ def _get_evaluator( related_event_match_enabled=True, room_version_feature_flags=event.room_version.msc3931_push_features, msc3931_enabled=True, + msc4210_enabled=msc4210, ) def test_display_name(self) -> None: From 6ececb8f2ae52c6a77c54a740aaddbf87910469d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:29:05 +0000 Subject: [PATCH 020/147] Bump psycopg2 from 2.9.9 to 2.9.10 (#17843) Bumps [psycopg2](https://github.com/psycopg/psycopg2) from 2.9.9 to 2.9.10.
Changelog

Sourced from psycopg2's changelog.

Current release

What's new in psycopg 2.9.10

  • Add support for Python 3.13.
  • Receive notifications on commit (:ticket:[#1728](https://github.com/psycopg/psycopg2/issues/1728)).
  • ~psycopg2.errorcodes map and ~psycopg2.errors classes updated to PostgreSQL 17.
  • Drop support for Python 3.7.

What's new in psycopg 2.9.9

  • Add support for Python 3.12.
  • Drop support for Python 3.6.

What's new in psycopg 2.9.8

  • Wheel package bundled with PostgreSQL 16 libpq in order to add support for recent features, such as sslcertmode.

What's new in psycopg 2.9.7

  • Fix propagation of exceptions raised during module initialization (:ticket:[#1598](https://github.com/psycopg/psycopg2/issues/1598)).
  • Fix building when pg_config returns an empty string (:ticket:[#1599](https://github.com/psycopg/psycopg2/issues/1599)).
  • Wheel package bundled with OpenSSL 1.1.1v.

What's new in psycopg 2.9.6

  • Package manylinux 2014 for aarch64 and ppc64le platforms, in order to include libpq 15 in the binary package (:ticket:[#1396](https://github.com/psycopg/psycopg2/issues/1396)).
  • Wheel package bundled with OpenSSL 1.1.1t.

What's new in psycopg 2.9.5

  • Add support for Python 3.11.
  • Add support for rowcount in MERGE statements in binary packages (:ticket:[#1497](https://github.com/psycopg/psycopg2/issues/1497)).

... (truncated)

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=psycopg2&package-manager=pip&previous-version=2.9.9&new-version=2.9.10)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9fd95ff44752..91394947d489 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1595,24 +1595,20 @@ twisted = ["twisted"] [[package]] name = "psycopg2" -version = "2.9.9" +version = "2.9.10" description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "psycopg2-2.9.9-cp310-cp310-win32.whl", hash = "sha256:38a8dcc6856f569068b47de286b472b7c473ac7977243593a288ebce0dc89516"}, - {file = "psycopg2-2.9.9-cp310-cp310-win_amd64.whl", hash = "sha256:426f9f29bde126913a20a96ff8ce7d73fd8a216cfb323b1f04da402d452853c3"}, - {file = "psycopg2-2.9.9-cp311-cp311-win32.whl", hash = "sha256:ade01303ccf7ae12c356a5e10911c9e1c51136003a9a1d92f7aa9d010fb98372"}, - {file = "psycopg2-2.9.9-cp311-cp311-win_amd64.whl", hash = "sha256:121081ea2e76729acfb0673ff33755e8703d45e926e416cb59bae3a86c6a4981"}, - {file = "psycopg2-2.9.9-cp312-cp312-win32.whl", hash = "sha256:d735786acc7dd25815e89cc4ad529a43af779db2e25aa7c626de864127e5a024"}, - {file = "psycopg2-2.9.9-cp312-cp312-win_amd64.whl", hash = "sha256:a7653d00b732afb6fc597e29c50ad28087dcb4fbfb28e86092277a559ae4e693"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win32.whl", hash = "sha256:5e0d98cade4f0e0304d7d6f25bbfbc5bd186e07b38eac65379309c4ca3193efa"}, - {file = "psycopg2-2.9.9-cp37-cp37m-win_amd64.whl", hash = "sha256:7e2dacf8b009a1c1e843b5213a87f7c544b2b042476ed7755be813eaf4e8347a"}, - {file = "psycopg2-2.9.9-cp38-cp38-win32.whl", hash = "sha256:ff432630e510709564c01dafdbe996cb552e0b9f3f065eb89bdce5bd31fabf4c"}, - {file = "psycopg2-2.9.9-cp38-cp38-win_amd64.whl", hash = "sha256:bac58c024c9922c23550af2a581998624d6e02350f4ae9c5f0bc642c633a2d5e"}, - {file = "psycopg2-2.9.9-cp39-cp39-win32.whl", hash = "sha256:c92811b2d4c9b6ea0285942b2e7cac98a59e166d59c588fe5cfe1eda58e72d59"}, - {file = "psycopg2-2.9.9-cp39-cp39-win_amd64.whl", hash = "sha256:de80739447af31525feddeb8effd640782cf5998e1a4e9192ebdf829717e3913"}, - {file = "psycopg2-2.9.9.tar.gz", hash = "sha256:d1454bde93fb1e224166811694d600e746430c006fbb031ea06ecc2ea41bf156"}, + {file = "psycopg2-2.9.10-cp310-cp310-win32.whl", hash = "sha256:5df2b672140f95adb453af93a7d669d7a7bf0a56bcd26f1502329166f4a61716"}, + {file = "psycopg2-2.9.10-cp310-cp310-win_amd64.whl", hash = "sha256:c6f7b8561225f9e711a9c47087388a97fdc948211c10a4bccbf0ba68ab7b3b5a"}, + {file = "psycopg2-2.9.10-cp311-cp311-win32.whl", hash = "sha256:47c4f9875125344f4c2b870e41b6aad585901318068acd01de93f3677a6522c2"}, + {file = "psycopg2-2.9.10-cp311-cp311-win_amd64.whl", hash = "sha256:0435034157049f6846e95103bd8f5a668788dd913a7c30162ca9503fdf542cb4"}, + {file = "psycopg2-2.9.10-cp312-cp312-win32.whl", hash = "sha256:65a63d7ab0e067e2cdb3cf266de39663203d38d6a8ed97f5ca0cb315c73fe067"}, + {file = "psycopg2-2.9.10-cp312-cp312-win_amd64.whl", hash = "sha256:4a579d6243da40a7b3182e0430493dbd55950c493d8c68f4eec0b302f6bbf20e"}, + {file = "psycopg2-2.9.10-cp39-cp39-win32.whl", hash = "sha256:9d5b3b94b79a844a986d029eee38998232451119ad653aea42bb9220a8c5066b"}, + {file = "psycopg2-2.9.10-cp39-cp39-win_amd64.whl", hash = "sha256:88138c8dedcbfa96408023ea2b0c369eda40fe5d75002c0964c78f46f11fa442"}, + {file = "psycopg2-2.9.10.tar.gz", hash = "sha256:12ec0b40b0273f95296233e8750441339298e6a572f7039da5b260e3c8b60e11"}, ] [[package]] From 
0ab99369a1e4d4ce296dcd7b6d48f087718c5b51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:34:00 +0000 Subject: [PATCH 021/147] Bump sentry-sdk from 2.16.0 to 2.17.0 (#17844) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) from 2.16.0 to 2.17.0.
Release notes

Sourced from sentry-sdk's releases.

2.17.0

Various fixes & improvements

Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=sentry-sdk&package-manager=pip&previous-version=2.16.0&new-version=2.17.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 91394947d489..945952f782d5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2331,13 +2331,13 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.16.0" +version = "2.17.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" files = [ - {file = "sentry_sdk-2.16.0-py2.py3-none-any.whl", hash = "sha256:49139c31ebcd398f4f6396b18910610a0c1602f6e67083240c33019d1f6aa30c"}, - {file = "sentry_sdk-2.16.0.tar.gz", hash = "sha256:90f733b32e15dfc1999e6b7aca67a38688a567329de4d6e184154a73f96c6892"}, + {file = "sentry_sdk-2.17.0-py2.py3-none-any.whl", hash = "sha256:625955884b862cc58748920f9e21efdfb8e0d4f98cca4ab0d3918576d5b606ad"}, + {file = "sentry_sdk-2.17.0.tar.gz", hash = "sha256:dd0a05352b78ffeacced73a94e86f38b32e2eae15fff5f30ca5abb568a72eacf"}, ] [package.dependencies] From 22aa925523a7f487bf86c8e1a44607fe31a07b07 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 14:52:18 +0000 Subject: [PATCH 022/147] Bump types-requests from 2.32.0.20240914 to 2.32.0.20241016 (#17841) Bumps [types-requests](https://github.com/python/typeshed) from 2.32.0.20240914 to 2.32.0.20241016.
Commits

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=types-requests&package-manager=pip&previous-version=2.32.0.20240914&new-version=2.32.0.20241016)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 945952f782d5..5350c6ddfa49 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2817,13 +2817,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.0.20240914" +version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, ] [package.dependencies] From 9512b84a727a6519a407c72764b4c5ec36323a78 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 17 Oct 2024 15:05:00 +0000 Subject: [PATCH 023/147] Bump mypy from 1.10.1 to 1.11.2 (#17842) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [mypy](https://github.com/python/mypy) from 1.10.1 to 1.11.2.
Changelog

Sourced from mypy's changelog.

Mypy 1.11.2

  • Alternative fix for a union-like literal string (Ivan Levkivskyi, PR 17639)
  • Unwrap TypedDict item types before storing (Ivan Levkivskyi, PR 17640)

Acknowledgements

Thanks to all mypy contributors who contributed to this release:

  • Alex Waygood
  • Alexander Leopold Shon
  • Ali Hamdan
  • Anders Kaseorg
  • Ben Brown
  • Bénédikt Tran
  • bzoracler
  • Christoph Tyralla
  • Christopher Barber
  • dexterkennedy
  • gilesgc
  • GiorgosPapoutsakis
  • Ivan Levkivskyi
  • Jelle Zijlstra
  • Jukka Lehtosalo
  • Marc Mueller
  • Matthieu Devlin
  • Michael R. Crusoe
  • Nikita Sobolev
  • Seo Sanghyeon
  • Shantanu
  • sobolevn
  • Steven Troxler
  • Tadeu Manoel
  • Tamir Duberstein
  • Tushar Sadhwani
  • urnest
  • Valentin Stanciu

I’d also like to thank my employer, Dropbox, for supporting mypy development.

Mypy 1.10

We’ve just uploaded mypy 1.10 to the Python Package Index (PyPI). Mypy is a static type checker for Python. This release includes new features, performance improvements and bug fixes. You can install it as follows:

python3 -m pip install -U mypy

You can read the full documentation for this release on Read the Docs.

Support TypeIs (PEP 742)

Mypy now supports TypeIs (PEP 742), which allows

... (truncated)
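Since the quoted notes are cut off above, a minimal sketch of the `TypeIs` feature being referenced may help; this example is our illustration (using `typing_extensions`), not part of mypy's changelog:

```python
# Minimal TypeIs sketch (PEP 742): unlike TypeGuard, a TypeIs function
# narrows the argument's type in both branches of the check.
from typing import Union
from typing_extensions import TypeIs

def is_str(val: Union[str, int]) -> TypeIs[str]:
    return isinstance(val, str)

def describe(val: Union[str, int]) -> str:
    if is_str(val):
        return val.upper()  # val is narrowed to str here
    return str(val + 1)     # ...and to int in the else branch
```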

Commits
  • 789f02c Bump version to 1.11.2
  • 917cc75 An alternative fix for a union-like literal string (#17639)
  • 7d805b3 Unwrap TypedDict item types before storing (#17640)
  • 32675dd Revert "Fix Literal strings containing pipe characters" (#17638)
  • 778542b Revert "Fix RawExpressionType.accept crash with --cache-fine-grained" (#1...
  • 14ab742 Bump version to 1.11.2+dev
  • 570b90a Bump version to 1.11
  • b3a102e Fix RawExpressionType.accept crash with --cache-fine-grained (#17588)
  • aec04c7 Fix PEP 604 isinstance caching (#17563)
  • cb44e4d Fix typing.TypeAliasType being undefined on python < 3.12 (#17558)
  • Additional commits viewable in compare view

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- changelog.d/17842.misc | 1 + poetry.lock | 58 +++++++++++++------------- scripts-dev/mypy_synapse_plugin.py | 2 +- synmark/__main__.py | 4 ++ tests/media/test_oembed.py | 3 ++ tests/push/test_push_rule_evaluator.py | 2 + 6 files changed, 40 insertions(+), 30 deletions(-) create mode 100644 changelog.d/17842.misc diff --git a/changelog.d/17842.misc b/changelog.d/17842.misc new file mode 100644 index 000000000000..78af706c31b5 --- /dev/null +++ b/changelog.d/17842.misc @@ -0,0 +1 @@ +Fix some typing issues uncovered by upgrading mypy to 1.11.x. diff --git a/poetry.lock b/poetry.lock index 5350c6ddfa49..42d7f03c5c97 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1319,44 +1319,44 @@ files = [ [[package]] name = "mypy" -version = "1.10.1" +version = "1.11.2" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.10.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e36f229acfe250dc660790840916eb49726c928e8ce10fbdf90715090fe4ae02"}, - {file = "mypy-1.10.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:51a46974340baaa4145363b9e051812a2446cf583dfaeba124af966fa44593f7"}, - {file = "mypy-1.10.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:901c89c2d67bba57aaaca91ccdb659aa3a312de67f23b9dfb059727cce2e2e0a"}, - {file = "mypy-1.10.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0cd62192a4a32b77ceb31272d9e74d23cd88c8060c34d1d3622db3267679a5d9"}, - {file = "mypy-1.10.1-cp310-cp310-win_amd64.whl", hash = "sha256:a2cbc68cb9e943ac0814c13e2452d2046c2f2b23ff0278e26599224cf164e78d"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bd6f629b67bb43dc0d9211ee98b96d8dabc97b1ad38b9b25f5e4c4d7569a0c6a"}, - {file = "mypy-1.10.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a1bbb3a6f5ff319d2b9d40b4080d46cd639abe3516d5a62c070cf0114a457d84"}, - {file = "mypy-1.10.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8edd4e9bbbc9d7b79502eb9592cab808585516ae1bcc1446eb9122656c6066f"}, - {file = "mypy-1.10.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6166a88b15f1759f94a46fa474c7b1b05d134b1b61fca627dd7335454cc9aa6b"}, - {file = "mypy-1.10.1-cp311-cp311-win_amd64.whl", hash = "sha256:5bb9cd11c01c8606a9d0b83ffa91d0b236a0e91bc4126d9ba9ce62906ada868e"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:d8681909f7b44d0b7b86e653ca152d6dff0eb5eb41694e163c6092124f8246d7"}, - {file = "mypy-1.10.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:378c03f53f10bbdd55ca94e46ec3ba255279706a6aacaecac52ad248f98205d3"}, - {file = "mypy-1.10.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bacf8f3a3d7d849f40ca6caea5c055122efe70e81480c8328ad29c55c69e93e"}, - {file = "mypy-1.10.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:701b5f71413f1e9855566a34d6e9d12624e9e0a8818a5704d74d6b0402e66c04"}, - {file = "mypy-1.10.1-cp312-cp312-win_amd64.whl", hash = "sha256:3c4c2992f6ea46ff7fce0072642cfb62af7a2484efe69017ed8b095f7b39ef31"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:604282c886497645ffb87b8f35a57ec773a4a2721161e709a4422c1636ddde5c"}, - {file = "mypy-1.10.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37fd87cab83f09842653f08de066ee68f1182b9b5282e4634cdb4b407266bade"}, - {file = "mypy-1.10.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:8addf6313777dbb92e9564c5d32ec122bf2c6c39d683ea64de6a1fd98b90fe37"}, - {file = "mypy-1.10.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cc3ca0a244eb9a5249c7c583ad9a7e881aa5d7b73c35652296ddcdb33b2b9c7"}, - {file = "mypy-1.10.1-cp38-cp38-win_amd64.whl", hash = "sha256:1b3a2ffce52cc4dbaeee4df762f20a2905aa171ef157b82192f2e2f368eec05d"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fe85ed6836165d52ae8b88f99527d3d1b2362e0cb90b005409b8bed90e9059b3"}, - {file = "mypy-1.10.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c2ae450d60d7d020d67ab440c6e3fae375809988119817214440033f26ddf7bf"}, - {file = "mypy-1.10.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6be84c06e6abd72f960ba9a71561c14137a583093ffcf9bbfaf5e613d63fa531"}, - {file = "mypy-1.10.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2189ff1e39db399f08205e22a797383613ce1cb0cb3b13d8bcf0170e45b96cc3"}, - {file = "mypy-1.10.1-cp39-cp39-win_amd64.whl", hash = "sha256:97a131ee36ac37ce9581f4220311247ab6cba896b4395b9c87af0675a13a755f"}, - {file = "mypy-1.10.1-py3-none-any.whl", hash = "sha256:71d8ac0b906354ebda8ef1673e5fde785936ac1f29ff6987c7483cfbd5a4235a"}, - {file = "mypy-1.10.1.tar.gz", hash = "sha256:1f8f492d7db9e3593ef42d4f115f04e556130f2819ad33ab84551403e97dd4c0"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, + {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, + {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, + {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, + {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, + {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, + {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, + {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, + {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, + {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, + {file = "mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, + {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, + {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = 
"sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, + {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, + {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, + {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, + {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, + {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, + {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, + {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, + {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, + {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, + {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, ] [package.dependencies] mypy-extensions = ">=1.0.0" tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} -typing-extensions = ">=4.1.0" +typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil (>=4.0)"] diff --git a/scripts-dev/mypy_synapse_plugin.py b/scripts-dev/mypy_synapse_plugin.py index 509047b41b15..a15c3c005cf5 100644 --- a/scripts-dev/mypy_synapse_plugin.py +++ b/scripts-dev/mypy_synapse_plugin.py @@ -360,7 +360,7 @@ def is_cacheable( # For a type alias, check if the underlying real type is cachable. return is_cacheable(mypy.types.get_proper_type(rt), signature, verbose) - elif isinstance(rt, UninhabitedType) and rt.is_noreturn: + elif isinstance(rt, UninhabitedType): # There is no return value, just consider it cachable. This is only used # in tests. 
         return True, None
diff --git a/synmark/__main__.py b/synmark/__main__.py
index 746261a1eca4..4944c2f3b078 100644
--- a/synmark/__main__.py
+++ b/synmark/__main__.py
@@ -90,6 +90,10 @@ def add_cmdline_args(cmd: List[str], args: Namespace) -> None:
 
     if runner.args.worker:
         if runner.args.log:
+            # sys.__stdout__ can technically be None, just exit if it's the case
+            if not sys.__stdout__:
+                exit(1)
+
             globalLogBeginner.beginLoggingTo(
                 [textFileLogObserver(sys.__stdout__)], redirectStandardIO=False
             )
diff --git a/tests/media/test_oembed.py b/tests/media/test_oembed.py
index 29d4580697f9..b8265ff9cae0 100644
--- a/tests/media/test_oembed.py
+++ b/tests/media/test_oembed.py
@@ -20,6 +20,7 @@
 #
 
 import json
+from typing import Any
 
 from parameterized import parameterized
 
@@ -52,6 +53,7 @@ def parse_response(self, response: JsonDict) -> OEmbedResult:
 
     def test_version(self) -> None:
         """Accept versions that are similar to 1.0 as a string or int (or missing)."""
+        version: Any
         for version in ("1.0", 1.0, 1):
             result = self.parse_response({"version": version})
             # An empty Open Graph response is an error, ensure the URL is included.
@@ -69,6 +71,7 @@ def test_version(self) -> None:
 
     def test_cache_age(self) -> None:
         """Ensure a cache-age is parsed properly."""
+        cache_age: Any
         # Correct-ish cache ages are allowed.
         for cache_age in ("1", 1.0, 1):
             result = self.parse_response({"cache_age": cache_age})
diff --git a/tests/push/test_push_rule_evaluator.py b/tests/push/test_push_rule_evaluator.py
index c1f8e18bd92d..3898532acffa 100644
--- a/tests/push/test_push_rule_evaluator.py
+++ b/tests/push/test_push_rule_evaluator.py
@@ -454,6 +454,7 @@ def test_exact_event_match_boolean(self) -> None:
             {"value": False},
             "incorrect values should not match",
         )
+        value: Any
         for value in ("foobaz", 1, 1.1, None, [], {}):
             self._assert_not_matches(
                 condition,
@@ -494,6 +495,7 @@ def test_exact_event_match_null(self) -> None:
             {"value": None},
             "exact value should match",
         )
+        value: Any
         for value in ("foobaz", True, False, 1, 1.1, [], {}):
             self._assert_not_matches(
                 condition,

From 80ad02e10e2ebefe7b1707cb504bd9d3cc598ea8 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 22 Oct 2024 11:23:36 +0200
Subject: [PATCH 024/147] Ensure Python 3.13 and PostgreSQL 17 compatibility (#17752)

This adds Python 3.13.0 to the trial test matrix. It also updates `cffi`
and `zope.interface` in the locked dependencies to make sure we have
versions compatible with Python 3.13. For some reason, they are not
being picked up by dependabot.
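
For context, a minimal, hypothetical sketch of the pattern `.ci/scripts/calculate_jobs.py` uses to compute the matrix and hand it to GitHub Actions (the `GITHUB_OUTPUT` file is the standard mechanism for step outputs; the names here are illustrative):

    import json
    import os

    def set_output(key: str, value: str) -> None:
        # GitHub Actions reads step outputs from the file named by $GITHUB_OUTPUT.
        with open(os.environ["GITHUB_OUTPUT"], "a") as fh:
            fh.write(f"{key}={value}\n")

    # One sqlite job per supported Python, now including 3.13.
    trial_sqlite_tests = [
        {"python-version": version, "database": "sqlite", "extras": "all"}
        for version in ("3.9", "3.10", "3.11", "3.12", "3.13")
    ]
    set_output("trial_test_matrix", json.dumps(trial_sqlite_tests))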
--- .ci/scripts/calculate_jobs.py | 6 +- changelog.d/17752.misc | 1 + poetry.lock | 212 ++++++++++++++++++---------------- 3 files changed, 115 insertions(+), 104 deletions(-) create mode 100644 changelog.d/17752.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 92bd192fd1f2..15f2d94a810f 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -53,7 +53,7 @@ def set_output(key: str, value: str): "database": "sqlite", "extras": "all", } - for version in ("3.9", "3.10", "3.11", "3.12") + for version in ("3.9", "3.10", "3.11", "3.12", "3.13") ) trial_postgres_tests = [ @@ -68,9 +68,9 @@ def set_output(key: str, value: str): if not IS_PR: trial_postgres_tests.append( { - "python-version": "3.12", + "python-version": "3.13", "database": "postgres", - "postgres-version": "16", + "postgres-version": "17", "extras": "all", } ) diff --git a/changelog.d/17752.misc b/changelog.d/17752.misc new file mode 100644 index 000000000000..f9f01a424203 --- /dev/null +++ b/changelog.d/17752.misc @@ -0,0 +1 @@ +Add Python 3.13 and Postgres 17 to the test matrix. diff --git a/poetry.lock b/poetry.lock index 42d7f03c5c97..1102e89d12f0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -147,75 +147,78 @@ files = [ [[package]] name = "cffi" -version = "1.15.1" +version = "1.17.1" description = "Foreign Function Interface for Python calling C code." optional = false -python-versions = "*" +python-versions = ">=3.8" files = [ - {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, - {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, - {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, - {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, - {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, - {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, - {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, - {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, - {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, - {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, - {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, - {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, - {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, - {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, - {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, - {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, - {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, - {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, - {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, - {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, - {file = 
"cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, - {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, - {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, - {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, - {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, - {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, - {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, - {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, - {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, - {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, - {file = 
"cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, - {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, - {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, - {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, - {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:df8b1c11f177bc2313ec4b2d46baec87a5f3e71fc8b45dab2ee7cae86d9aba14"}, + {file = "cffi-1.17.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f2cdc858323644ab277e9bb925ad72ae0e67f69e804f4898c070998d50b1a67"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:edae79245293e15384b51f88b00613ba9f7198016a5948b5dddf4917d4d26382"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45398b671ac6d70e67da8e4224a065cec6a93541bb7aebe1b198a61b58c7b702"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad9413ccdeda48c5afdae7e4fa2192157e991ff761e7ab8fdd8926f40b160cc3"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5da5719280082ac6bd9aa7becb3938dc9f9cbd57fac7d2871717b1feb0902ab6"}, + {file = "cffi-1.17.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bb1a08b8008b281856e5971307cc386a8e9c5b625ac297e853d36da6efe9c17"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:045d61c734659cc045141be4bae381a41d89b741f795af1dd018bfb532fd0df8"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6883e737d7d9e4899a8a695e00ec36bd4e5e4f18fabe0aca0efe0a4b44cdb13e"}, + {file = "cffi-1.17.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6b8b4a92e1c65048ff98cfe1f735ef8f1ceb72e3d5f0c25fdb12087a23da22be"}, + {file = "cffi-1.17.1-cp310-cp310-win32.whl", hash = "sha256:c9c3d058ebabb74db66e431095118094d06abf53284d9c81f27300d0e0d8bc7c"}, + {file = "cffi-1.17.1-cp310-cp310-win_amd64.whl", hash = "sha256:0f048dcf80db46f0098ccac01132761580d28e28bc0f78ae0d58048063317e15"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a45e3c6913c5b87b3ff120dcdc03f6131fa0065027d0ed7ee6190736a74cd401"}, + {file = "cffi-1.17.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:30c5e0cb5ae493c04c8b42916e52ca38079f1b235c2f8ae5f4527b963c401caf"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f75c7ab1f9e4aca5414ed4d8e5c0e303a34f4421f8a0d47a4d019ceff0ab6af4"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a1ed2dd2972641495a3ec98445e09766f077aee98a1c896dcb4ad0d303628e41"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:46bf43160c1a35f7ec506d254e5c890f3c03648a4dbac12d624e4490a7046cd1"}, + {file = "cffi-1.17.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a24ed04c8ffd54b0729c07cee15a81d964e6fee0e3d4d342a27b020d22959dc6"}, + {file = 
"cffi-1.17.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:610faea79c43e44c71e1ec53a554553fa22321b65fae24889706c0a84d4ad86d"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:a9b15d491f3ad5d692e11f6b71f7857e7835eb677955c00cc0aefcd0669adaf6"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:de2ea4b5833625383e464549fec1bc395c1bdeeb5f25c4a3a82b5a8c756ec22f"}, + {file = "cffi-1.17.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:fc48c783f9c87e60831201f2cce7f3b2e4846bf4d8728eabe54d60700b318a0b"}, + {file = "cffi-1.17.1-cp311-cp311-win32.whl", hash = "sha256:85a950a4ac9c359340d5963966e3e0a94a676bd6245a4b55bc43949eee26a655"}, + {file = "cffi-1.17.1-cp311-cp311-win_amd64.whl", hash = "sha256:caaf0640ef5f5517f49bc275eca1406b0ffa6aa184892812030f04c2abf589a0"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:805b4371bf7197c329fcb3ead37e710d1bca9da5d583f5073b799d5c5bd1eee4"}, + {file = "cffi-1.17.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:733e99bc2df47476e3848417c5a4540522f234dfd4ef3ab7fafdf555b082ec0c"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1257bdabf294dceb59f5e70c64a3e2f462c30c7ad68092d01bbbfb1c16b1ba36"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da95af8214998d77a98cc14e3a3bd00aa191526343078b530ceb0bd710fb48a5"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d63afe322132c194cf832bfec0dc69a99fb9bb6bbd550f161a49e9e855cc78ff"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f79fc4fc25f1c8698ff97788206bb3c2598949bfe0fef03d299eb1b5356ada99"}, + {file = "cffi-1.17.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b62ce867176a75d03a665bad002af8e6d54644fad99a3c70905c543130e39d93"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:386c8bf53c502fff58903061338ce4f4950cbdcb23e2902d86c0f722b786bbe3"}, + {file = "cffi-1.17.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4ceb10419a9adf4460ea14cfd6bc43d08701f0835e979bf821052f1805850fe8"}, + {file = "cffi-1.17.1-cp312-cp312-win32.whl", hash = "sha256:a08d7e755f8ed21095a310a693525137cfe756ce62d066e53f502a83dc550f65"}, + {file = "cffi-1.17.1-cp312-cp312-win_amd64.whl", hash = "sha256:51392eae71afec0d0c8fb1a53b204dbb3bcabcb3c9b807eedf3e1e6ccf2de903"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f3a2b4222ce6b60e2e8b337bb9596923045681d71e5a082783484d845390938e"}, + {file = "cffi-1.17.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0984a4925a435b1da406122d4d7968dd861c1385afe3b45ba82b750f229811e2"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d01b12eeeb4427d3110de311e1774046ad344f5b1a7403101878976ecd7a10f3"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:706510fe141c86a69c8ddc029c7910003a17353970cff3b904ff0686a5927683"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:de55b766c7aa2e2a3092c51e0483d700341182f08e67c63630d5b6f200bb28e5"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:c59d6e989d07460165cc5ad3c61f9fd8f1b4796eacbd81cee78957842b834af4"}, + {file = "cffi-1.17.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd398dbc6773384a17fe0d3e7eeb8d1a21c2200473ee6806bb5e6a8e62bb73dd"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:3edc8d958eb099c634dace3c7e16560ae474aa3803a5df240542b305d14e14ed"}, + {file = "cffi-1.17.1-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:72e72408cad3d5419375fc87d289076ee319835bdfa2caad331e377589aebba9"}, + {file = "cffi-1.17.1-cp313-cp313-win32.whl", hash = "sha256:e03eab0a8677fa80d646b5ddece1cbeaf556c313dcfac435ba11f107ba117b5d"}, + {file = "cffi-1.17.1-cp313-cp313-win_amd64.whl", hash = "sha256:f6a16c31041f09ead72d69f583767292f750d24913dadacf5756b966aacb3f1a"}, + {file = "cffi-1.17.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:636062ea65bd0195bc012fea9321aca499c0504409f413dc88af450b57ffd03b"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c7eac2ef9b63c79431bc4b25f1cd649d7f061a28808cbc6c47b534bd789ef964"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e221cf152cff04059d011ee126477f0d9588303eb57e88923578ace7baad17f9"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:31000ec67d4221a71bd3f67df918b1f88f676f1c3b535a7eb473255fdc0b83fc"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f17be4345073b0a7b8ea599688f692ac3ef23ce28e5df79c04de519dbc4912c"}, + {file = "cffi-1.17.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2b1fac190ae3ebfe37b979cc1ce69c81f4e4fe5746bb401dca63a9062cdaf1"}, + {file = "cffi-1.17.1-cp38-cp38-win32.whl", hash = "sha256:7596d6620d3fa590f677e9ee430df2958d2d6d6de2feeae5b20e82c00b76fbf8"}, + {file = "cffi-1.17.1-cp38-cp38-win_amd64.whl", hash = "sha256:78122be759c3f8a014ce010908ae03364d00a1f81ab5c7f4a7a5120607ea56e1"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b2ab587605f4ba0bf81dc0cb08a41bd1c0a5906bd59243d56bad7668a6fc6c16"}, + {file = "cffi-1.17.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:28b16024becceed8c6dfbc75629e27788d8a3f9030691a1dbf9821a128b22c36"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1d599671f396c4723d016dbddb72fe8e0397082b0a77a4fab8028923bec050e8"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca74b8dbe6e8e8263c0ffd60277de77dcee6c837a3d0881d8c1ead7268c9e576"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f7f5baafcc48261359e14bcd6d9bff6d4b28d9103847c9e136694cb0501aef87"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:98e3969bcff97cae1b2def8ba499ea3d6f31ddfdb7635374834cf89a1a08ecf0"}, + {file = "cffi-1.17.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cdf5ce3acdfd1661132f2a9c19cac174758dc2352bfe37d98aa7512c6b7178b3"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:9755e4345d1ec879e3849e62222a18c7174d65a6a92d5b346b1863912168b595"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f1e22e8c4419538cb197e4dd60acc919d7696e5ef98ee4da4e01d3f8cfa4cc5a"}, + {file = "cffi-1.17.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash 
= "sha256:c03e868a0b3bc35839ba98e74211ed2b05d2119be4e8a0f224fba9384f1fe02e"}, + {file = "cffi-1.17.1-cp39-cp39-win32.whl", hash = "sha256:e31ae45bc2e29f6b2abd0de1cc3b9d5205aa847cafaecb8af1476a609a2f6eb7"}, + {file = "cffi-1.17.1-cp39-cp39-win_amd64.whl", hash = "sha256:d016c76bdd850f3c626af19b0542c9677ba156e4ee4fccfdd7848803533ef662"}, + {file = "cffi-1.17.1.tar.gz", hash = "sha256:1c39c6016c32bc48dd54561950ebd6836e1670f2ae46128f67cf49e789c52824"}, ] [package.dependencies] @@ -3028,50 +3031,57 @@ test = ["zope.testrunner"] [[package]] name = "zope-interface" -version = "6.0" +version = "7.1.0" description = "Interfaces for Python" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "zope.interface-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f299c020c6679cb389814a3b81200fe55d428012c5e76da7e722491f5d205990"}, - {file = "zope.interface-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ee4b43f35f5dc15e1fec55ccb53c130adb1d11e8ad8263d68b1284b66a04190d"}, - {file = "zope.interface-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a158846d0fca0a908c1afb281ddba88744d403f2550dc34405c3691769cdd85"}, - {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f72f23bab1848edb7472309e9898603141644faec9fd57a823ea6b4d1c4c8995"}, - {file = "zope.interface-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:48f4d38cf4b462e75fac78b6f11ad47b06b1c568eb59896db5b6ec1094eb467f"}, - {file = "zope.interface-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:87b690bbee9876163210fd3f500ee59f5803e4a6607d1b1238833b8885ebd410"}, - {file = "zope.interface-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f2363e5fd81afb650085c6686f2ee3706975c54f331b426800b53531191fdf28"}, - {file = "zope.interface-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:af169ba897692e9cd984a81cb0f02e46dacdc07d6cf9fd5c91e81f8efaf93d52"}, - {file = "zope.interface-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fa90bac61c9dc3e1a563e5babb3fd2c0c1c80567e815442ddbe561eadc803b30"}, - {file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89086c9d3490a0f265a3c4b794037a84541ff5ffa28bb9c24cc9f66566968464"}, - {file = "zope.interface-6.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:809fe3bf1a91393abc7e92d607976bbb8586512913a79f2bf7d7ec15bd8ea518"}, - {file = "zope.interface-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:0ec9653825f837fbddc4e4b603d90269b501486c11800d7c761eee7ce46d1bbb"}, - {file = "zope.interface-6.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:790c1d9d8f9c92819c31ea660cd43c3d5451df1df61e2e814a6f99cebb292788"}, - {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b39b8711578dcfd45fc0140993403b8a81e879ec25d53189f3faa1f006087dca"}, - {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eba51599370c87088d8882ab74f637de0c4f04a6d08a312dce49368ba9ed5c2a"}, - {file = "zope.interface-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ee934f023f875ec2cfd2b05a937bd817efcc6c4c3f55c5778cbf78e58362ddc"}, - {file = 
"zope.interface-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:042f2381118b093714081fd82c98e3b189b68db38ee7d35b63c327c470ef8373"}, - {file = "zope.interface-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:dfbbbf0809a3606046a41f8561c3eada9db811be94138f42d9135a5c47e75f6f"}, - {file = "zope.interface-6.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:424d23b97fa1542d7be882eae0c0fc3d6827784105264a8169a26ce16db260d8"}, - {file = "zope.interface-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e538f2d4a6ffb6edfb303ce70ae7e88629ac6e5581870e66c306d9ad7b564a58"}, - {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:12175ca6b4db7621aedd7c30aa7cfa0a2d65ea3a0105393e05482d7a2d367446"}, - {file = "zope.interface-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4c3d7dfd897a588ec27e391edbe3dd320a03684457470415870254e714126b1f"}, - {file = "zope.interface-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:b3f543ae9d3408549a9900720f18c0194ac0fe810cecda2a584fd4dca2eb3bb8"}, - {file = "zope.interface-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d0583b75f2e70ec93f100931660328965bb9ff65ae54695fb3fa0a1255daa6f2"}, - {file = "zope.interface-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:23ac41d52fd15dd8be77e3257bc51bbb82469cf7f5e9a30b75e903e21439d16c"}, - {file = "zope.interface-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99856d6c98a326abbcc2363827e16bd6044f70f2ef42f453c0bd5440c4ce24e5"}, - {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1592f68ae11e557b9ff2bc96ac8fc30b187e77c45a3c9cd876e3368c53dc5ba8"}, - {file = "zope.interface-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4407b1435572e3e1610797c9203ad2753666c62883b921318c5403fb7139dec2"}, - {file = "zope.interface-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:5171eb073474a5038321409a630904fd61f12dd1856dd7e9d19cd6fe092cbbc5"}, - {file = "zope.interface-6.0.tar.gz", hash = "sha256:aab584725afd10c710b8f1e6e208dbee2d0ad009f57d674cb9d1b3964037275d"}, + {file = "zope.interface-7.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2bd9e9f366a5df08ebbdc159f8224904c1c5ce63893984abb76954e6fbe4381a"}, + {file = "zope.interface-7.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:661d5df403cd3c5b8699ac480fa7f58047a3253b029db690efa0c3cf209993ef"}, + {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91b6c30689cfd87c8f264acb2fc16ad6b3c72caba2aec1bf189314cf1a84ca33"}, + {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b6a4924f5bad9fe21d99f66a07da60d75696a136162427951ec3cb223a5570d"}, + {file = "zope.interface-7.1.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80a3c00b35f6170be5454b45abe2719ea65919a2f09e8a6e7b1362312a872cd3"}, + {file = "zope.interface-7.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:b936d61dbe29572fd2cfe13e30b925e5383bed1aba867692670f5a2a2eb7b4e9"}, + {file = "zope.interface-7.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0ac20581fc6cd7c754f6dff0ae06fedb060fa0e9ea6309d8be8b2701d9ea51c4"}, + {file = 
"zope.interface-7.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:848b6fa92d7c8143646e64124ed46818a0049a24ecc517958c520081fd147685"}, + {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec1ef1fdb6f014d5886b97e52b16d0f852364f447d2ab0f0c6027765777b6667"}, + {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bcff5c09d0215f42ba64b49205a278e44413d9bf9fa688fd9e42bfe472b5f4f"}, + {file = "zope.interface-7.1.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07add15de0cc7e69917f7d286b64d54125c950aeb43efed7a5ea7172f000fbc1"}, + {file = "zope.interface-7.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:9940d5bc441f887c5f375ec62bcf7e7e495a2d5b1da97de1184a88fb567f06af"}, + {file = "zope.interface-7.1.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:f245d039f72e6f802902375755846f5de1ee1e14c3e8736c078565599bcab621"}, + {file = "zope.interface-7.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6159e767d224d8f18deff634a1d3722e68d27488c357f62ebeb5f3e2f5288b1f"}, + {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e956b1fd7f3448dd5e00f273072e73e50dfafcb35e4227e6d5af208075593c9"}, + {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ff115ef91c0eeac69cd92daeba36a9d8e14daee445b504eeea2b1c0b55821984"}, + {file = "zope.interface-7.1.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bec001798ab62c3fc5447162bf48496ae9fba02edc295a9e10a0b0c639a6452e"}, + {file = "zope.interface-7.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:124149e2d42067b9c6597f4dafdc7a0983d0163868f897b7bb5dc850b14f9a87"}, + {file = "zope.interface-7.1.0-cp313-cp313-macosx_10_9_x86_64.whl", hash = "sha256:9733a9a0f94ef53d7aa64661811b20875b5bc6039034c6e42fb9732170130573"}, + {file = "zope.interface-7.1.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5fcf379b875c610b5a41bc8a891841533f98de0520287d7f85e25386cd10d3e9"}, + {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d0a45b5af9f72c805ee668d1479480ca85169312211bed6ed18c343e39307d5f"}, + {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4af4a12b459a273b0b34679a5c3dc5e34c1847c3dd14a628aa0668e19e638ea2"}, + {file = "zope.interface-7.1.0-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a735f82d2e3ed47ca01a20dfc4c779b966b16352650a8036ab3955aad151ed8a"}, + {file = "zope.interface-7.1.0-cp313-cp313-win_amd64.whl", hash = "sha256:5501e772aff595e3c54266bc1bfc5858e8f38974ce413a8f1044aae0f32a83a3"}, + {file = "zope.interface-7.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ec59fe53db7d32abb96c6d4efeed84aab4a7c38c62d7a901a9b20c09dd936e7a"}, + {file = "zope.interface-7.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e53c291debef523b09e1fe3dffe5f35dde164f1c603d77f770b88a1da34b7ed6"}, + {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:711eebc77f2092c6a8b304bad0b81a6ce3cf5490b25574e7309fbc07d881e3af"}, + {file = 
"zope.interface-7.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a00ead2e24c76436e1b457a5132d87f83858330f6c923640b7ef82d668525d1"}, + {file = "zope.interface-7.1.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e28ea0bc4b084fc93a483877653a033062435317082cdc6388dec3438309faf"}, + {file = "zope.interface-7.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:27cfb5205d68b12682b6e55ab8424662d96e8ead19550aad0796b08dd2c9a45e"}, + {file = "zope.interface-7.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9e3e48f3dea21c147e1b10c132016cb79af1159facca9736d231694ef5a740a8"}, + {file = "zope.interface-7.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a99240b1d02dc469f6afbe7da1bf617645e60290c272968f4e53feec18d7dce8"}, + {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc8a318162123eddbdf22fcc7b751288ce52e4ad096d3766ff1799244352449d"}, + {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b7b25db127db3e6b597c5f74af60309c4ad65acd826f89609662f0dc33a54728"}, + {file = "zope.interface-7.1.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a29ac607e970b5576547f0e3589ec156e04de17af42839eedcf478450687317"}, + {file = "zope.interface-7.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:a14c9decf0eb61e0892631271d500c1e306c7b6901c998c7035e194d9150fdd1"}, + {file = "zope_interface-7.1.0.tar.gz", hash = "sha256:3f005869a1a05e368965adb2075f97f8ee9a26c61898a9e52a9764d93774f237"}, ] [package.dependencies] setuptools = "*" [package.extras] -docs = ["Sphinx", "repoze.sphinx.autointerface"] -test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] -testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"] +docs = ["Sphinx", "furo", "repoze.sphinx.autointerface"] +test = ["coverage[toml]", "zope.event", "zope.testing"] +testing = ["coverage[toml]", "zope.event", "zope.testing"] [[package]] name = "zope-schema" From a5e16a4ab5a7db41b6dc978da1d833c39874813f Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 22 Oct 2024 05:06:46 -0500 Subject: [PATCH 025/147] Sliding Sync: Reset `forgotten` status when membership changes (like rejoining a room) (#17835) Reset `sliding_sync_membership_snapshots` -> `forgotten` status when membership changes (like rejoining a room). Fix https://github.com/element-hq/synapse/issues/17781 ### What was the problem before? Previously, if someone used `/forget` on one of their rooms, it would update `sliding_sync_membership_snapshots` as expected but when someone rejoined the room (or had any membership change), the upsert didn't overwrite and reset the `forgotten` status so it remained `forgotten` and invisible down the Sliding Sync endpoint. 
--- changelog.d/17835.bugfix | 1 + synapse/storage/databases/main/events.py | 11 +- .../databases/main/events_bg_updates.py | 118 ++++++++++++++++++ synapse/storage/databases/main/roommember.py | 1 + synapse/storage/schema/__init__.py | 2 + ..._membership_snapshots_forgotten_column.sql | 21 ++++ synapse/types/storage/__init__.py | 3 + .../client/sliding_sync/test_sliding_sync.py | 115 ++++++++++++++++- tests/rest/client/test_rooms.py | 62 +++++++++ tests/storage/test_sliding_sync_tables.py | 103 +++++++++++++++ 10 files changed, 433 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17835.bugfix create mode 100644 synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql diff --git a/changelog.d/17835.bugfix b/changelog.d/17835.bugfix new file mode 100644 index 000000000000..b970598c3546 --- /dev/null +++ b/changelog.d/17835.bugfix @@ -0,0 +1 @@ +Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync that would cause rooms to stay forgotten and hidden even after rejoining. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index c0b7d8107d76..e5eae9cee997 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1863,10 +1863,10 @@ def _update_current_state_txn( txn.execute_batch( f""" INSERT INTO sliding_sync_membership_snapshots - (room_id, user_id, sender, membership_event_id, membership, event_stream_ordering, event_instance_name + (room_id, user_id, sender, membership_event_id, membership, forgotten, event_stream_ordering, event_instance_name {("," + ", ".join(sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""}) VALUES ( - ?, ?, ?, ?, ?, + ?, ?, ?, ?, ?, ?, (SELECT stream_ordering FROM events WHERE event_id = ?), (SELECT COALESCE(instance_name, 'master') FROM events WHERE event_id = ?) {("," + ", ".join("?" for _ in sliding_sync_snapshot_values)) if sliding_sync_snapshot_values else ""} @@ -1876,6 +1876,7 @@ def _update_current_state_txn( sender = EXCLUDED.sender, membership_event_id = EXCLUDED.membership_event_id, membership = EXCLUDED.membership, + forgotten = EXCLUDED.forgotten, event_stream_ordering = EXCLUDED.event_stream_ordering {("," + ", ".join(f"{key} = EXCLUDED.{key}" for key in sliding_sync_snapshot_keys)) if sliding_sync_snapshot_keys else ""} """, @@ -1886,6 +1887,9 @@ def _update_current_state_txn( membership_info.sender, membership_info.membership_event_id, membership_info.membership, + # Since this is a new membership, it isn't forgotten anymore (which + # matches how Synapse currently thinks about the forgotten status) + 0, # XXX: We do not use `membership_info.membership_event_stream_ordering` here # because it is an unreliable value. See XXX note above. 
membership_info.membership_event_id, @@ -2901,6 +2905,9 @@ def _store_room_members_txn( "sender": event.sender, "membership_event_id": event.event_id, "membership": event.membership, + # Since this is a new membership, it isn't forgotten anymore (which + # matches how Synapse currently thinks about the forgotten status) + "forgotten": 0, "event_stream_ordering": event.internal_metadata.stream_ordering, "event_instance_name": event.internal_metadata.instance_name, } diff --git a/synapse/storage/databases/main/events_bg_updates.py b/synapse/storage/databases/main/events_bg_updates.py index a8723f94bc86..4b0bdd79c679 100644 --- a/synapse/storage/databases/main/events_bg_updates.py +++ b/synapse/storage/databases/main/events_bg_updates.py @@ -304,6 +304,12 @@ def __init__( _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, self._sliding_sync_membership_snapshots_bg_update, ) + # Add a background update to fix data integrity issue in the + # `sliding_sync_membership_snapshots` -> `forgotten` column + self.db_pool.updates.register_background_update_handler( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE, + self._sliding_sync_membership_snapshots_fix_forgotten_column_bg_update, + ) # We want this to run on the main database at startup before we start processing # events. @@ -2429,6 +2435,118 @@ def _fill_table_txn(txn: LoggingTransaction) -> None: return len(memberships_to_update_rows) + async def _sliding_sync_membership_snapshots_fix_forgotten_column_bg_update( + self, progress: JsonDict, batch_size: int + ) -> int: + """ + Background update to update the `sliding_sync_membership_snapshots` -> + `forgotten` column to be in sync with the `room_memberships` table. + + Because of previously flawed code (now fixed); any room that someone has + forgotten and subsequently re-joined or had any new membership on, we need to go + and update the column to match the `room_memberships` table as it has fallen out + of sync. + """ + last_event_stream_ordering = progress.get( + "last_event_stream_ordering", -(1 << 31) + ) + + def _txn( + txn: LoggingTransaction, + ) -> int: + """ + Returns: + The number of rows updated. + """ + + # To simplify things, we can just recheck any row in + # `sliding_sync_membership_snapshots` with `forgotten=1` + txn.execute( + """ + SELECT + s.room_id, + s.user_id, + s.membership_event_id, + s.event_stream_ordering, + m.forgotten + FROM sliding_sync_membership_snapshots AS s + INNER JOIN room_memberships AS m ON (s.membership_event_id = m.event_id) + WHERE s.event_stream_ordering > ? + AND s.forgotten = 1 + ORDER BY s.event_stream_ordering ASC + LIMIT ? 
+ """, + (last_event_stream_ordering, batch_size), + ) + + memberships_to_update_rows = cast( + List[Tuple[str, str, str, int, int]], + txn.fetchall(), + ) + if not memberships_to_update_rows: + return 0 + + # Assemble the values to update + # + # (room_id, user_id) + key_values: List[Tuple[str, str]] = [] + # (forgotten,) + value_values: List[Tuple[int]] = [] + for ( + room_id, + user_id, + _membership_event_id, + _event_stream_ordering, + forgotten, + ) in memberships_to_update_rows: + key_values.append( + ( + room_id, + user_id, + ) + ) + value_values.append((forgotten,)) + + # Update all of the rows in one go + self.db_pool.simple_update_many_txn( + txn, + table="sliding_sync_membership_snapshots", + key_names=("room_id", "user_id"), + key_values=key_values, + value_names=("forgotten",), + value_values=value_values, + ) + + # Update the progress + ( + _room_id, + _user_id, + _membership_event_id, + event_stream_ordering, + _forgotten, + ) = memberships_to_update_rows[-1] + self.db_pool.updates._background_update_progress_txn( + txn, + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE, + { + "last_event_stream_ordering": event_stream_ordering, + }, + ) + + return len(memberships_to_update_rows) + + num_rows = await self.db_pool.runInteraction( + "_sliding_sync_membership_snapshots_fix_forgotten_column_bg_update", + _txn, + ) + + if not num_rows: + await self.db_pool.updates._end_background_update( + _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE + ) + + return num_rows + def _resolve_stale_data_in_sliding_sync_tables( txn: LoggingTransaction, diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index c77e009d0301..4249cf77e559 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -1375,6 +1375,7 @@ def f(txn: LoggingTransaction) -> None: keyvalues={"user_id": user_id, "room_id": room_id}, updatevalues={"forgotten": 1}, ) + # Handle updating the `sliding_sync_membership_snapshots` table self.db_pool.simple_update_txn( txn, table="sliding_sync_membership_snapshots", diff --git a/synapse/storage/schema/__init__.py b/synapse/storage/schema/__init__.py index f171f4568a01..934e1cccedbf 100644 --- a/synapse/storage/schema/__init__.py +++ b/synapse/storage/schema/__init__.py @@ -153,6 +153,8 @@ Changes in SCHEMA_VERSION = 88 - MSC4140: Add `delayed_events` table that keeps track of events that are to be posted in response to a resettable timeout or an on-demand action. + - Add background update to fix data integrity issue in the + `sliding_sync_membership_snapshots` -> `forgotten` column """ diff --git a/synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql b/synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql new file mode 100644 index 000000000000..4de46af2fc05 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/02_fix_sliding_sync_membership_snapshots_forgotten_column.sql @@ -0,0 +1,21 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. 
+--
+-- See the GNU Affero General Public License for more details:
+-- <https://www.gnu.org/licenses/agpl-3.0.html>.
+
+-- Add a background update to update the `sliding_sync_membership_snapshots` ->
+-- `forgotten` column to be in sync with the `room_memberships` table.
+--
+-- For any room that someone has forgotten and then gained a new membership in
+-- (e.g. re-joined or was re-invited), the column has fallen out of sync; update
+-- it to match the `room_memberships` table.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8802, 'sliding_sync_membership_snapshots_fix_forgotten_column_bg_update', '{}');
diff --git a/synapse/types/storage/__init__.py b/synapse/types/storage/__init__.py
index fae5449bcc35..b5fa20a41a59 100644
--- a/synapse/types/storage/__init__.py
+++ b/synapse/types/storage/__init__.py
@@ -45,3 +45,6 @@ class _BackgroundUpdates:
     SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE = (
         "sliding_sync_membership_snapshots_bg_update"
     )
+    SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE = (
+        "sliding_sync_membership_snapshots_fix_forgotten_column_bg_update"
+    )
diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py
index ea3ca57957d1..578cb384cd7f 100644
--- a/tests/rest/client/sliding_sync/test_sliding_sync.py
+++ b/tests/rest/client/sliding_sync/test_sliding_sync.py
@@ -240,6 +240,7 @@ def _create_remote_invite_room_for_user(
         self,
         invitee_user_id: str,
         unsigned_invite_room_state: Optional[List[StrippedStateEvent]],
+        invite_room_id: Optional[str] = None,
     ) -> str:
         """
         Create a fake invite for a remote room and persist it.
@@ -252,19 +253,23 @@ def _create_remote_invite_room_for_user(
             invitee_user_id: The person being invited
             unsigned_invite_room_state: List of stripped state events to assist the
                 receiver in identifying the room.
+            invite_room_id: Optional remote room ID to be invited to. When unset, we
+                will generate one.
Returns:
            The room ID of the remote invite room
        """
        store = self.hs.get_datastores().main

-        invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"
+        if invite_room_id is None:
+            invite_room_id = f"!test_room{self._remote_invite_count}:remote_server"

        invite_event_dict = {
            "room_id": invite_room_id,
            "sender": "@inviter:remote_server",
            "state_key": invitee_user_id,
-            "depth": 1,
+            # Just keep advancing the depth
+            "depth": self._remote_invite_count,
            "origin_server_ts": 1,
            "type": EventTypes.Member,
            "content": {"membership": Membership.INVITE},
@@ -679,6 +684,112 @@ def test_forgotten_up_to_date(self) -> None:
            exact=True,
        )

+    def test_rejoin_forgotten_room(self) -> None:
+        """
+        Make sure a forgotten room shows up again once we rejoin (or gain any new
+        membership, like an invite), i.e. it is no longer forgotten.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # We should see the room (like normal)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # Leave and forget the room
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+        # User1 forgets the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Re-join the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # We should see the room again after re-joining
+        response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+    def test_invited_to_forgotten_remote_room(self) -> None:
+        """
+        Make sure a forgotten room shows up again once we are invited again
+        (remote/federated out-of-band memberships)
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+
+        # Create a remote room invite (out-of-band membership)
+        room_id = self._create_remote_invite_room_for_user(user1_id, None)
+
+        # Make the Sliding Sync request
+        sync_body = {
+            "lists": {
+                "foo-list": {
+                    "ranges": [[0, 99]],
+                    "required_state": [],
+                    "timeline_limit": 0,
+                }
+            }
+        }
+        response_body, from_token = self.do_sync(sync_body, tok=user1_tok)
+        # We should see the room (like normal)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
+        # Leave and forget the room
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+        # User1 forgets the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Get invited to the room again (a remote out-of-band invite this time,
+        # rather than a local join)
+        self._create_remote_invite_room_for_user(user1_id, None, invite_room_id=room_id)
+
+        # We should see the room again after being re-invited
+        response_body, _ =
self.do_sync(sync_body, since=from_token, tok=user1_tok)
+        self.assertIncludes(
+            set(response_body["lists"]["foo-list"]["ops"][0]["room_ids"]),
+            {room_id},
+            exact=True,
+        )
+
     def test_ignored_user_invites_initial_sync(self) -> None:
         """
         Make sure we ignore invites if they are from one of the `m.ignored_user_list` on
diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py
index 2ecd37ca1a0e..07600418ed86 100644
--- a/tests/rest/client/test_rooms.py
+++ b/tests/rest/client/test_rooms.py
@@ -2894,6 +2894,68 @@ def _check_for_reason(self, reason: str) -> None:
         self.assertEqual(event_content.get("reason"), reason, channel.result)


+class RoomForgottenTestCase(unittest.HomeserverTestCase):
+    """
+    Test forget/forgotten rooms
+    """
+
+    servlets = [
+        synapse.rest.admin.register_servlets,
+        room.register_servlets,
+        login.register_servlets,
+    ]
+
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.store = hs.get_datastores().main
+
+    def test_room_not_forgotten_after_unban(self) -> None:
+        """
+        Test what happens when someone is banned from a room, forgets the room, and
+        is unbanned some time later.
+
+        Currently, when they are unbanned, the room is no longer forgotten, which may
+        or may not be the desired behaviour.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # User1 is banned and forgets the room
+        self.helper.ban(room_id, src=user2_id, targ=user1_id, tok=user2_tok)
+        # User1 forgets the room
+        self.get_success(self.store.forget(user1_id, room_id))
+
+        # The room should show up as forgotten
+        forgotten_room_ids = self.get_success(
+            self.store.get_forgotten_rooms_for_user(user1_id)
+        )
+        self.assertIncludes(forgotten_room_ids, {room_id}, exact=True)
+
+        # Unban user1
+        self.helper.change_membership(
+            room=room_id,
+            src=user2_id,
+            targ=user1_id,
+            membership=Membership.LEAVE,
+            tok=user2_tok,
+        )
+
+        # Room is no longer forgotten because it's a new membership
+        #
+        # XXX: Is this how we actually want it to behave? It seems like ideally, the
+        # room forgotten status should only be reset when the user decides to join again
+        # (or is invited/knocks). This way the room remains forgotten for any ban/leave
+        # transitions.
+        forgotten_room_ids = self.get_success(
+            self.store.get_forgotten_rooms_for_user(user1_id)
+        )
+        self.assertIncludes(forgotten_room_ids, set(), exact=True)
+
+
 class LabelsTestCase(unittest.HomeserverTestCase):
     servlets = [
         synapse.rest.admin.register_servlets_for_client_rest_resource,
diff --git a/tests/storage/test_sliding_sync_tables.py b/tests/storage/test_sliding_sync_tables.py
index 35917505a429..53212f7c4520 100644
--- a/tests/storage/test_sliding_sync_tables.py
+++ b/tests/storage/test_sliding_sync_tables.py
@@ -5014,3 +5014,106 @@ def test_membership_snapshots_background_update_catch_up_no_membership(
         },
         exact=True,
     )
+
+
+class SlidingSyncMembershipSnapshotsTableFixForgottenColumnBackgroundUpdatesTestCase(
+    SlidingSyncTablesTestCaseBase
+):
+    """
+    Test the background update that fixes the `sliding_sync_membership_snapshots` ->
+    `forgotten` column.
+    """
+
+    def test_membership_snapshots_fix_forgotten_column_background_update(self) -> None:
+        """
+        Test that the background update brings the `sliding_sync_membership_snapshots`
+        -> `forgotten` column back in sync with the `room_memberships` table.
+        """
+        user1_id = self.register_user("user1", "pass")
+        user1_tok = self.login(user1_id, "pass")
+        user2_id = self.register_user("user2", "pass")
+        user2_tok = self.login(user2_id, "pass")
+
+        room_id = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True)
+        # User1 joins the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Leave and forget the room
+        self.helper.leave(room_id, user1_id, tok=user1_tok)
+        # User1 forgets the room
+        channel = self.make_request(
+            "POST",
+            f"/_matrix/client/r0/rooms/{room_id}/forget",
+            content={},
+            access_token=user1_tok,
+        )
+        self.assertEqual(channel.code, 200, channel.result)
+
+        # Re-join the room
+        self.helper.join(room_id, user1_id, tok=user1_tok)
+
+        # Reset the `sliding_sync_membership_snapshots` table as if the `forgotten`
+        # column had fallen out of sync with the `room_memberships` table due to the
+        # previously flawed code.
+        self.get_success(
+            self.store.db_pool.simple_update_one(
+                table="sliding_sync_membership_snapshots",
+                keyvalues={"room_id": room_id, "user_id": user1_id},
+                updatevalues={"forgotten": 1},
+                desc="sliding_sync_membership_snapshots.test_membership_snapshots_fix_forgotten_column_background_update",
+            )
+        )
+
+        # Insert and run the background update.
+        self.get_success(
+            self.store.db_pool.simple_insert(
+                "background_updates",
+                {
+                    "update_name": _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_FIX_FORGOTTEN_COLUMN_BG_UPDATE,
+                    "progress_json": "{}",
+                },
+            )
+        )
+        self.store.db_pool.updates._all_done = False
+        self.wait_for_background_updates()
+
+        # Make sure the table is populated
+
+        sliding_sync_membership_snapshots_results = (
+            self._get_sliding_sync_membership_snapshots()
+        )
+        self.assertIncludes(
+            set(sliding_sync_membership_snapshots_results.keys()),
+            {
+                (room_id, user1_id),
+                (room_id, user2_id),
+            },
+            exact=True,
+        )
+        state_map = self.get_success(
+            self.storage_controllers.state.get_current_state(room_id)
+        )
+        # Holds the info according to the current state when the user joined.
+ # + # We only care about checking on user1 as that's what we reset and expect to be + # correct now + self.assertEqual( + sliding_sync_membership_snapshots_results.get((room_id, user1_id)), + _SlidingSyncMembershipSnapshotResult( + room_id=room_id, + user_id=user1_id, + sender=user1_id, + membership_event_id=state_map[(EventTypes.Member, user1_id)].event_id, + membership=Membership.JOIN, + event_stream_ordering=state_map[ + (EventTypes.Member, user1_id) + ].internal_metadata.stream_ordering, + has_known_state=True, + room_type=None, + room_name=None, + is_encrypted=False, + tombstone_successor_room_id=None, + # We should see the room as no longer forgotten + forgotten=False, + ), + ) From 4b94a056bd27a05f278cca6a96511b3ea28d57f8 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 22 Oct 2024 11:56:08 +0100 Subject: [PATCH 026/147] 1.118.0rc1 --- CHANGES.md | 57 +++++++++++++++++++++++++++++++++++++++ changelog.d/17627.doc | 1 - changelog.d/17708.feature | 1 - changelog.d/17718.misc | 1 - changelog.d/17736.bugfix | 1 - changelog.d/17752.misc | 1 - changelog.d/17783.feature | 1 - changelog.d/17785.bugfix | 1 - changelog.d/17802.doc | 1 - changelog.d/17803.misc | 1 - changelog.d/17805.bugfix | 1 - changelog.d/17824.misc | 1 - changelog.d/17825.doc | 1 - changelog.d/17826.misc | 1 - changelog.d/17835.bugfix | 1 - changelog.d/17842.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 18 files changed, 64 insertions(+), 16 deletions(-) delete mode 100644 changelog.d/17627.doc delete mode 100644 changelog.d/17708.feature delete mode 100644 changelog.d/17718.misc delete mode 100644 changelog.d/17736.bugfix delete mode 100644 changelog.d/17752.misc delete mode 100644 changelog.d/17783.feature delete mode 100644 changelog.d/17785.bugfix delete mode 100644 changelog.d/17802.doc delete mode 100644 changelog.d/17803.misc delete mode 100644 changelog.d/17805.bugfix delete mode 100644 changelog.d/17824.misc delete mode 100644 changelog.d/17825.doc delete mode 100644 changelog.d/17826.misc delete mode 100644 changelog.d/17835.bugfix delete mode 100644 changelog.d/17842.misc diff --git a/CHANGES.md b/CHANGES.md index ba45fe015640..1e3b5370fc0b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,60 @@ +# Synapse 1.118.0rc1 (2024-10-22) + +### Notice of dropping Python 3.8 support in next release + +Python 3.8 is now [end-of-life](https://devguide.python.org/versions/). As per our [Deprecation Policy for Platform Dependencies](https://element-hq.github.io/synapse/latest/deprecation_policy.html#policy), Synpase will be dropping support for Python 3.8 in the next release; Synapse 1.119.0. + +1.118.x will be the final Synapse release to support Python 3.8. Python upgrade to a more recent Python version if necessary. + +### Python 3.13 and PostgreSQL 17 support + +On the other end of the spectrum, Synapse 1.118.0 is the first release to support [Python 3.13](https://www.python.org/downloads/release/python-3130/)! + +[PostgreSQL 17](https://www.postgresql.org/about/news/postgresql-17-released-2936/) is also supported as of this release. + +### Features + +- Added the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload. ([\#17708](https://github.com/element-hq/synapse/issues/17708)) +- Implement [MSC4210](https://github.com/matrix-org/matrix-spec-proposals/pull/4210): Remove legacy mentions. Contributed by @tulir @ Beeper. 
([\#17783](https://github.com/element-hq/synapse/issues/17783)) + +### Bugfixes + +- Fix saving of PNG thumbnails, when the original image is in the CMYK color space. ([\#17736](https://github.com/element-hq/synapse/issues/17736)) +- Fix bug with sliding sync where the server would not return state that was added to the `required_state` config. ([\#17785](https://github.com/element-hq/synapse/issues/17785), [\#17805](https://github.com/element-hq/synapse/issues/17805)) +- Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync that would cause rooms to stay forgotten and hidden even after rejoining. ([\#17835](https://github.com/element-hq/synapse/issues/17835)) + +### Improved Documentation + +- Clarify when the `user_may_invite` and `user_may_send_3pid_invite` module callbacks are called. ([\#17627](https://github.com/element-hq/synapse/issues/17627)) +- Correct documentation to refer to the `--config-path` argument instead of `--config-file`. ([\#17802](https://github.com/element-hq/synapse/issues/17802)) +- Fix typo in `target_cache_memory_usage` docs. ([\#17825](https://github.com/element-hq/synapse/issues/17825)) + +### Internal Changes + +- Slight optimization when fetching state/events for Sliding Sync. ([\#17718](https://github.com/element-hq/synapse/issues/17718)) +- Add Python 3.13 and Postgres 17 to the test matrix. ([\#17752](https://github.com/element-hq/synapse/issues/17752)) +- Test github token before running release script steps. ([\#17803](https://github.com/element-hq/synapse/issues/17803)) +- Build debian packages for new Ubuntu versions, and stop building for no longer supported versions. ([\#17824](https://github.com/element-hq/synapse/issues/17824)) +- Enable the `.org.matrix.msc4028.encrypted_event` push rule by default in accordance with [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028). Note that the corresponding experimental feature must still be switched on for this push rule to have any effect. ([\#17826](https://github.com/element-hq/synapse/issues/17826)) +- Fix some typing issues uncovered by upgrading mypy to 1.11.x. ([\#17842](https://github.com/element-hq/synapse/issues/17842)) + + + +### Updates to locked dependencies + +* Bump mypy from 1.10.1 to 1.11.2. ([\#17842](https://github.com/element-hq/synapse/issues/17842)) +* Bump mypy-zope from 1.0.5 to 1.0.7. ([\#17827](https://github.com/element-hq/synapse/issues/17827)) +* Bump phonenumbers from 8.13.46 to 8.13.47. ([\#17797](https://github.com/element-hq/synapse/issues/17797)) +* Bump psycopg2 from 2.9.9 to 2.9.10. ([\#17843](https://github.com/element-hq/synapse/issues/17843)) +* Bump ruff from 0.6.8 to 0.6.9. ([\#17794](https://github.com/element-hq/synapse/issues/17794)) +* Bump sentry-sdk from 2.14.0 to 2.15.0. ([\#17795](https://github.com/element-hq/synapse/issues/17795)) +* Bump sentry-sdk from 2.15.0 to 2.16.0. ([\#17829](https://github.com/element-hq/synapse/issues/17829)) +* Bump sentry-sdk from 2.16.0 to 2.17.0. ([\#17844](https://github.com/element-hq/synapse/issues/17844)) +* Bump sigstore/cosign-installer from 3.6.0 to 3.7.0. ([\#17798](https://github.com/element-hq/synapse/issues/17798)) +* Bump tomli from 2.0.1 to 2.0.2. ([\#17796](https://github.com/element-hq/synapse/issues/17796)) +* Bump types-requests from 2.32.0.20240914 to 2.32.0.20241016. ([\#17841](https://github.com/element-hq/synapse/issues/17841)) +* Bump types-setuptools from 75.1.0.20240917 to 75.1.0.20241014. 
([\#17828](https://github.com/element-hq/synapse/issues/17828)) + # Synapse 1.117.0 (2024-10-15) No significant changes since 1.117.0rc1. diff --git a/changelog.d/17627.doc b/changelog.d/17627.doc deleted file mode 100644 index 487a0aea0dac..000000000000 --- a/changelog.d/17627.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify when the `user_may_invite` and `user_may_send_3pid_invite` module callbacks are called. \ No newline at end of file diff --git a/changelog.d/17708.feature b/changelog.d/17708.feature deleted file mode 100644 index 90ec810f50da..000000000000 --- a/changelog.d/17708.feature +++ /dev/null @@ -1 +0,0 @@ -Added the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload. \ No newline at end of file diff --git a/changelog.d/17718.misc b/changelog.d/17718.misc deleted file mode 100644 index ea73a03f537c..000000000000 --- a/changelog.d/17718.misc +++ /dev/null @@ -1 +0,0 @@ -Slight optimization when fetching state/events for Sliding Sync. diff --git a/changelog.d/17736.bugfix b/changelog.d/17736.bugfix deleted file mode 100644 index 0d3fd06962ce..000000000000 --- a/changelog.d/17736.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix saving of PNG thumbnails, when the original image is in the CMYK color space. diff --git a/changelog.d/17752.misc b/changelog.d/17752.misc deleted file mode 100644 index f9f01a424203..000000000000 --- a/changelog.d/17752.misc +++ /dev/null @@ -1 +0,0 @@ -Add Python 3.13 and Postgres 17 to the test matrix. diff --git a/changelog.d/17783.feature b/changelog.d/17783.feature deleted file mode 100644 index ce8c2164185c..000000000000 --- a/changelog.d/17783.feature +++ /dev/null @@ -1 +0,0 @@ -Implement [MSC4210](https://github.com/matrix-org/matrix-spec-proposals/pull/4210): Remove legacy mentions. Contributed by @tulir @ Beeper. diff --git a/changelog.d/17785.bugfix b/changelog.d/17785.bugfix deleted file mode 100644 index df2898f54e36..000000000000 --- a/changelog.d/17785.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug with sliding sync where the server would not return state that was added to the `required_state` config. diff --git a/changelog.d/17802.doc b/changelog.d/17802.doc deleted file mode 100644 index 72e653d3c4a4..000000000000 --- a/changelog.d/17802.doc +++ /dev/null @@ -1 +0,0 @@ -Correct documentation to refer to the `--config-path` argument instead of `--config-file`. diff --git a/changelog.d/17803.misc b/changelog.d/17803.misc deleted file mode 100644 index a267df8b83cd..000000000000 --- a/changelog.d/17803.misc +++ /dev/null @@ -1 +0,0 @@ -Test github token before running release script steps. diff --git a/changelog.d/17805.bugfix b/changelog.d/17805.bugfix deleted file mode 100644 index df2898f54e36..000000000000 --- a/changelog.d/17805.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug with sliding sync where the server would not return state that was added to the `required_state` config. diff --git a/changelog.d/17824.misc b/changelog.d/17824.misc deleted file mode 100644 index 22574f00ec39..000000000000 --- a/changelog.d/17824.misc +++ /dev/null @@ -1 +0,0 @@ -Build debian packages for new Ubuntu versions, and stop building for no longer supported versions. diff --git a/changelog.d/17825.doc b/changelog.d/17825.doc deleted file mode 100644 index ee4366741758..000000000000 --- a/changelog.d/17825.doc +++ /dev/null @@ -1 +0,0 @@ -Fix typo in `target_cache_memory_usage` docs. 
\ No newline at end of file diff --git a/changelog.d/17826.misc b/changelog.d/17826.misc deleted file mode 100644 index 9148c96a0d4e..000000000000 --- a/changelog.d/17826.misc +++ /dev/null @@ -1 +0,0 @@ -Enable the `.org.matrix.msc4028.encrypted_event` push rule by default in accordance with [MSC4028](https://github.com/matrix-org/matrix-spec-proposals/pull/4028). Note that the corresponding experimental feature must still be switched on for this push rule to have any effect. \ No newline at end of file diff --git a/changelog.d/17835.bugfix b/changelog.d/17835.bugfix deleted file mode 100644 index b970598c3546..000000000000 --- a/changelog.d/17835.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync that would cause rooms to stay forgotten and hidden even after rejoining. diff --git a/changelog.d/17842.misc b/changelog.d/17842.misc deleted file mode 100644 index 78af706c31b5..000000000000 --- a/changelog.d/17842.misc +++ /dev/null @@ -1 +0,0 @@ -Fix some typing issues uncovered by upgrading mypy to 1.11.x. diff --git a/debian/changelog b/debian/changelog index 1995fbf6f63e..9fb360c33022 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.118.0~rc1) stable; urgency=medium + + * New Synapse release 1.118.0rc1. + + -- Synapse Packaging team Tue, 22 Oct 2024 11:48:14 +0100 + matrix-synapse-py3 (1.117.0) stable; urgency=medium * New Synapse release 1.117.0. diff --git a/pyproject.toml b/pyproject.toml index 658771a89afd..6cdd6850b11d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.117.0" +version = "1.118.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 46c885f5b5ce85f47a8d8a259f66520b6f87d7ae Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 22 Oct 2024 12:00:40 +0100 Subject: [PATCH 027/147] fix spelling in changelog --- CHANGES.md | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 1e3b5370fc0b..38338f1fdbee 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,16 +1,14 @@ # Synapse 1.118.0rc1 (2024-10-22) -### Notice of dropping Python 3.8 support in next release +### Python 3.8 support will be dropped in the next release -Python 3.8 is now [end-of-life](https://devguide.python.org/versions/). As per our [Deprecation Policy for Platform Dependencies](https://element-hq.github.io/synapse/latest/deprecation_policy.html#policy), Synpase will be dropping support for Python 3.8 in the next release; Synapse 1.119.0. +Python 3.8 is now [end-of-life](https://devguide.python.org/versions/). As per our [Deprecation Policy for Platform Dependencies](https://element-hq.github.io/synapse/latest/deprecation_policy.html#policy), Synapse will be dropping support for Python 3.8 in the next release; Synapse 1.119.0. -1.118.x will be the final Synapse release to support Python 3.8. Python upgrade to a more recent Python version if necessary. +Synapse 1.118.x will be the final release to support Python 3.8. If you are running Synapse with Python 3.8, please upgrade before the 1.119.0 release, due in less than one month. ### Python 3.13 and PostgreSQL 17 support -On the other end of the spectrum, Synapse 1.118.0 is the first release to support [Python 3.13](https://www.python.org/downloads/release/python-3130/)! 
-
-[PostgreSQL 17](https://www.postgresql.org/about/news/postgresql-17-released-2936/) is also supported as of this release.
+On the other end of the spectrum, Synapse 1.118.0 is the first release to support [Python 3.13](https://www.python.org/downloads/release/python-3130/)! [PostgreSQL 17](https://www.postgresql.org/about/news/postgresql-17-released-2936/) is also supported as of this release.

 ### Features


From 57efc8c03eb27048c721f389f0b56e7bea145be3 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 23 Oct 2024 18:26:01 +0100
Subject: [PATCH 028/147] Add media tests for a CMYK JPEG image (#17786)

---
 changelog.d/17786.misc            |  1 +
 tests/media/test_media_storage.py | 64 ++++++++++++++++++++++++++++++-
 tests/rest/client/test_media.py   |  4 +-
 tests/test_utils/__init__.py      | 19 +++++++++
 4 files changed, 86 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/17786.misc

diff --git a/changelog.d/17786.misc b/changelog.d/17786.misc
new file mode 100644
index 000000000000..59eb3f4dbdb4
--- /dev/null
+++ b/changelog.d/17786.misc
@@ -0,0 +1 @@
+Add a test for downloading and thumbnailing a CMYK JPEG.
\ No newline at end of file
diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py
index e50ff5fa7805..034d9ece0b51 100644
--- a/tests/media/test_media_storage.py
+++ b/tests/media/test_media_storage.py
@@ -60,7 +60,7 @@

 from tests import unittest
 from tests.server import FakeChannel
-from tests.test_utils import SMALL_PNG
+from tests.test_utils import SMALL_CMYK_JPEG, SMALL_PNG
 from tests.unittest import override_config
 from tests.utils import default_config

@@ -187,6 +187,68 @@ class TestImage:
     # different versions of Pillow.
 )

+small_cmyk_jpeg = TestImage(
+    SMALL_CMYK_JPEG,
+    b"image/jpeg",
+    b".jpeg",
+    # These values were sourced simply by seeing what the tests produced at
+    # the time of writing. If this changes, the tests will fail.
+ unhexlify( + b"ffd8ffe000104a46494600010100000100010000ffdb00430006" + b"040506050406060506070706080a100a0a09090a140e0f0c1017" + b"141818171416161a1d251f1a1b231c1616202c20232627292a29" + b"191f2d302d283025282928ffdb0043010707070a080a130a0a13" + b"281a161a28282828282828282828282828282828282828282828" + b"2828282828282828282828282828282828282828282828282828" + b"2828ffc00011080020002003012200021101031101ffc4001f00" + b"0001050101010101010000000000000000010203040506070809" + b"0a0bffc400b5100002010303020403050504040000017d010203" + b"00041105122131410613516107227114328191a1082342b1c115" + b"52d1f02433627282090a161718191a25262728292a3435363738" + b"393a434445464748494a535455565758595a636465666768696a" + b"737475767778797a838485868788898a92939495969798999aa2" + b"a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9bac2c3c4c5c6c7c8c9ca" + b"d2d3d4d5d6d7d8d9dae1e2e3e4e5e6e7e8e9eaf1f2f3f4f5f6f7" + b"f8f9faffc4001f01000301010101010101010100000000000001" + b"02030405060708090a0bffc400b5110002010204040304070504" + b"0400010277000102031104052131061241510761711322328108" + b"144291a1b1c109233352f0156272d10a162434e125f11718191a" + b"262728292a35363738393a434445464748494a53545556575859" + b"5a636465666768696a737475767778797a82838485868788898a" + b"92939495969798999aa2a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9" + b"bac2c3c4c5c6c7c8c9cad2d3d4d5d6d7d8d9dae2e3e4e5e6e7e8" + b"e9eaf2f3f4f5f6f7f8f9faffda000c03010002110311003f00fa" + b"a68a28a0028a28a0028a28a0028a28a00fffd9" + ), + unhexlify( + b"ffd8ffe000104a46494600010100000100010000ffdb00430006" + b"040506050406060506070706080a100a0a09090a140e0f0c1017" + b"141818171416161a1d251f1a1b231c1616202c20232627292a29" + b"191f2d302d283025282928ffdb0043010707070a080a130a0a13" + b"281a161a28282828282828282828282828282828282828282828" + b"2828282828282828282828282828282828282828282828282828" + b"2828ffc00011080001000103012200021101031101ffc4001f00" + b"0001050101010101010000000000000000010203040506070809" + b"0a0bffc400b5100002010303020403050504040000017d010203" + b"00041105122131410613516107227114328191a1082342b1c115" + b"52d1f02433627282090a161718191a25262728292a3435363738" + b"393a434445464748494a535455565758595a636465666768696a" + b"737475767778797a838485868788898a92939495969798999aa2" + b"a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9bac2c3c4c5c6c7c8c9ca" + b"d2d3d4d5d6d7d8d9dae1e2e3e4e5e6e7e8e9eaf1f2f3f4f5f6f7" + b"f8f9faffc4001f01000301010101010101010100000000000001" + b"02030405060708090a0bffc400b5110002010204040304070504" + b"0400010277000102031104052131061241510761711322328108" + b"144291a1b1c109233352f0156272d10a162434e125f11718191a" + b"262728292a35363738393a434445464748494a53545556575859" + b"5a636465666768696a737475767778797a82838485868788898a" + b"92939495969798999aa2a3a4a5a6a7a8a9aab2b3b4b5b6b7b8b9" + b"bac2c3c4c5c6c7c8c9cad2d3d4d5d6d7d8d9dae2e3e4e5e6e7e8" + b"e9eaf2f3f4f5f6f7f8f9faffda000c03010002110311003f00fa" + b"a68a28a00fffd9" + ), +) + small_lossless_webp = TestImage( unhexlify( b"524946461a000000574542505650384c0d0000002f0000001007" b"1011118888fe0700" diff --git a/tests/rest/client/test_media.py b/tests/rest/client/test_media.py index 42014e257e48..4060525efe32 100644 --- a/tests/rest/client/test_media.py +++ b/tests/rest/client/test_media.py @@ -66,6 +66,7 @@ SVG, TestImage, empty_file, + small_cmyk_jpeg, small_lossless_webp, small_png, small_png_with_transparency, @@ -1916,6 +1917,7 @@ def test_file_download(self) -> None: test_images = [ small_png, small_png_with_transparency, + small_cmyk_jpeg, small_lossless_webp, empty_file, SVG, @@ -2400,7 +2402,7 @@ def _test_thumbnail( if 
expected_body is not None: self.assertEqual( - channel.result["body"], expected_body, channel.result["body"] + channel.result["body"], expected_body, channel.result["body"].hex() ) else: # ensure that the result is at least some valid image diff --git a/tests/test_utils/__init__.py b/tests/test_utils/__init__.py index 4d7adf720497..e3611852b79b 100644 --- a/tests/test_utils/__init__.py +++ b/tests/test_utils/__init__.py @@ -23,6 +23,7 @@ Utilities for running the unit tests """ +import base64 import json import sys import warnings @@ -138,3 +139,21 @@ def json(cls, *, code: int = 200, payload: JsonSerializable) -> "FakeResponse": b"0000001f15c4890000000a49444154789c63000100000500010d" b"0a2db40000000049454e44ae426082" ) + +# A small CMYK-encoded JPEG image used in some tests. +# +# Generated with: +# img = PIL.Image.new('CMYK', (1, 1), (0, 0, 0, 0)) +# img.save('minimal_cmyk.jpg', 'JPEG') +# +# Resolution: 1x1, MIME type: image/jpeg, Extension: jpeg, Size: 4 KiB +SMALL_CMYK_JPEG = base64.b64decode(""" +/9j/7gAOQWRvYmUAZAAAAAAA/9sAQwAIBgYHBgUIBwcHCQkICgwUDQwLCww +ZEhMPFB0aHx4dGhwcICQuJyAiLCMcHCg3KSwwMTQ0NB8nOT04MjwuMzQy/8 +AAFAgAAQABBEMRAE0RAFkRAEsRAP/EAB8AAAEFAQEBAQEBAAAAAAAAAAABA +gMEBQYHCAkKC//EALUQAAIBAwMCBAMFBQQEAAABfQECAwAEEQUSITFBBhNR +YQcicRQygZGhCCNCscEVUtHwJDNicoIJChYXGBkaJSYnKCkqNDU2Nzg5OkN +ERUZHSElKU1RVVldYWVpjZGVmZ2hpanN0dXZ3eHl6g4SFhoeIiYqSk5SVlp +eYmZqio6Slpqeoqaqys7S1tre4ubrCw8TFxsfIycrS09TV1tfY2drh4uPk5 +ebn6Onq8fLz9PX29/j5+v/aAA4EQwBNAFkASwAAPwD3+vf69/r3+v/Z +""") From 1006c12eb20b28dc1c372517f56a9fa06574e7ad Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:35:37 +0100 Subject: [PATCH 029/147] Bump anyhow from 1.0.89 to 1.0.90 (#17858) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dfc8dc6047f1..06d5c48be52f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.89" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" +checksum = "37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95" [[package]] name = "arc-swap" From 12d65a677845e8376160a2e836db50a19bd63a82 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:40:58 +0100 Subject: [PATCH 030/147] Bump cryptography from 43.0.1 to 43.0.3 (#17853) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 58 ++++++++++++++++++++++++++--------------------------- 1 file changed, 29 insertions(+), 29 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1102e89d12f0..09c4661f37ea 100644 --- a/poetry.lock +++ b/poetry.lock @@ -360,38 +360,38 @@ files = [ [[package]] name = "cryptography" -version = "43.0.1" +version = "43.0.3" description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." 
optional = false python-versions = ">=3.7" files = [ - {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, - {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, - {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, - {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, - {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, - {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, - {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, - {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, - {file = "cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, - {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, - {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, - {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, - {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, + {file = "cryptography-43.0.3-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:bf7a1932ac4176486eab36a19ed4c0492da5d97123f1406cf15e41b05e787d2e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63efa177ff54aec6e1c0aefaa1a241232dcd37413835a9b674b6e3f0ae2bfd3e"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e1ce50266f4f70bf41a2c6dc4358afadae90e2a1e5342d3c08883df1675374f"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:443c4a81bb10daed9a8f334365fe52542771f25aedaf889fd323a853ce7377d6"}, + {file = "cryptography-43.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:74f57f24754fe349223792466a709f8e0c093205ff0dca557af51072ff47ab18"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9762ea51a8fc2a88b70cf2995e5675b38d93bf36bd67d91721c309df184f49bd"}, + {file = "cryptography-43.0.3-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:81ef806b1fef6b06dcebad789f988d3b37ccaee225695cf3e07648eee0fc6b73"}, + {file = "cryptography-43.0.3-cp37-abi3-win32.whl", hash = "sha256:cbeb489927bd7af4aa98d4b261af9a5bc025bd87f0e3547e11584be9e9427be2"}, + {file = "cryptography-43.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:f46304d6f0c6ab8e52770addfa2fc41e6629495548862279641972b6215451cd"}, + {file = "cryptography-43.0.3-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:8ac43ae87929a5982f5948ceda07001ee5e83227fd69cf55b109144938d96984"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:846da004a5804145a5f441b8530b4bf35afbf7da70f82409f151695b127213d5"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0f996e7268af62598f2fc1204afa98a3b5712313a55c4c9d434aef49cadc91d4"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:f7b178f11ed3664fd0e995a47ed2b5ff0a12d893e41dd0494f406d1cf555cab7"}, + {file = "cryptography-43.0.3-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:c2e6fc39c4ab499049df3bdf567f768a723a5e8464816e8f009f121a5a9f4405"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:e1be4655c7ef6e1bbe6b5d0403526601323420bcf414598955968c9ef3eb7d16"}, + {file = "cryptography-43.0.3-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:df6b6c6d742395dd77a23ea3728ab62f98379eff8fb61be2744d4679ab678f73"}, + {file = "cryptography-43.0.3-cp39-abi3-win32.whl", hash = "sha256:d56e96520b1020449bbace2b78b603442e7e378a9b3bd68de65c782db1507995"}, + {file = "cryptography-43.0.3-cp39-abi3-win_amd64.whl", hash = "sha256:0c580952eef9bf68c4747774cde7ec1d85a6e61de97281f2dba83c7d2c806362"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = 
"sha256:d03b5621a135bffecad2c73e9f4deb1a0f977b9a8ffe6f8e002bf6c9d07b918c"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a2a431ee15799d6db9fe80c82b055bae5a752bef645bba795e8e52687c69efe3"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:281c945d0e28c92ca5e5930664c1cefd85efe80e5c0d2bc58dd63383fda29f83"}, + {file = "cryptography-43.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:f18c716be16bc1fea8e95def49edf46b82fccaa88587a45f8dc0ff6ab5d8e0a7"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4a02ded6cd4f0a5562a8887df8b3bd14e822a90f97ac5e544c162899bc467664"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:53a583b6637ab4c4e3591a15bc9db855b8d9dee9a669b550f311480acab6eb08"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1ec0bcf7e17c0c5669d881b1cd38c4972fade441b27bda1051665faaa89bdcaa"}, + {file = "cryptography-43.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2ce6fae5bdad59577b44e4dfed356944fbf1d925269114c28be377692643b4ff"}, + {file = "cryptography-43.0.3.tar.gz", hash = "sha256:315b9001266a492a6ff443b61238f956b214dbec9910a081ba5b6646a055a805"}, ] [package.dependencies] @@ -404,7 +404,7 @@ nox = ["nox"] pep8test = ["check-sdist", "click", "mypy", "ruff"] sdist = ["build"] ssh = ["bcrypt (>=3.1.5)"] -test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test = ["certifi", "cryptography-vectors (==43.0.3)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] test-randomorder = ["pytest-randomly"] [[package]] From 4e5410fdae9a5850e8cd771a87cfd185aa2c9dc4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:42:38 +0100 Subject: [PATCH 031/147] Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019 (#17855) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 09c4661f37ea..43063ac70fc4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2783,13 +2783,13 @@ files = [ [[package]] name = "types-psycopg2" -version = "2.9.21.20240819" +version = "2.9.21.20241019" description = "Typing stubs for psycopg2" optional = false python-versions = ">=3.8" files = [ - {file = "types-psycopg2-2.9.21.20240819.tar.gz", hash = "sha256:4ed6b47464d6374fa64e5e3b234cea0f710e72123a4596d67ab50b7415a84666"}, - {file = "types_psycopg2-2.9.21.20240819-py3-none-any.whl", hash = "sha256:c9192311c27d7ad561eef705f1b2df1074f2cdcf445a98a6a2fcaaaad43278cf"}, + {file = "types-psycopg2-2.9.21.20241019.tar.gz", hash = "sha256:bca89b988d2ebd19bcd08b177d22a877ea8b841decb10ed130afcf39404612fa"}, + {file = "types_psycopg2-2.9.21.20241019-py3-none-any.whl", hash = "sha256:44d091e67732d16a941baae48cd7b53bf91911bc36888652447cf1ef0c1fb3f6"}, ] [[package]] From 5d0514f29b897ef98f8d2c0d60660b348d41ec79 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:43:40 +0100 Subject: [PATCH 032/147] Bump serde_json from 1.0.128 to 1.0.132 (#17857) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/Cargo.lock b/Cargo.lock index 06d5c48be52f..4b6775cecac5 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.128" +version = "1.0.132" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" +checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" dependencies = [ "itoa", "memchr", From 69e9b753734ebd28d5bed5f7828a8d34e86ec205 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 25 Oct 2024 10:44:12 +0100 Subject: [PATCH 033/147] Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019 (#17856) Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 43063ac70fc4..9ca72814a44b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2834,13 +2834,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "75.1.0.20241014" +version = "75.2.0.20241019" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types-setuptools-75.1.0.20241014.tar.gz", hash = "sha256:29b0560a8d4b4a91174be085847002c69abfcb048e20b33fc663005aedf56804"}, - {file = "types_setuptools-75.1.0.20241014-py3-none-any.whl", hash = "sha256:caab58366741fb99673d0138b6e2d760717f154cfb981b74fea5e8de40f0b703"}, + {file = "types-setuptools-75.2.0.20241019.tar.gz", hash = "sha256:86ea31b5f6df2c6b8f2dc8ae3f72b213607f62549b6fa2ed5866e5299f968694"}, + {file = "types_setuptools-75.2.0.20241019-py3-none-any.whl", hash = "sha256:2e48ff3acd4919471e80d5e3f049cce5c177e108d5d36d2d4cee3fa4d4104258"}, ] [[package]] From 6c51f8649d5b17eeb218da61583f0ca132fdd728 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Oct 2024 10:09:25 +0000 Subject: [PATCH 034/147] Include the destination in the error of 'Destination mismatch' (#17830) To help debug problems such as https://github.com/element-hq/synapse/issues/17822 --- changelog.d/17830.misc | 1 + synapse/federation/transport/server/_base.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17830.misc diff --git a/changelog.d/17830.misc b/changelog.d/17830.misc new file mode 100644 index 000000000000..b2cd91c0ad01 --- /dev/null +++ b/changelog.d/17830.misc @@ -0,0 +1 @@ +Include the destination in the error of 'Destination mismatch' on federation requests. 
diff --git a/synapse/federation/transport/server/_base.py b/synapse/federation/transport/server/_base.py index 9094201da002..cba309635b70 100644 --- a/synapse/federation/transport/server/_base.py +++ b/synapse/federation/transport/server/_base.py @@ -113,7 +113,7 @@ async def authenticate_request( ): raise AuthenticationError( HTTPStatus.UNAUTHORIZED, - "Destination mismatch in auth header", + f"Destination mismatch in auth header, received: {destination!r}", Codes.UNAUTHORIZED, ) if ( From b3b1db4057fea62244804ab83a138a8bb9a93404 Mon Sep 17 00:00:00 2001 From: Till Faelligen <2353100+S7evinK@users.noreply.github.com> Date: Tue, 29 Oct 2024 15:30:10 +0100 Subject: [PATCH 035/147] 1.118.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 38338f1fdbee..bf3e8d692df9 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.118.0 (2024-10-29) + +No significant changes since 1.118.0rc1. + + + + # Synapse 1.118.0rc1 (2024-10-22) ### Python 3.8 support will be dropped in the next release diff --git a/debian/changelog b/debian/changelog index 9fb360c33022..384887888fba 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.118.0) stable; urgency=medium + + * New Synapse release 1.118.0. + + -- Synapse Packaging team Tue, 29 Oct 2024 15:29:53 +0100 + matrix-synapse-py3 (1.118.0~rc1) stable; urgency=medium * New Synapse release 1.118.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 6cdd6850b11d..5af9a4145bfc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.118.0rc1" +version = "1.118.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 4be3bd41fddd3d6bf3b772be9cf23b16899e089d Mon Sep 17 00:00:00 2001 From: Till Faelligen <2353100+S7evinK@users.noreply.github.com> Date: Tue, 29 Oct 2024 17:05:22 +0100 Subject: [PATCH 036/147] Move announcements up --- CHANGES.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index bf3e8d692df9..bcc834021041 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -2,11 +2,6 @@ No significant changes since 1.118.0rc1. - - - -# Synapse 1.118.0rc1 (2024-10-22) - ### Python 3.8 support will be dropped in the next release Python 3.8 is now [end-of-life](https://devguide.python.org/versions/). As per our [Deprecation Policy for Platform Dependencies](https://element-hq.github.io/synapse/latest/deprecation_policy.html#policy), Synapse will be dropping support for Python 3.8 in the next release; Synapse 1.119.0. @@ -17,6 +12,9 @@ Synapse 1.118.x will be the final release to support Python 3.8. If you are runn On the other end of the spectrum, Synapse 1.118.0 is the first release to support [Python 3.13](https://www.python.org/downloads/release/python-3130/)! [PostgreSQL 17](https://www.postgresql.org/about/news/postgresql-17-released-2936/) is also supported as of this release. + +# Synapse 1.118.0rc1 (2024-10-22) + ### Features - Added the `display_name_claim` option to the JWT configuration. This option allows specifying the claim key that contains the user's display name in the JWT payload. 
([\#17708](https://github.com/element-hq/synapse/issues/17708)) From d427403c6776ca23a6946b67ce3fb2b3cdcb33be Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 29 Oct 2024 17:06:15 +0000 Subject: [PATCH 037/147] Fix check for outdated Rust library (#17861) This failed when install with poetry, so let's properly try and detect what's going on. --- changelog.d/17861.bugfix | 1 + synapse/util/rust.py | 87 ++++++++++++++++++++++++++++++---------- 2 files changed, 66 insertions(+), 22 deletions(-) create mode 100644 changelog.d/17861.bugfix diff --git a/changelog.d/17861.bugfix b/changelog.d/17861.bugfix new file mode 100644 index 000000000000..abee7a30f700 --- /dev/null +++ b/changelog.d/17861.bugfix @@ -0,0 +1 @@ +Fix detection when the built Rust library was outdated when using source installations. diff --git a/synapse/util/rust.py b/synapse/util/rust.py index 0e35d6d18812..37f43459f1c0 100644 --- a/synapse/util/rust.py +++ b/synapse/util/rust.py @@ -19,9 +19,12 @@ # # +import json import os -import sys +import urllib.parse from hashlib import blake2b +from importlib.metadata import Distribution, PackageNotFoundError +from typing import Optional import synapse from synapse.synapse_rust import get_rust_file_digest @@ -32,22 +35,17 @@ def check_rust_lib_up_to_date() -> None: be rebuilt. """ - if not _dist_is_editable(): - return - - synapse_dir = os.path.dirname(synapse.__file__) - synapse_root = os.path.abspath(os.path.join(synapse_dir, "..")) - - # Double check we've not gone into site-packages... - if os.path.basename(synapse_root) == "site-packages": - return - - # ... and it looks like the root of a python project. - if not os.path.exists("pyproject.toml"): - return + # Get the location of the editable install. + synapse_root = get_synapse_source_directory() + if synapse_root is None: + return None # Get the hash of all Rust source files - hash = _hash_rust_files_in_directory(os.path.join(synapse_root, "rust", "src")) + rust_path = os.path.join(synapse_root, "rust", "src") + if not os.path.exists(rust_path): + return None + + hash = _hash_rust_files_in_directory(rust_path) if hash != get_rust_file_digest(): raise Exception("Rust module outdated. Please rebuild using `poetry install`") @@ -82,10 +80,55 @@ def _hash_rust_files_in_directory(directory: str) -> str: return hasher.hexdigest() -def _dist_is_editable() -> bool: - """Is distribution an editable install?""" - for path_item in sys.path: - egg_link = os.path.join(path_item, "matrix-synapse.egg-link") - if os.path.isfile(egg_link): - return True - return False +def get_synapse_source_directory() -> Optional[str]: + """Try and find the source directory of synapse for editable installs (like + those used in development). + + Returns None if not an editable install (or otherwise can't find the source + directory). + """ + + # Try and find the installed matrix-synapse package. + try: + package = Distribution.from_name("matrix-synapse") + except PackageNotFoundError: + # The package is not found, so it's not installed and so must be being + # pulled out from a local directory (usually the current one). + synapse_dir = os.path.dirname(synapse.__file__) + synapse_root = os.path.abspath(os.path.join(synapse_dir, "..")) + + # Double check we've not gone into site-packages... + if os.path.basename(synapse_root) == "site-packages": + return None + + # ... and it looks like the root of a python project. 
+        if not os.path.exists("pyproject.toml"):
+            return None
+
+        return synapse_root
+
+    # Read the `direct_url.json` metadata for the package. This won't exist for
+    # packages installed via a repository/etc.
+    # c.f. https://packaging.python.org/en/latest/specifications/direct-url/
+    direct_url_json = package.read_text("direct_url.json")
+    if direct_url_json is None:
+        return None
+
+    # c.f. https://packaging.python.org/en/latest/specifications/direct-url/ for
+    # the format
+    direct_url_dict: dict = json.loads(direct_url_json)
+
+    # `url` must exist as a key, and point to where we fetched the repo from.
+    project_url = urllib.parse.urlparse(direct_url_dict["url"])
+
+    # If it's not a local file then we must have built the rust libs either a)
+    # after we downloaded the package, or b) when the downloaded wheel was built.
+    if project_url.scheme != "file":
+        return None
+
+    # And finally, if it's not an editable install then the files can't have
+    # changed since we installed the package.
+    if not direct_url_dict.get("dir_info", {}).get("editable", False):
+        return None
+
+    return project_url.path

From 58deef5eba68c24987c6b2d95d397ee023a1c6ec Mon Sep 17 00:00:00 2001
From: Shay
Date: Tue, 29 Oct 2024 11:50:13 -0700
Subject: [PATCH 038/147] Add admin handler to list of handlers used for
 background tasks (#17847)

Fixes #17823

While we're at it, this changes redactions to be sent as the admin if the
user is not local to the server (otherwise they fail with a "User must be
our own" message).
---
 changelog.d/17847.bugfix         |  2 +
 docs/admin_api/user_admin_api.md |  3 +
 synapse/handlers/admin.py        |  6 +-
 synapse/server.py                |  1 +
 tests/rest/admin/test_user.py    | 98 +++++++++++++++++++++++++++++++-
 5 files changed, 108 insertions(+), 2 deletions(-)
 create mode 100644 changelog.d/17847.bugfix

diff --git a/changelog.d/17847.bugfix b/changelog.d/17847.bugfix
new file mode 100644
index 000000000000..0ba39df94dfd
--- /dev/null
+++ b/changelog.d/17847.bugfix
@@ -0,0 +1,2 @@
+Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in
+the config option `run_background_tasks_on`.
\ No newline at end of file
diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md
index cb38e26005a7..96a2994b7b4f 100644
--- a/docs/admin_api/user_admin_api.md
+++ b/docs/admin_api/user_admin_api.md
@@ -1365,6 +1365,9 @@ _Added in Synapse 1.72.0._

 ## Redact all the events of a user

+This endpoint allows an admin to redact the events of a given user. There are no restrictions on redactions for a
+local user. By default, we puppet the user who sent the message so they redact it themselves. Redactions for non-local users are issued using the admin user, and will fail in rooms where the admin user is not an admin or does not have the specified power level to issue redactions.
+ The API is ``` POST /_synapse/admin/v1/user/$user_id/redact diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index 851fe57a177f..d1989e9d2c60 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -73,6 +73,8 @@ def __init__(self, hs: "HomeServer"): self._redact_all_events, REDACT_ALL_EVENTS_ACTION_NAME ) + self.hs = hs + async def get_redact_task(self, redact_id: str) -> Optional[ScheduledTask]: """Get the current status of an active redaction process @@ -423,8 +425,10 @@ async def _redact_all_events( user_id = task.params.get("user_id") assert user_id is not None + # puppet the user if they're ours, otherwise use admin to redact requester = create_requester( - user_id, authenticated_entity=admin.user.to_string() + user_id if self.hs.is_mine_id(user_id) else admin.user.to_string(), + authenticated_entity=admin.user.to_string(), ) reason = task.params.get("reason") diff --git a/synapse/server.py b/synapse/server.py index 318c6abf3d88..c7b491881395 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -249,6 +249,7 @@ class HomeServer(metaclass=abc.ABCMeta): """ REQUIRED_ON_BACKGROUND_TASK_STARTUP = [ + "admin", "account_validity", "auth", "deactivate_account", diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 6982c7291a69..f9ae50f40a85 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -23,6 +23,7 @@ import hmac import json import os +import time import urllib.parse from binascii import unhexlify from http import HTTPStatus @@ -56,6 +57,7 @@ from synapse.util import Clock from tests import unittest +from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.test_utils import SMALL_PNG from tests.unittest import override_config @@ -5127,7 +5129,6 @@ def test_redact_messages_all_rooms(self) -> None: """ Test that request to redact events in all rooms user is member of is successful """ - # join rooms, send some messages originals = [] for rm in [self.rm1, self.rm2, self.rm3]: @@ -5404,3 +5405,98 @@ def test_admin_redact_works_if_user_kicked_or_banned(self) -> None: matches.append((event_id, event)) # we redacted 6 messages self.assertEqual(len(matches), 6) + + +class UserRedactionBackgroundTaskTestCase(BaseMultiWorkerStreamTestCase): + servlets = [ + synapse.rest.admin.register_servlets, + login.register_servlets, + admin.register_servlets, + room.register_servlets, + sync.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.admin = self.register_user("thomas", "pass", True) + self.admin_tok = self.login("thomas", "pass") + + self.bad_user = self.register_user("teresa", "pass") + self.bad_user_tok = self.login("teresa", "pass") + + # create rooms - room versions 11+ store the `redacts` key in content while + # earlier ones don't so we use a mix of room versions + self.rm1 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="7" + ) + self.rm2 = self.helper.create_room_as(self.admin, tok=self.admin_tok) + self.rm3 = self.helper.create_room_as( + self.admin, tok=self.admin_tok, room_version="11" + ) + + @override_config({"run_background_tasks_on": "worker1"}) + def test_redact_messages_all_rooms(self) -> None: + """ + Test that redact task successfully runs when `run_background_tasks_on` is specified + """ + self.make_worker_hs( + "synapse.app.generic_worker", + extra_config={ + "worker_name": "worker1", + "run_background_tasks_on": "worker1", + "redis": {"enabled": True}, + }, + ) + + # 
join rooms, send some messages + original_event_ids = set() + for rm in [self.rm1, self.rm2, self.rm3]: + join = self.helper.join(rm, self.bad_user, tok=self.bad_user_tok) + original_event_ids.add(join["event_id"]) + for i in range(15): + event = {"body": f"hello{i}", "msgtype": "m.text"} + res = self.helper.send_event( + rm, "m.room.message", event, tok=self.bad_user_tok, expect_code=200 + ) + original_event_ids.add(res["event_id"]) + + # redact all events in all rooms + channel = self.make_request( + "POST", + f"/_synapse/admin/v1/user/{self.bad_user}/redact", + content={"rooms": []}, + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + id = channel.json_body.get("redact_id") + + timeout_s = 10 + start_time = time.time() + redact_result = "" + while redact_result != "complete": + if start_time + timeout_s < time.time(): + self.fail("Timed out waiting for redactions.") + + channel2 = self.make_request( + "GET", + f"/_synapse/admin/v1/user/redact_status/{id}", + access_token=self.admin_tok, + ) + redact_result = channel2.json_body["status"] + if redact_result == "failed": + self.fail("Redaction task failed.") + + redaction_ids = set() + for rm in [self.rm1, self.rm2, self.rm3]: + filter = json.dumps({"types": [EventTypes.Redaction]}) + channel = self.make_request( + "GET", + f"rooms/{rm}/messages?filter={filter}&limit=50", + access_token=self.admin_tok, + ) + self.assertEqual(channel.code, 200) + + for event in channel.json_body["chunk"]: + if event["type"] == "m.room.redaction": + redaction_ids.add(event["redacts"]) + + self.assertIncludes(redaction_ids, original_event_ids, exact=True) From 83513b75f7b7d63d6ce2e1565568d8051ba658f8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Oct 2024 10:51:04 +0000 Subject: [PATCH 039/147] Speed up sliding sync by computing extensions in parallel (#17884) The main change here is to add a helper function `gather_optional_coroutines`, which works in a similar way as `yieldable_gather_results` but takes a set of coroutines rather than a function --- changelog.d/17884.misc | 1 + synapse/handlers/sliding_sync/extensions.py | 39 +++++-- synapse/logging/context.py | 40 +++++++ synapse/util/async_helpers.py | 110 +++++++++++++++++++- tests/util/test_async_helpers.py | 108 ++++++++++++++++++- 5 files changed, 285 insertions(+), 13 deletions(-) create mode 100644 changelog.d/17884.misc diff --git a/changelog.d/17884.misc b/changelog.d/17884.misc new file mode 100644 index 000000000000..9dfa13f853c0 --- /dev/null +++ b/changelog.d/17884.misc @@ -0,0 +1 @@ +Minor speed-up of sliding sync by computing extensions results in parallel. 
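To make the shape of the new helper concrete before the diffs below: the idea is to gather a fixed tuple of "maybe" coroutines, skipping `None` entries while keeping the result tuple positionally aligned with the inputs. The following is a minimal, self-contained sketch of that pattern using plain `asyncio`; the name `gather_optional` and everything in it are illustrative stand-ins, not the Twisted- and log-context-aware implementation this patch adds.

```python
import asyncio
from typing import Any, Coroutine, Optional, Tuple


async def gather_optional(
    *coros: Optional[Coroutine[Any, Any, Any]],
) -> Tuple[Optional[Any], ...]:
    # Run only the non-None coroutines concurrently...
    results = await asyncio.gather(*(c for c in coros if c is not None))
    results_iter = iter(results)
    # ...then re-expand so each result sits in the same slot as its input,
    # with None filling the slots whose coroutine was never requested.
    return tuple(next(results_iter) if c is not None else None for c in coros)


async def main() -> None:
    async def double(x: int) -> int:
        return x * 2

    # Prints (2, None, 6): the None placeholder passes straight through.
    print(await gather_optional(double(1), None, double(3)))


asyncio.run(main())
```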
diff --git a/synapse/handlers/sliding_sync/extensions.py b/synapse/handlers/sliding_sync/extensions.py index 0c77b5251399..077887ec3211 100644 --- a/synapse/handlers/sliding_sync/extensions.py +++ b/synapse/handlers/sliding_sync/extensions.py @@ -49,7 +49,10 @@ SlidingSyncConfig, SlidingSyncResult, ) -from synapse.util.async_helpers import concurrently_execute +from synapse.util.async_helpers import ( + concurrently_execute, + gather_optional_coroutines, +) if TYPE_CHECKING: from synapse.server import HomeServer @@ -97,26 +100,26 @@ async def get_extensions_response( if sync_config.extensions is None: return SlidingSyncResult.Extensions() - to_device_response = None + to_device_coro = None if sync_config.extensions.to_device is not None: - to_device_response = await self.get_to_device_extension_response( + to_device_coro = self.get_to_device_extension_response( sync_config=sync_config, to_device_request=sync_config.extensions.to_device, to_token=to_token, ) - e2ee_response = None + e2ee_coro = None if sync_config.extensions.e2ee is not None: - e2ee_response = await self.get_e2ee_extension_response( + e2ee_coro = self.get_e2ee_extension_response( sync_config=sync_config, e2ee_request=sync_config.extensions.e2ee, to_token=to_token, from_token=from_token, ) - account_data_response = None + account_data_coro = None if sync_config.extensions.account_data is not None: - account_data_response = await self.get_account_data_extension_response( + account_data_coro = self.get_account_data_extension_response( sync_config=sync_config, previous_connection_state=previous_connection_state, new_connection_state=new_connection_state, @@ -127,9 +130,9 @@ async def get_extensions_response( from_token=from_token, ) - receipts_response = None + receipts_coro = None if sync_config.extensions.receipts is not None: - receipts_response = await self.get_receipts_extension_response( + receipts_coro = self.get_receipts_extension_response( sync_config=sync_config, previous_connection_state=previous_connection_state, new_connection_state=new_connection_state, @@ -141,9 +144,9 @@ async def get_extensions_response( from_token=from_token, ) - typing_response = None + typing_coro = None if sync_config.extensions.typing is not None: - typing_response = await self.get_typing_extension_response( + typing_coro = self.get_typing_extension_response( sync_config=sync_config, actual_lists=actual_lists, actual_room_ids=actual_room_ids, @@ -153,6 +156,20 @@ async def get_extensions_response( from_token=from_token, ) + ( + to_device_response, + e2ee_response, + account_data_response, + receipts_response, + typing_response, + ) = await gather_optional_coroutines( + to_device_coro, + e2ee_coro, + account_data_coro, + receipts_coro, + typing_coro, + ) + return SlidingSyncResult.Extensions( to_device=to_device_response, e2ee=e2ee_response, diff --git a/synapse/logging/context.py b/synapse/logging/context.py index ae2b3d11c075..8a2dfeba13ca 100644 --- a/synapse/logging/context.py +++ b/synapse/logging/context.py @@ -37,6 +37,7 @@ from types import TracebackType from typing import ( TYPE_CHECKING, + Any, Awaitable, Callable, Optional, @@ -850,6 +851,45 @@ def run_in_background( return d +def run_coroutine_in_background( + coroutine: typing.Coroutine[Any, Any, R], +) -> "defer.Deferred[R]": + """Run the coroutine, ensuring that the current context is restored after + return from the function, and that the sentinel context is set once the + deferred returned by the function completes. 
+ + Useful for wrapping coroutines that you don't yield or await on (for + instance because you want to pass it to deferred.gatherResults()). + + This is a special case of `run_in_background` where we can accept a + coroutine directly rather than a function. We can do this because coroutines + do not run until called, and so calling an async function without awaiting + cannot change the log contexts. + """ + + current = current_context() + d = defer.ensureDeferred(coroutine) + + # The function may have reset the context before returning, so + # we need to restore it now. + ctx = set_current_context(current) + + # The original context will be restored when the deferred + # completes, but there is nothing waiting for it, so it will + # get leaked into the reactor or some other function which + # wasn't expecting it. We therefore need to reset the context + # here. + # + # (If this feels asymmetric, consider it this way: we are + # effectively forking a new thread of execution. We are + # probably currently within a ``with LoggingContext()`` block, + # which is supposed to have a single entry and exit point. But + # by spawning off another deferred, we are effectively + # adding a new exit point.) + d.addBoth(_set_context_cb, ctx) + return d + + T = TypeVar("T") diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 8618bb0651c0..e1eb8a486320 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -51,7 +51,7 @@ ) import attr -from typing_extensions import Concatenate, Literal, ParamSpec +from typing_extensions import Concatenate, Literal, ParamSpec, Unpack from twisted.internet import defer from twisted.internet.defer import CancelledError @@ -61,6 +61,7 @@ from synapse.logging.context import ( PreserveLoggingContext, make_deferred_yieldable, + run_coroutine_in_background, run_in_background, ) from synapse.util import Clock @@ -344,6 +345,7 @@ async def yieldable_gather_results_delaying_cancellation( T2 = TypeVar("T2") T3 = TypeVar("T3") T4 = TypeVar("T4") +T5 = TypeVar("T5") @overload @@ -402,6 +404,112 @@ def gather_results( # type: ignore[misc] return deferred.addCallback(tuple) +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]]]], +) -> Tuple[Optional[T1]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + Optional[Coroutine[Any, Any, T3]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2], Optional[T3]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + Optional[Coroutine[Any, Any, T3]], + Optional[Coroutine[Any, Any, T4]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4]]: ... + + +@overload +async def gather_optional_coroutines( + *coroutines: Unpack[ + Tuple[ + Optional[Coroutine[Any, Any, T1]], + Optional[Coroutine[Any, Any, T2]], + Optional[Coroutine[Any, Any, T3]], + Optional[Coroutine[Any, Any, T4]], + Optional[Coroutine[Any, Any, T5]], + ] + ], +) -> Tuple[Optional[T1], Optional[T2], Optional[T3], Optional[T4], Optional[T5]]: ... 
+ + +async def gather_optional_coroutines( + *coroutines: Unpack[Tuple[Optional[Coroutine[Any, Any, T1]], ...]], +) -> Tuple[Optional[T1], ...]: + """Helper function that allows waiting on multiple coroutines at once. + + The return value is a tuple of the return values of the coroutines in order. + + If a `None` is passed instead of a coroutine, it will be ignored and a None + is returned in the tuple. + + Note: For typechecking we need to have an explicit overload for each + distinct number of coroutines passed in. If you see type problems, it's + likely because you're using many arguments and you need to add a new + overload above. + """ + + try: + results = await make_deferred_yieldable( + defer.gatherResults( + [ + run_coroutine_in_background(coroutine) + for coroutine in coroutines + if coroutine is not None + ], + consumeErrors=True, + ) + ) + + results_iter = iter(results) + return tuple( + next(results_iter) if coroutine is not None else None + for coroutine in coroutines + ) + except defer.FirstError as dfe: + # unwrap the error from defer.gatherResults. + + # The raised exception's traceback only includes func() etc if + # the 'await' happens before the exception is thrown - ie if the failure + # happens *asynchronously* - otherwise Twisted throws away the traceback as it + # could be large. + # + # We could maybe reconstruct a fake traceback from Failure.frames. Or maybe + # we could throw Twisted into the fires of Mordor. + + # suppress exception chaining, because the FirstError doesn't tell us anything + # very interesting. + assert isinstance(dfe.subFailure.value, BaseException) + raise dfe.subFailure.value from None + + @attr.s(slots=True, auto_attribs=True) class _LinearizerEntry: # The number of things executing. diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index d82822d00dcf..350a2b7c8cdd 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -18,7 +18,7 @@ # # import traceback -from typing import Generator, List, NoReturn, Optional +from typing import Any, Coroutine, Generator, List, NoReturn, Optional, Tuple, TypeVar from parameterized import parameterized_class @@ -39,6 +39,7 @@ ObservableDeferred, concurrently_execute, delay_cancellation, + gather_optional_coroutines, stop_cancellation, timeout_deferred, ) @@ -46,6 +47,8 @@ from tests.server import get_clock from tests.unittest import TestCase +T = TypeVar("T") + class ObservableDeferredTest(TestCase): def test_succeed(self) -> None: @@ -588,3 +591,106 @@ def test_multiple_sleepers_wake(self) -> None: sleeper.wake("name") self.assertTrue(d1.called) self.assertTrue(d2.called) + + +class GatherCoroutineTests(TestCase): + """Tests for `gather_optional_coroutines`""" + + def make_coroutine(self) -> Tuple[Coroutine[Any, Any, T], "defer.Deferred[T]"]: + """Returns a coroutine and a deferred that it is waiting on to resolve""" + + d: "defer.Deferred[T]" = defer.Deferred() + + async def inner() -> T: + with PreserveLoggingContext(): + return await d + + return inner(), d + + def test_single(self) -> None: + "Test passing in a single coroutine works" + + with LoggingContext("test_ctx") as text_ctx: + deferred: "defer.Deferred[None]" + coroutine, deferred = self.make_coroutine() + + gather_deferred = defer.ensureDeferred( + gather_optional_coroutines(coroutine) + ) + + # We shouldn't have a result yet, and should be in the sentinel + # context. 
+ self.assertNoResult(gather_deferred) + self.assertEqual(current_context(), SENTINEL_CONTEXT) + + # Resolving the deferred will resolve the coroutine + deferred.callback(None) + + # All coroutines have resolved, and so we should have the results + result = self.successResultOf(gather_deferred) + self.assertEqual(result, (None,)) + + # We should be back in the normal context. + self.assertEqual(current_context(), text_ctx) + + def test_multiple_resolve(self) -> None: + "Test passing in multiple coroutine that all resolve works" + + with LoggingContext("test_ctx") as test_ctx: + deferred1: "defer.Deferred[int]" + coroutine1, deferred1 = self.make_coroutine() + deferred2: "defer.Deferred[str]" + coroutine2, deferred2 = self.make_coroutine() + + gather_deferred = defer.ensureDeferred( + gather_optional_coroutines(coroutine1, coroutine2) + ) + + # We shouldn't have a result yet, and should be in the sentinel + # context. + self.assertNoResult(gather_deferred) + self.assertEqual(current_context(), SENTINEL_CONTEXT) + + # Even if we resolve one of the coroutines, we shouldn't have a result + # yet + deferred2.callback("test") + self.assertNoResult(gather_deferred) + self.assertEqual(current_context(), SENTINEL_CONTEXT) + + deferred1.callback(1) + + # All coroutines have resolved, and so we should have the results + result = self.successResultOf(gather_deferred) + self.assertEqual(result, (1, "test")) + + # We should be back in the normal context. + self.assertEqual(current_context(), test_ctx) + + def test_multiple_fail(self) -> None: + "Test passing in multiple coroutine where one fails does the right thing" + + with LoggingContext("test_ctx") as test_ctx: + deferred1: "defer.Deferred[int]" + coroutine1, deferred1 = self.make_coroutine() + deferred2: "defer.Deferred[str]" + coroutine2, deferred2 = self.make_coroutine() + + gather_deferred = defer.ensureDeferred( + gather_optional_coroutines(coroutine1, coroutine2) + ) + + # We shouldn't have a result yet, and should be in the sentinel + # context. + self.assertNoResult(gather_deferred) + self.assertEqual(current_context(), SENTINEL_CONTEXT) + + # Throw an exception in one of the coroutines + exc = Exception("test") + deferred2.errback(exc) + + # Expect the gather deferred to immediately fail + result_exc = self.failureResultOf(gather_deferred) + self.assertEqual(result_exc.value, exc) + + # We should be back in the normal context. 
+ self.assertEqual(current_context(), test_ctx) From 9cd3545bcab2f75e933e3a19e7266dfd97987a31 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:54:38 +0000 Subject: [PATCH 040/147] Bump regex from 1.11.0 to 1.11.1 (#17874) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 4b6775cecac5..9d4a85b3d5fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -444,9 +444,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.0" +version = "1.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" dependencies = [ "aho-corasick", "memchr", From 3e750ab0d8738c1126f79c0595a2a44d0d435c74 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:54:48 +0000 Subject: [PATCH 041/147] Bump serde from 1.0.210 to 1.0.213 (#17875) --- Cargo.lock | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9d4a85b3d5fc..c45e7e7e9fcd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -302,9 +302,9 @@ checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" [[package]] name = "proc-macro2" -version = "1.0.82" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ad3d49ab951a01fbaafe34f2ec74122942fe18a3f9814c3268f1bb72042131b" +checksum = "f139b0662de085916d1fb67d2b4169d1addddda1919e696f3252b740b629986e" dependencies = [ "unicode-ident", ] @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.210" +version = "1.0.213" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" +checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.210" +version = "1.0.213" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" +checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" dependencies = [ "proc-macro2", "quote", @@ -551,9 +551,9 @@ checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" -version = "2.0.61" +version = "2.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c993ed8ccba56ae856363b1845da7266a7cb78e1d146c8a32d54b45a8b831fc9" +checksum = "5023162dfcd14ef8f32034d8bcd4cc5ddc61ef7a247c024a33e24e1f24d21b56" dependencies = [ "proc-macro2", "quote", From bf03361c862f85abdc5fde3594299f7b4e228700 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:54:59 +0000 Subject: [PATCH 042/147] Bump anyhow from 1.0.90 to 1.0.91 (#17876) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c45e7e7e9fcd..499cf0c96033 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.90" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"37bf3594c4c988a53154954629820791dde498571819ae4ca50ca811e060cc95" +checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" [[package]] name = "arc-swap" From 330f170c0e2dad8da3cd397c40bad97f0d856a17 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:55:17 +0000 Subject: [PATCH 043/147] Bump bytes from 1.7.2 to 1.8.0 (#17877) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 499cf0c96033..e2f22ba81243 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -67,9 +67,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytes" -version = "1.7.2" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" +checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" [[package]] name = "cfg-if" From 6d65c3944bed99f56e4d636ab30784135ecc708a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:56:12 +0000 Subject: [PATCH 044/147] Bump python-multipart from 0.0.12 to 0.0.16 (#17879) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 9ca72814a44b..322eccb0c793 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1974,13 +1974,13 @@ six = ">=1.5" [[package]] name = "python-multipart" -version = "0.0.12" +version = "0.0.16" description = "A streaming multipart parser for Python" optional = false python-versions = ">=3.8" files = [ - {file = "python_multipart-0.0.12-py3-none-any.whl", hash = "sha256:43dcf96cf65888a9cd3423544dd0d75ac10f7aa0c3c28a175bbcd00c9ce1aebf"}, - {file = "python_multipart-0.0.12.tar.gz", hash = "sha256:045e1f98d719c1ce085ed7f7e1ef9d8ccc8c02ba02b5566d5f7521410ced58cb"}, + {file = "python_multipart-0.0.16-py3-none-any.whl", hash = "sha256:c2759b7b976ef3937214dfb592446b59dfaa5f04682a076f78b117c94776d87a"}, + {file = "python_multipart-0.0.16.tar.gz", hash = "sha256:8dee37b88dab9b59922ca173c35acb627cc12ec74019f5cd4578369c6df36554"}, ] [[package]] From 418fbba8dec740d23ec929391d1c0f7e7721d46c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:56:20 +0000 Subject: [PATCH 045/147] Bump phonenumbers from 8.13.47 to 8.13.48 (#17880) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 322eccb0c793..678146762521 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1451,13 +1451,13 @@ dev = ["jinja2"] [[package]] name = "phonenumbers" -version = "8.13.47" +version = "8.13.48" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.47-py2.py3-none-any.whl", hash = "sha256:5d3c0142ef7055ca5551884352e3b6b93bfe002a0bc95b8eaba39b0e2184541b"}, - {file = "phonenumbers-8.13.47.tar.gz", hash = "sha256:53c5e7c6d431cafe4efdd44956078404ae9bc8b0eacc47be3105d3ccc88aaffa"}, + {file = "phonenumbers-8.13.48-py2.py3-none-any.whl", hash = "sha256:5c51939acefa390eb74119750afb10a85d3c628dc83fd62c52d6f532fcf5d205"}, + {file = "phonenumbers-8.13.48.tar.gz", hash = "sha256:62d8df9b0f3c3c41571c6b396f044ddd999d61631534001b8be7fdf7ba1b18f3"}, ] [[package]] From 5c781b578d5c2bfdf26138ce4345e1cc68525ec6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 30 Oct 2024 11:57:36 +0000 Subject: [PATCH 046/147] Bump ruff from 0.6.9 to 0.7.1 (#17868) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 678146762521..f311e787f98f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2277,29 +2277,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.9" +version = "0.7.1" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.9-py3-none-linux_armv6l.whl", hash = "sha256:064df58d84ccc0ac0fcd63bc3090b251d90e2a372558c0f057c3f75ed73e1ccd"}, - {file = "ruff-0.6.9-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:140d4b5c9f5fc7a7b074908a78ab8d384dd7f6510402267bc76c37195c02a7ec"}, - {file = "ruff-0.6.9-py3-none-macosx_11_0_arm64.whl", hash = "sha256:53fd8ca5e82bdee8da7f506d7b03a261f24cd43d090ea9db9a1dc59d9313914c"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:645d7d8761f915e48a00d4ecc3686969761df69fb561dd914a773c1a8266e14e"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:eae02b700763e3847595b9d2891488989cac00214da7f845f4bcf2989007d577"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d5ccc9e58112441de8ad4b29dcb7a86dc25c5f770e3c06a9d57e0e5eba48829"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:417b81aa1c9b60b2f8edc463c58363075412866ae4e2b9ab0f690dc1e87ac1b5"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3c866b631f5fbce896a74a6e4383407ba7507b815ccc52bcedabb6810fdb3ef7"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b118afbb3202f5911486ad52da86d1d52305b59e7ef2031cea3425142b97d6f"}, - {file = "ruff-0.6.9-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a67267654edc23c97335586774790cde402fb6bbdb3c2314f1fc087dee320bfa"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:3ef0cc774b00fec123f635ce5c547dac263f6ee9fb9cc83437c5904183b55ceb"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:12edd2af0c60fa61ff31cefb90aef4288ac4d372b4962c2864aeea3a1a2460c0"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_i686.whl", hash = "sha256:55bb01caeaf3a60b2b2bba07308a02fca6ab56233302406ed5245180a05c5625"}, - {file = "ruff-0.6.9-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:925d26471fa24b0ce5a6cdfab1bb526fb4159952385f386bdcc643813d472039"}, - {file = "ruff-0.6.9-py3-none-win32.whl", hash = 
"sha256:eb61ec9bdb2506cffd492e05ac40e5bc6284873aceb605503d8494180d6fc84d"}, - {file = "ruff-0.6.9-py3-none-win_amd64.whl", hash = "sha256:785d31851c1ae91f45b3d8fe23b8ae4b5170089021fbb42402d811135f0b7117"}, - {file = "ruff-0.6.9-py3-none-win_arm64.whl", hash = "sha256:a9641e31476d601f83cd602608739a0840e348bda93fec9f1ee816f8b6798b93"}, - {file = "ruff-0.6.9.tar.gz", hash = "sha256:b076ef717a8e5bc819514ee1d602bbdca5b4420ae13a9cf61a0c0a4f53a2baa2"}, + {file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"}, + {file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"}, + {file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"}, + {file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"}, + {file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"}, + {file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"}, + {file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"}, + {file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"}, + {file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"}, ] [[package]] @@ -3122,4 +3122,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "c8a22f901970b2f851151e731532757fd3acf7ba02930952636d2e6c5c9c0c90" +content-hash = "aa1f6d97809596c23a6d160c0c5804971dad0ba49e34b137bbfb79df038fe6f0" diff --git a/pyproject.toml b/pyproject.toml index 5af9a4145bfc..93e9f38ca90b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. 
Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -ruff = "0.6.9" +ruff = "0.7.1" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 3ae80b0de4abeff2bdbbb9d8735057e6b301a670 Mon Sep 17 00:00:00 2001 From: Lama Date: Wed, 30 Oct 2024 19:55:23 +0300 Subject: [PATCH 047/147] Check if user is in room before being able to tag it (#17839) Fix #17819 --- changelog.d/17839.bugfix | 1 + synapse/handlers/room_member.py | 20 +++++++ synapse/rest/client/tags.py | 7 +++ tests/rest/client/test_tags.py | 95 +++++++++++++++++++++++++++++++++ 4 files changed, 123 insertions(+) create mode 100644 changelog.d/17839.bugfix create mode 100644 tests/rest/client/test_tags.py diff --git a/changelog.d/17839.bugfix b/changelog.d/17839.bugfix new file mode 100644 index 000000000000..57667a6df5d7 --- /dev/null +++ b/changelog.d/17839.bugfix @@ -0,0 +1 @@ +Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. \ No newline at end of file diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 75c60e3c34de..70cbbc352be1 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -1190,6 +1190,26 @@ async def update_membership_locked( origin_server_ts=origin_server_ts, ) + async def check_for_any_membership_in_room( + self, *, user_id: str, room_id: str + ) -> None: + """ + Check if the user has any membership in the room and raise error if not. + + Args: + user_id: The user to check. + room_id: The room to check. + + Raises: + AuthError if the user doesn't have any membership in the room. + """ + result = await self.store.get_local_current_membership_for_user_in_room( + user_id=user_id, room_id=room_id + ) + + if result is None or result == (None, None): + raise AuthError(403, f"User {user_id} has no membership in room {room_id}") + async def _should_perform_remote_join( self, user_id: str, diff --git a/synapse/rest/client/tags.py b/synapse/rest/client/tags.py index 554bcb95dd88..b6648f3499f0 100644 --- a/synapse/rest/client/tags.py +++ b/synapse/rest/client/tags.py @@ -78,6 +78,7 @@ def __init__(self, hs: "HomeServer"): super().__init__() self.auth = hs.get_auth() self.handler = hs.get_account_data_handler() + self.room_member_handler = hs.get_room_member_handler() async def on_PUT( self, request: SynapseRequest, user_id: str, room_id: str, tag: str @@ -85,6 +86,12 @@ async def on_PUT( requester = await self.auth.get_user_by_req(request) if user_id != requester.user.to_string(): raise AuthError(403, "Cannot add tags for other users.") + # Check if the user has any membership in the room and raise error if not. + # Although it's not harmful for users to tag random rooms, it's just superfluous + # data we don't need to track or allow. + await self.room_member_handler.check_for_any_membership_in_room( + user_id=user_id, room_id=room_id + ) body = parse_json_object_from_request(request) diff --git a/tests/rest/client/test_tags.py b/tests/rest/client/test_tags.py new file mode 100644 index 000000000000..5d596409e18c --- /dev/null +++ b/tests/rest/client/test_tags.py @@ -0,0 +1,95 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. 
+# +# Copyright (C) 2024 New Vector, Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + +"""Tests REST events for /tags paths.""" + +from http import HTTPStatus + +import synapse.rest.admin +from synapse.rest.client import login, room, tags + +from tests import unittest + + +class RoomTaggingTestCase(unittest.HomeserverTestCase): + """Tests /user/$user_id/rooms/$room_id/tags/$tag REST API.""" + + servlets = [ + room.register_servlets, + tags.register_servlets, + login.register_servlets, + synapse.rest.admin.register_servlets_for_client_rest_resource, + ] + + def test_put_tag_checks_room_membership(self) -> None: + """ + Test that a user can add a tag to a room if they have membership to the room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + room_id = self.helper.create_room_as(user1_id, tok=user1_tok) + tag = "test_tag" + + # Make the request + channel = self.make_request( + "PUT", + f"/user/{user1_id}/rooms/{room_id}/tags/{tag}", + content={"order": 0.5}, + access_token=user1_tok, + ) + # Check that the request was successful + self.assertEqual(channel.code, HTTPStatus.OK, channel.result) + + def test_put_tag_fails_if_not_in_room(self) -> None: + """ + Test that a user cannot add a tag to a room if they don't have membership to the + room. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + # Create the room with user2 (user1 has no membership in the room) + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + tag = "test_tag" + + # Make the request + channel = self.make_request( + "PUT", + f"/user/{user1_id}/rooms/{room_id}/tags/{tag}", + content={"order": 0.5}, + access_token=user1_tok, + ) + # Check that the request failed with the correct error + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) + + def test_put_tag_fails_if_room_does_not_exist(self) -> None: + """ + Test that a user cannot add a tag to a room if the room doesn't exist (therefore + no membership in the room.) + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + room_id = "!nonexistent:test" + tag = "test_tag" + + # Make the request + channel = self.make_request( + "PUT", + f"/user/{user1_id}/rooms/{room_id}/tags/{tag}", + content={"order": 0.5}, + access_token=user1_tok, + ) + # Check that the request failed with the correct error + self.assertEqual(channel.code, HTTPStatus.FORBIDDEN, channel.result) From 7987d5e638b3c60494f4695f67bc0000a804f68d Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 30 Oct 2024 19:34:11 -0500 Subject: [PATCH 048/147] Remove `Generator` in `_quarantine_media_txn()` (#17813) --- changelog.d/17813.bugfix | 1 + synapse/storage/databases/main/room.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17813.bugfix diff --git a/changelog.d/17813.bugfix b/changelog.d/17813.bugfix new file mode 100644 index 000000000000..5dd276709b0b --- /dev/null +++ b/changelog.d/17813.bugfix @@ -0,0 +1 @@ +Avoid lost data on some database query retries. 
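The lost-data scenario behind this commit (and the next few like it) is that the argument passed to `txn.execute_batch` may be iterated again if the transaction is retried; a generator is exhausted after the first pass, so a retry silently operates on nothing, while a list can be replayed. A minimal sketch of that failure mode, assuming retry-driven re-iteration (`run_batch` is a hypothetical stand-in for re-running the same batch, not Synapse code):

```python
def run_batch(args) -> int:
    # Hypothetical stand-in for txn.execute_batch(sql, args): just counts
    # how many parameter tuples it actually sees.
    return sum(1 for _ in args)


gen_args = ((i,) for i in range(3))
list_args = [(i,) for i in range(3)]

print(run_batch(gen_args), run_batch(gen_args))    # 3 0  -> rows lost on retry
print(run_batch(list_args), run_batch(list_args))  # 3 3  -> safe to replay
```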
diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index e0b7b7e1949f..33569a4391e6 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -1175,7 +1175,7 @@ def _quarantine_media_txn( SET quarantined_by = ? WHERE media_origin = ? AND media_id = ? """, - ((quarantined_by, origin, media_id) for origin, media_id in remote_mxcs), + [(quarantined_by, origin, media_id) for origin, media_id in remote_mxcs], ) total_media_quarantined += txn.rowcount if txn.rowcount > 0 else 0 From af59a99933a54abf34296f12531d56d9342e2592 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 30 Oct 2024 20:14:36 -0500 Subject: [PATCH 049/147] Remove `Generator` from 4 places in `PersistEventStore` (#17818) Context: https://github.com/matrix-org/synapse/issues/15439 (https://github.com/element-hq/synapse/issues/15439) Also see discussion in https://github.com/element-hq/synapse/pull/17813 --- changelog.d/17818.bugfix | 1 + synapse/storage/databases/main/events.py | 20 ++++++++++---------- 2 files changed, 11 insertions(+), 10 deletions(-) create mode 100644 changelog.d/17818.bugfix diff --git a/changelog.d/17818.bugfix b/changelog.d/17818.bugfix new file mode 100644 index 000000000000..5dd276709b0b --- /dev/null +++ b/changelog.d/17818.bugfix @@ -0,0 +1 @@ +Avoid lost data on some database query retries. diff --git a/synapse/storage/databases/main/events.py b/synapse/storage/databases/main/events.py index e5eae9cee997..dd6ac909e9fb 100644 --- a/synapse/storage/databases/main/events.py +++ b/synapse/storage/databases/main/events.py @@ -1686,7 +1686,7 @@ def _update_current_state_txn( """ txn.execute_batch( sql, - ( + [ ( stream_id, self._instance_name, @@ -1699,17 +1699,17 @@ def _update_current_state_txn( state_key, ) for etype, state_key in itertools.chain(to_delete, to_insert) - ), + ], ) # Now we actually update the current_state_events table txn.execute_batch( "DELETE FROM current_state_events" " WHERE room_id = ? AND type = ? AND state_key = ?", - ( + [ (room_id, etype, state_key) for etype, state_key in itertools.chain(to_delete, to_insert) - ), + ], ) # We include the membership in the current state table, hence we do @@ -1799,11 +1799,11 @@ def _update_current_state_txn( txn.execute_batch( "DELETE FROM local_current_membership" " WHERE room_id = ? AND user_id = ?", - ( + [ (room_id, state_key) for etype, state_key in itertools.chain(to_delete, to_insert) if etype == EventTypes.Member and self.is_mine_id(state_key) - ), + ], ) if to_insert: @@ -3208,7 +3208,7 @@ def _set_push_actions_for_event_and_users_txn( if notifiable_events: txn.execute_batch( sql, - ( + [ ( event.room_id, event.internal_metadata.stream_ordering, @@ -3216,18 +3216,18 @@ def _set_push_actions_for_event_and_users_txn( event.event_id, ) for event in notifiable_events - ), + ], ) # Now we delete the staging area for *all* events that were being # persisted. 
txn.execute_batch( "DELETE FROM event_push_actions_staging WHERE event_id = ?", - ( + [ (event.event_id,) for event, _ in all_events_and_contexts if event.internal_metadata.is_notifiable() - ), + ], ) def _remove_push_actions_for_event_id_txn( From 2e5fe3f187490c8d593313ce70336f61bb010f25 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 30 Oct 2024 20:15:57 -0500 Subject: [PATCH 050/147] Remove `Generator` in `store_search_entries_txn` (#17817) Context: https://github.com/matrix-org/synapse/issues/15439 (https://github.com/element-hq/synapse/issues/15439) Also see discussion in https://github.com/element-hq/synapse/pull/17813 --- changelog.d/17817.bugfix | 1 + synapse/storage/databases/main/search.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17817.bugfix diff --git a/changelog.d/17817.bugfix b/changelog.d/17817.bugfix new file mode 100644 index 000000000000..5dd276709b0b --- /dev/null +++ b/changelog.d/17817.bugfix @@ -0,0 +1 @@ +Avoid lost data on some database query retries. diff --git a/synapse/storage/databases/main/search.py b/synapse/storage/databases/main/search.py index b436275f3fda..1d5c5e72ffc3 100644 --- a/synapse/storage/databases/main/search.py +++ b/synapse/storage/databases/main/search.py @@ -94,7 +94,7 @@ def store_search_entries_txn( VALUES (?,?,?,to_tsvector('english', ?),?,?) """ - args1 = ( + args1 = [ ( entry.event_id, entry.room_id, @@ -104,7 +104,7 @@ def store_search_entries_txn( entry.origin_server_ts, ) for entry in entries - ) + ] txn.execute_batch(sql, args1) From 0c429fae1ded5b7348abf1d7695fec908018c3c1 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 30 Oct 2024 20:16:24 -0500 Subject: [PATCH 051/147] Remove `Generator` in `update_cached_last_access_time` (#17816) Context: https://github.com/matrix-org/synapse/issues/15439 (https://github.com/element-hq/synapse/issues/15439) Also see discussion in https://github.com/element-hq/synapse/pull/17813 --- changelog.d/17816.bugfix | 1 + synapse/storage/databases/main/media_repository.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17816.bugfix diff --git a/changelog.d/17816.bugfix b/changelog.d/17816.bugfix new file mode 100644 index 000000000000..5dd276709b0b --- /dev/null +++ b/changelog.d/17816.bugfix @@ -0,0 +1 @@ +Avoid lost data on some database query retries. diff --git a/synapse/storage/databases/main/media_repository.py b/synapse/storage/databases/main/media_repository.py index 7617fd3ad498..7a96e2543218 100644 --- a/synapse/storage/databases/main/media_repository.py +++ b/synapse/storage/databases/main/media_repository.py @@ -729,10 +729,10 @@ def update_cache_txn(txn: LoggingTransaction) -> None: txn.execute_batch( sql, - ( + [ (time_ms, media_origin, media_id) for media_origin, media_id in remote_media - ), + ], ) sql = ( @@ -740,7 +740,7 @@ def update_cache_txn(txn: LoggingTransaction) -> None: " WHERE media_id = ?" 
) - txn.execute_batch(sql, ((time_ms, media_id) for media_id in local_media)) + txn.execute_batch(sql, [(time_ms, media_id) for media_id in local_media]) await self.db_pool.runInteraction( "update_cached_last_access_time", update_cache_txn From 034d472688a83c5a41bda8a27c485111ecf95138 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 30 Oct 2024 20:16:49 -0500 Subject: [PATCH 052/147] Remove `Generator` in `_purge_unreferenced_state_groups` twice (#17815) Context: https://github.com/matrix-org/synapse/issues/15439 (https://github.com/element-hq/synapse/issues/15439) Also see discussion in https://github.com/element-hq/synapse/pull/17813 --- changelog.d/17815.bugfix | 1 + synapse/storage/databases/state/store.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17815.bugfix diff --git a/changelog.d/17815.bugfix b/changelog.d/17815.bugfix new file mode 100644 index 000000000000..5dd276709b0b --- /dev/null +++ b/changelog.d/17815.bugfix @@ -0,0 +1 @@ +Avoid lost data on some database query retries. diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index aea71b8fcc5a..875dba33496c 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -804,11 +804,11 @@ def _purge_unreferenced_state_groups( logger.info("[purge] removing redundant state groups") txn.execute_batch( "DELETE FROM state_groups_state WHERE state_group = ?", - ((sg,) for sg in state_groups_to_delete), + [(sg,) for sg in state_groups_to_delete], ) txn.execute_batch( "DELETE FROM state_groups WHERE id = ?", - ((sg,) for sg in state_groups_to_delete), + [(sg,) for sg in state_groups_to_delete], ) @trace From 47fe6df0134441211588c0bf8fc8f1f18c489c38 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Wed, 30 Oct 2024 21:21:22 -0500 Subject: [PATCH 053/147] Remove `Generator` in `_prune_old_outbound_device_pokes` (#17814) Context: https://github.com/matrix-org/synapse/issues/15439 (https://github.com/element-hq/synapse/issues/15439) Also see discussion in https://github.com/element-hq/synapse/pull/17813 --- changelog.d/17814.bugfix | 1 + synapse/storage/databases/main/devices.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17814.bugfix diff --git a/changelog.d/17814.bugfix b/changelog.d/17814.bugfix new file mode 100644 index 000000000000..5dd276709b0b --- /dev/null +++ b/changelog.d/17814.bugfix @@ -0,0 +1 @@ +Avoid lost data on some database query retries. diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index a83df4075a39..808894325313 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -1422,7 +1422,7 @@ def _prune_txn(txn: LoggingTransaction) -> None: DELETE FROM device_lists_outbound_last_success WHERE destination = ? AND user_id = ? """ - txn.execute_batch(sql, ((row[0], row[1]) for row in rows)) + txn.execute_batch(sql, [(row[0], row[1]) for row in rows]) logger.info("Pruned %d device list outbound pokes", count) From c705beebf7e95f25624ec38280192d96bc207626 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Thu, 31 Oct 2024 03:55:30 -0600 Subject: [PATCH 054/147] Support & use stable endpoints for MSC4151 (#17374) https://github.com/matrix-org/matrix-spec-proposals/pull/4151 has finished FCP. 
See https://github.com/element-hq/synapse/issues/17373 for unstable endpoint removal --------- Co-authored-by: Andrew Morgan --- changelog.d/17374.feature | 1 + synapse/rest/client/reporting.py | 35 ++++++++++++++++++----------- tests/rest/client/test_reporting.py | 31 ++----------------------- 3 files changed, 25 insertions(+), 42 deletions(-) create mode 100644 changelog.d/17374.feature diff --git a/changelog.d/17374.feature b/changelog.d/17374.feature new file mode 100644 index 000000000000..3321f1894726 --- /dev/null +++ b/changelog.d/17374.feature @@ -0,0 +1 @@ +Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. \ No newline at end of file diff --git a/synapse/rest/client/reporting.py b/synapse/rest/client/reporting.py index 97bd5d8c02c2..949f07703518 100644 --- a/synapse/rest/client/reporting.py +++ b/synapse/rest/client/reporting.py @@ -20,11 +20,13 @@ # import logging +import re from http import HTTPStatus from typing import TYPE_CHECKING, Tuple from synapse._pydantic_compat import StrictStr from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError +from synapse.api.urls import CLIENT_API_PREFIX from synapse.http.server import HttpServer from synapse.http.servlet import ( RestServlet, @@ -105,18 +107,17 @@ async def on_POST( class ReportRoomRestServlet(RestServlet): """This endpoint lets clients report a room for abuse. - Whilst MSC4151 is not yet merged, this unstable endpoint is enabled on matrix.org - for content moderation purposes, and therefore backwards compatibility should be - carefully considered when changing anything on this endpoint. - - More details on the MSC: https://github.com/matrix-org/matrix-spec-proposals/pull/4151 + Introduced by MSC4151: https://github.com/matrix-org/matrix-spec-proposals/pull/4151 """ - PATTERNS = client_patterns( - "/org.matrix.msc4151/rooms/(?P[^/]*)/report$", - releases=[], - v1=False, - unstable=True, + # Cast the Iterable to a list so that we can `append` below. 
+ PATTERNS = list( + client_patterns( + "/rooms/(?P[^/]*)/report$", + releases=("v3",), + unstable=False, + v1=False, + ) ) def __init__(self, hs: "HomeServer"): @@ -126,6 +127,16 @@ def __init__(self, hs: "HomeServer"): self.clock = hs.get_clock() self.store = hs.get_datastores().main + # TODO: Remove the unstable variant after 2-3 releases + # https://github.com/element-hq/synapse/issues/17373 + if hs.config.experimental.msc4151_enabled: + self.PATTERNS.append( + re.compile( + f"^{CLIENT_API_PREFIX}/unstable/org.matrix.msc4151" + "/rooms/(?P[^/]*)/report$" + ) + ) + class PostBody(RequestBodyModel): reason: StrictStr @@ -153,6 +164,4 @@ async def on_POST( def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: ReportEventRestServlet(hs).register(http_server) - - if hs.config.experimental.msc4151_enabled: - ReportRoomRestServlet(hs).register(http_server) + ReportRoomRestServlet(hs).register(http_server) diff --git a/tests/rest/client/test_reporting.py b/tests/rest/client/test_reporting.py index 009deb9cb056..723553979f7d 100644 --- a/tests/rest/client/test_reporting.py +++ b/tests/rest/client/test_reporting.py @@ -156,58 +156,31 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.room_id = self.helper.create_room_as( self.other_user, tok=self.other_user_tok, is_public=True ) - self.report_path = ( - f"/_matrix/client/unstable/org.matrix.msc4151/rooms/{self.room_id}/report" - ) + self.report_path = f"/_matrix/client/v3/rooms/{self.room_id}/report" - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_reason_str(self) -> None: data = {"reason": "this makes me sad"} self._assert_status(200, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_no_reason(self) -> None: data = {"not_reason": "for typechecking"} self._assert_status(400, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_reason_nonstring(self) -> None: data = {"reason": 42} self._assert_status(400, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_reason_null(self) -> None: data = {"reason": None} self._assert_status(400, data) - @unittest.override_config( - { - "experimental_features": {"msc4151_enabled": True}, - } - ) def test_cannot_report_nonexistent_room(self) -> None: """ Tests that we don't accept event reports for rooms which do not exist. 
""" channel = self.make_request( "POST", - "/_matrix/client/unstable/org.matrix.msc4151/rooms/!bloop:example.org/report", + "/_matrix/client/v3/rooms/!bloop:example.org/report", {"reason": "i am very sad"}, access_token=self.other_user_tok, shorthand=False, From da7d71e2a285438f18d1dea236956318d0a0d4a4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:13:16 +0000 Subject: [PATCH 055/147] Bump mypy-zope from 1.0.7 to 1.0.8 (#17898) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index f311e787f98f..85084eb14176 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1380,17 +1380,17 @@ files = [ [[package]] name = "mypy-zope" -version = "1.0.7" +version = "1.0.8" description = "Plugin for mypy to support zope interfaces" optional = false python-versions = "*" files = [ - {file = "mypy_zope-1.0.7-py3-none-any.whl", hash = "sha256:f19de249574319d81083b15f8a022c6b15583582f23340a860922141f1b651ca"}, - {file = "mypy_zope-1.0.7.tar.gz", hash = "sha256:32a79ce78647c0bea61e7e0c0eb1233fcb97bb94e8950cca73f17d3419c602f7"}, + {file = "mypy_zope-1.0.8-py3-none-any.whl", hash = "sha256:8794a77dae0c7e2f28b8ac48569091310b3ee45bb9d6cd4797dcb837c40f9976"}, + {file = "mypy_zope-1.0.8.tar.gz", hash = "sha256:854303a95aefc4289e8a0796808e002c2c7ecde0a10a8f7b8f48092f94ef9b9f"}, ] [package.dependencies] -mypy = ">=1.0.0,<1.12.0" +mypy = ">=1.0.0,<1.13.0" "zope.interface" = "*" "zope.schema" = "*" From b5493899c5a9df9c83e5b4fafa42aa8c284f1f69 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:14:01 +0000 Subject: [PATCH 056/147] Bump serde from 1.0.213 to 1.0.214 (#17900) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e2f22ba81243..113f28bb9883 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ea7893ff5e2466df8d720bb615088341b295f849602c6956047f8f80f0e9bc1" +checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.213" +version = "1.0.214" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e85ad2009c50b58e87caa8cd6dac16bdf511bbfb7af6c33df902396aa480fa5" +checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" dependencies = [ "proc-macro2", "quote", From 541a00956434c62c3cf777db4c14d93e2e58bc8a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:14:10 +0000 Subject: [PATCH 057/147] Bump anyhow from 1.0.91 to 1.0.92 (#17901) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 113f28bb9883..2e23b95f85a1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.91" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c042108f3ed77fd83760a5fd79b53be043192bb3b9dba91d8c574c0ada7850c8" +checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" [[package]] name = "arc-swap" From 
5580a820aec4a20de36da11fad42fc3a8ed3740c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:14:46 +0000 Subject: [PATCH 058/147] Bump ruff from 0.7.1 to 0.7.2 (#17897) --- poetry.lock | 40 ++++++++++++++++++++-------------------- pyproject.toml | 2 +- 2 files changed, 21 insertions(+), 21 deletions(-) diff --git a/poetry.lock b/poetry.lock index 85084eb14176..787041b5b39f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2277,29 +2277,29 @@ files = [ [[package]] name = "ruff" -version = "0.7.1" +version = "0.7.2" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.7.1-py3-none-linux_armv6l.whl", hash = "sha256:cb1bc5ed9403daa7da05475d615739cc0212e861b7306f314379d958592aaa89"}, - {file = "ruff-0.7.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27c1c52a8d199a257ff1e5582d078eab7145129aa02721815ca8fa4f9612dc35"}, - {file = "ruff-0.7.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:588a34e1ef2ea55b4ddfec26bbe76bc866e92523d8c6cdec5e8aceefeff02d99"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94fc32f9cdf72dc75c451e5f072758b118ab8100727168a3df58502b43a599ca"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:985818742b833bffa543a84d1cc11b5e6871de1b4e0ac3060a59a2bae3969250"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:32f1e8a192e261366c702c5fb2ece9f68d26625f198a25c408861c16dc2dea9c"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:699085bf05819588551b11751eff33e9ca58b1b86a6843e1b082a7de40da1565"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:344cc2b0814047dc8c3a8ff2cd1f3d808bb23c6658db830d25147339d9bf9ea7"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4316bbf69d5a859cc937890c7ac7a6551252b6a01b1d2c97e8fc96e45a7c8b4a"}, - {file = "ruff-0.7.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79d3af9dca4c56043e738a4d6dd1e9444b6d6c10598ac52d146e331eb155a8ad"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:c5c121b46abde94a505175524e51891f829414e093cd8326d6e741ecfc0a9112"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:8422104078324ea250886954e48f1373a8fe7de59283d747c3a7eca050b4e378"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_i686.whl", hash = "sha256:56aad830af8a9db644e80098fe4984a948e2b6fc2e73891538f43bbe478461b8"}, - {file = "ruff-0.7.1-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:658304f02f68d3a83c998ad8bf91f9b4f53e93e5412b8f2388359d55869727fd"}, - {file = "ruff-0.7.1-py3-none-win32.whl", hash = "sha256:b517a2011333eb7ce2d402652ecaa0ac1a30c114fbbd55c6b8ee466a7f600ee9"}, - {file = "ruff-0.7.1-py3-none-win_amd64.whl", hash = "sha256:f38c41fcde1728736b4eb2b18850f6d1e3eedd9678c914dede554a70d5241307"}, - {file = "ruff-0.7.1-py3-none-win_arm64.whl", hash = "sha256:19aa200ec824c0f36d0c9114c8ec0087082021732979a359d6f3c390a6ff2a37"}, - {file = "ruff-0.7.1.tar.gz", hash = "sha256:9d8a41d4aa2dad1575adb98a82870cf5db5f76b2938cf2206c22c940034a36f4"}, + {file = "ruff-0.7.2-py3-none-linux_armv6l.whl", hash = "sha256:b73f873b5f52092e63ed540adefc3c36f1f803790ecf2590e1df8bf0a9f72cb8"}, + {file = "ruff-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = 
"sha256:5b813ef26db1015953daf476202585512afd6a6862a02cde63f3bafb53d0b2d4"}, + {file = "ruff-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:853277dbd9675810c6826dad7a428d52a11760744508340e66bf46f8be9701d9"}, + {file = "ruff-0.7.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21aae53ab1490a52bf4e3bf520c10ce120987b047c494cacf4edad0ba0888da2"}, + {file = "ruff-0.7.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ccc7e0fc6e0cb3168443eeadb6445285abaae75142ee22b2b72c27d790ab60ba"}, + {file = "ruff-0.7.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd77877a4e43b3a98e5ef4715ba3862105e299af0c48942cc6d51ba3d97dc859"}, + {file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e00163fb897d35523c70d71a46fbaa43bf7bf9af0f4534c53ea5b96b2e03397b"}, + {file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3c54b538633482dc342e9b634d91168fe8cc56b30a4b4f99287f4e339103e88"}, + {file = "ruff-0.7.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b792468e9804a204be221b14257566669d1db5c00d6bb335996e5cd7004ba80"}, + {file = "ruff-0.7.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dba53ed84ac19ae4bfb4ea4bf0172550a2285fa27fbb13e3746f04c80f7fa088"}, + {file = "ruff-0.7.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b19fafe261bf741bca2764c14cbb4ee1819b67adb63ebc2db6401dcd652e3748"}, + {file = "ruff-0.7.2-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:28bd8220f4d8f79d590db9e2f6a0674f75ddbc3847277dd44ac1f8d30684b828"}, + {file = "ruff-0.7.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9fd67094e77efbea932e62b5d2483006154794040abb3a5072e659096415ae1e"}, + {file = "ruff-0.7.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:576305393998b7bd6c46018f8104ea3a9cb3fa7908c21d8580e3274a3b04b691"}, + {file = "ruff-0.7.2-py3-none-win32.whl", hash = "sha256:fa993cfc9f0ff11187e82de874dfc3611df80852540331bc85c75809c93253a8"}, + {file = "ruff-0.7.2-py3-none-win_amd64.whl", hash = "sha256:dd8800cbe0254e06b8fec585e97554047fb82c894973f7ff18558eee33d1cb88"}, + {file = "ruff-0.7.2-py3-none-win_arm64.whl", hash = "sha256:bb8368cd45bba3f57bb29cbb8d64b4a33f8415d0149d2655c5c8539452ce7760"}, + {file = "ruff-0.7.2.tar.gz", hash = "sha256:2b14e77293380e475b4e3a7a368e14549288ed2931fce259a6f99978669e844f"}, ] [[package]] @@ -3122,4 +3122,4 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" python-versions = "^3.8.0" -content-hash = "aa1f6d97809596c23a6d160c0c5804971dad0ba49e34b137bbfb79df038fe6f0" +content-hash = "eaded26b4770b9d19bfcee6dee8b96203df358ce51939d9b90fdbcf605e2f5fd" diff --git a/pyproject.toml b/pyproject.toml index 93e9f38ca90b..3ec01701c3ba 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. 
-ruff = "0.7.1" +ruff = "0.7.2" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 0932c775399575bba509728dc6721d1e48a6f689 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 4 Nov 2024 10:17:58 -0600 Subject: [PATCH 059/147] Sliding Sync: Lazy-loading room members on incremental sync (remember memberships) (#17809) Lazy-loading room members on incremental sync and remember which memberships we've sent down the connection before (up-to 100) Fix https://github.com/element-hq/synapse/issues/17804 --- changelog.d/17809.bugfix | 1 + synapse/handlers/sliding_sync/__init__.py | 168 +++++-- tests/handlers/test_sliding_sync.py | 421 +++++++++++++++++- .../sliding_sync/test_rooms_required_state.py | 257 ++++++++++- 4 files changed, 788 insertions(+), 59 deletions(-) create mode 100644 changelog.d/17809.bugfix diff --git a/changelog.d/17809.bugfix b/changelog.d/17809.bugfix new file mode 100644 index 000000000000..e244a36bd36a --- /dev/null +++ b/changelog.d/17809.bugfix @@ -0,0 +1 @@ +Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index a1a6728fb934..85cfbc6dbf57 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -12,6 +12,7 @@ # . # +import itertools import logging from itertools import chain from typing import TYPE_CHECKING, AbstractSet, Dict, List, Mapping, Optional, Set, Tuple @@ -79,6 +80,15 @@ ["initial"], ) +# Limit the number of state_keys we should remember sending down the connection for each +# (room_id, user_id). We don't want to store and pull out too much data in the database. +# +# 100 is an arbitrary but small-ish number. The idea is that we probably won't send down +# too many redundant member state events (that the client already knows about) for a +# given ongoing conversation if we keep 100 around. Most rooms don't have 100 members +# anyway and it takes a while to cycle through 100 members. +MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100 + class SlidingSyncHandler: def __init__(self, hs: "HomeServer"): @@ -873,6 +883,14 @@ async def get_room_sync_data( # # Calculate the `StateFilter` based on the `required_state` for the room required_state_filter = StateFilter.none() + # The requested `required_state_map` with the lazy membership expanded and + # `$ME` replaced with the user's ID. This allows us to see what membership we've + # sent down to the client in the next request. + # + # Make a copy so we can modify it. Still need to be careful to make a copy of + # the state key sets if we want to add/remove from them. We could make a deep + # copy but this saves us some work. + expanded_required_state_map = dict(room_sync_config.required_state_map) if room_membership_for_user_at_to_token.membership not in ( Membership.INVITE, Membership.KNOCK, @@ -938,21 +956,48 @@ async def get_room_sync_data( ): lazy_load_room_members = True # Everyone in the timeline is relevant + # + # FIXME: We probably also care about invite, ban, kick, targets, etc + # but the spec only mentions "senders". 
timeline_membership: Set[str] = set() if timeline_events is not None: for timeline_event in timeline_events: timeline_membership.add(timeline_event.sender) + # Update the required state filter so we pick up the new + # membership for user_id in timeline_membership: required_state_types.append( (EventTypes.Member, user_id) ) - # FIXME: We probably also care about invite, ban, kick, targets, etc - # but the spec only mentions "senders". + # Add an explicit entry for each user in the timeline + # + # Make a new set or copy of the state key set so we can + # modify it without affecting the original + # `required_state_map` + expanded_required_state_map[EventTypes.Member] = ( + expanded_required_state_map.get( + EventTypes.Member, set() + ) + | timeline_membership + ) elif state_key == StateValues.ME: num_others += 1 required_state_types.append((state_type, user.to_string())) + # Replace `$ME` with the user's ID so we can deduplicate + # when someone requests the same state with `$ME` or with + # their user ID. + # + # Make a new set or copy of the state key set so we can + # modify it without affecting the original + # `required_state_map` + expanded_required_state_map[EventTypes.Member] = ( + expanded_required_state_map.get( + EventTypes.Member, set() + ) + | {user.to_string()} + ) else: num_others += 1 required_state_types.append((state_type, state_key)) @@ -1016,8 +1061,8 @@ async def get_room_sync_data( changed_required_state_map, added_state_filter = ( _required_state_changes( user.to_string(), - previous_room_config=prev_room_sync_config, - room_sync_config=room_sync_config, + prev_required_state_map=prev_room_sync_config.required_state_map, + request_required_state_map=expanded_required_state_map, state_deltas=room_state_delta_id_map, ) ) @@ -1131,7 +1176,9 @@ async def get_room_sync_data( # sensible order again. bump_stamp = 0 - room_sync_required_state_map_to_persist = room_sync_config.required_state_map + room_sync_required_state_map_to_persist: Mapping[str, AbstractSet[str]] = ( + expanded_required_state_map + ) if changed_required_state_map: room_sync_required_state_map_to_persist = changed_required_state_map @@ -1185,7 +1232,10 @@ async def get_room_sync_data( ) else: - new_connection_state.room_configs[room_id] = room_sync_config + new_connection_state.room_configs[room_id] = RoomSyncConfig( + timeline_limit=room_sync_config.timeline_limit, + required_state_map=room_sync_required_state_map_to_persist, + ) set_tag(SynapseTags.RESULT_PREFIX + "initial", initial) @@ -1320,8 +1370,8 @@ async def _get_bump_stamp( def _required_state_changes( user_id: str, *, - previous_room_config: "RoomSyncConfig", - room_sync_config: RoomSyncConfig, + prev_required_state_map: Mapping[str, AbstractSet[str]], + request_required_state_map: Mapping[str, AbstractSet[str]], state_deltas: StateMap[str], ) -> Tuple[Optional[Mapping[str, AbstractSet[str]]], StateFilter]: """Calculates the changes between the required state room config from the @@ -1342,10 +1392,6 @@ def _required_state_changes( and the state filter to use to fetch extra current state that we need to return. """ - - prev_required_state_map = previous_room_config.required_state_map - request_required_state_map = room_sync_config.required_state_map - if prev_required_state_map == request_required_state_map: # There has been no change. Return immediately. return None, StateFilter.none() @@ -1378,12 +1424,19 @@ def _required_state_changes( # client. 
Passed to `StateFilter.from_types(...)` added: List[Tuple[str, Optional[str]]] = [] + # Convert the list of state deltas to map from type to state_keys that have + # changed. + changed_types_to_state_keys: Dict[str, Set[str]] = {} + for event_type, state_key in state_deltas: + changed_types_to_state_keys.setdefault(event_type, set()).add(state_key) + # First we calculate what, if anything, has been *added*. for event_type in ( prev_required_state_map.keys() | request_required_state_map.keys() ): old_state_keys = prev_required_state_map.get(event_type, set()) request_state_keys = request_required_state_map.get(event_type, set()) + changed_state_keys = changed_types_to_state_keys.get(event_type, set()) if old_state_keys == request_state_keys: # No change to this type @@ -1393,8 +1446,55 @@ def _required_state_changes( # Nothing *added*, so we skip. Removals happen below. continue - # Always update changes to include the newly added keys - changes[event_type] = request_state_keys + # We only remove state keys from the effective state if they've been + # removed from the request *and* the state has changed. This ensures + # that if a client removes and then re-adds a state key, we only send + # down the associated current state event if its changed (rather than + # sending down the same event twice). + invalidated_state_keys = ( + old_state_keys - request_state_keys + ) & changed_state_keys + + # Figure out which state keys we should remember sending down the connection + inheritable_previous_state_keys = ( + # Retain the previous state_keys that we've sent down before. + # Wildcard and lazy state keys are not sticky from previous requests. + (old_state_keys - {StateValues.WILDCARD, StateValues.LAZY}) + - invalidated_state_keys + ) + + # Always update changes to include the newly added keys (we've expanded the set + # of state keys), use the new requested set with whatever hasn't been + # invalidated from the previous set. + changes[event_type] = request_state_keys | inheritable_previous_state_keys + # Limit the number of state_keys we should remember sending down the connection + # for each (room_id, user_id). We don't want to store and pull out too much data + # in the database. This is a happy-medium between remembering nothing and + # everything. We can avoid sending redundant state down the connection most of + # the time given that most rooms don't have 100 members anyway and it takes a + # while to cycle through 100 members. + # + # Only remember up to (MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER) + if len(changes[event_type]) > MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER: + # Reset back to only the requested state keys + changes[event_type] = request_state_keys + + # Skip if there isn't any room to fill in the rest with previous state keys + if len(request_state_keys) < MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER: + # Fill the rest with previous state_keys. Ideally, we could sort + # these by recency but it's just a set so just pick an arbitrary + # subset (good enough). + changes[event_type] = changes[event_type] | set( + itertools.islice( + inheritable_previous_state_keys, + # Just taking the difference isn't perfect as there could be + # overlap in the keys between the requested and previous but we + # will decide to just take the easy route for now and avoid + # additional set operations to figure it out. 
+ MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER + - len(request_state_keys), + ) + ) if StateValues.WILDCARD in old_state_keys: # We were previously fetching everything for this type, so we don't need to @@ -1421,12 +1521,6 @@ def _required_state_changes( added_state_filter = StateFilter.from_types(added) - # Convert the list of state deltas to map from type to state_keys that have - # changed. - changed_types_to_state_keys: Dict[str, Set[str]] = {} - for event_type, state_key in state_deltas: - changed_types_to_state_keys.setdefault(event_type, set()).add(state_key) - # Figure out what changes we need to apply to the effective required state # config. for event_type, changed_state_keys in changed_types_to_state_keys.items(): @@ -1437,15 +1531,23 @@ def _required_state_changes( # No change. continue + # If we see the `user_id` as a state_key, also add "$ME" to the list of state + # that has changed to account for people requesting `required_state` with `$ME` + # or their user ID. + if user_id in changed_state_keys: + changed_state_keys.add(StateValues.ME) + + # We only remove state keys from the effective state if they've been + # removed from the request *and* the state has changed. This ensures + # that if a client removes and then re-adds a state key, we only send + # down the associated current state event if its changed (rather than + # sending down the same event twice). + invalidated_state_keys = ( + old_state_keys - request_state_keys + ) & changed_state_keys + + # We've expanded the set of state keys, ... (already handled above) if request_state_keys - old_state_keys: - # We've expanded the set of state keys, so we just clobber the - # current set with the new set. - # - # We could also ensure that we keep entries where the state hasn't - # changed, but are no longer in the requested required state, but - # that's a sufficient edge case that we can ignore (as its only a - # performance optimization). - changes[event_type] = request_state_keys continue old_state_key_wildcard = StateValues.WILDCARD in old_state_keys @@ -1467,11 +1569,6 @@ def _required_state_changes( changes[event_type] = request_state_keys continue - # Handle "$ME" values by adding "$ME" if the state key matches the user - # ID. - if user_id in changed_state_keys: - changed_state_keys.add(StateValues.ME) - # At this point there are no wildcards and no additions to the set of # state keys requested, only deletions. # @@ -1480,9 +1577,8 @@ def _required_state_changes( # that if a client removes and then re-adds a state key, we only send # down the associated current state event if its changed (rather than # sending down the same event twice). - invalidated = (old_state_keys - request_state_keys) & changed_state_keys - if invalidated: - changes[event_type] = old_state_keys - invalidated + if invalidated_state_keys: + changes[event_type] = old_state_keys - invalidated_state_keys if changes: # Update the required state config based on the changes. 
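Before the tests, the retention cap introduced above can be distilled into a self-contained sketch. It mirrors the hunk's use of `itertools.islice` over the previously-sent state keys; the standalone function and its name are illustrative only, not part of the diff:

```py
# Minimal sketch of the state_key retention cap in `_required_state_changes`.
# Not part of the patch; names mirror the diff for readability.
import itertools
from typing import AbstractSet, Set

MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER = 100


def cap_remembered_state_keys(
    request_state_keys: AbstractSet[str],
    inheritable_previous_state_keys: AbstractSet[str],
) -> Set[str]:
    """Union the requested keys with previously-sent keys, capped at the limit.

    Requested keys always survive; previously-sent keys only fill whatever
    room is left under the cap.
    """
    combined = set(request_state_keys) | set(inheritable_previous_state_keys)
    if len(combined) <= MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER:
        return combined

    # Over the limit: reset back to just the requested keys...
    combined = set(request_state_keys)
    room_left = MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - len(request_state_keys)
    if room_left > 0:
        # ...and top up with an arbitrary subset of the previous keys. Because
        # the two sets can overlap, the result may land slightly under the
        # cap, which the tests below explicitly allow for.
        combined |= set(
            itertools.islice(inheritable_previous_state_keys, room_left)
        )
    return combined


# e.g. 90 requested keys plus 70 previously-sent keys: all 90 requested keys
# are kept and at most 10 of the previous ones are remembered.
```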
diff --git a/tests/handlers/test_sliding_sync.py b/tests/handlers/test_sliding_sync.py index 9a68d1dd958c..5b7e2937f836 100644 --- a/tests/handlers/test_sliding_sync.py +++ b/tests/handlers/test_sliding_sync.py @@ -33,6 +33,7 @@ ) from synapse.api.room_versions import RoomVersions from synapse.handlers.sliding_sync import ( + MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER, RoomsForUserType, RoomSyncConfig, StateValues, @@ -3319,6 +3320,32 @@ class RequiredStateChangesTestCase(unittest.TestCase): ), ), ), + ( + "simple_retain_previous_state_keys", + """Test adding a state key to the config and retaining a previously sent state_key""", + RequiredStateChangesTestParameters( + previous_required_state_map={"type": {"state_key1"}}, + request_required_state_map={"type": {"state_key2", "state_key3"}}, + state_deltas={("type", "state_key2"): "$event_id"}, + expected_with_state_deltas=( + # We've added a key so we should persist the changed required state + # config. + # + # Retain `state_key1` from the `previous_required_state_map` + {"type": {"state_key1", "state_key2", "state_key3"}}, + # We should see the new state_keys added + StateFilter.from_types( + [("type", "state_key2"), ("type", "state_key3")] + ), + ), + expected_without_state_deltas=( + {"type": {"state_key1", "state_key2", "state_key3"}}, + StateFilter.from_types( + [("type", "state_key2"), ("type", "state_key3")] + ), + ), + ), + ), ( "simple_remove_type", """ @@ -3724,6 +3751,249 @@ class RequiredStateChangesTestCase(unittest.TestCase): ), ), ), + ( + "state_key_lazy_keep_previous_memberships_and_no_new_memberships", + """ + This test mimics a request with lazy-loading room members enabled where + we have previously sent down user2 and user3's membership events and now + we're sending down another response without any timeline events. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove "@user2:test" since that state has changed and is no + # longer being requested anymore. Since something was removed, + # we should persist the changed to required state. That way next + # time, they request "@user2:test", we see that we haven't sent + # it before and send the new state. (we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + StateValues.LAZY, + "@user3:test", + } + }, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # We're not requesting any specific `EventTypes.Member` now but + # since that state hasn't changed, nothing should change (we + # should still keep track that we've sent specific + # `EventTypes.Member` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_lazy_keep_previous_memberships_with_new_memberships", + """ + This test mimics a request with lazy-loading room members enabled where + we have previously sent down user2 and user3's membership events and now + we're sending down another response with a new event from user4. 
+ """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={ + EventTypes.Member: {StateValues.LAZY, "@user4:test"} + }, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + # + # Also remove "@user2:test" since that state has changed and is no + # longer being requested anymore. Since something was removed, + # we also should persist the changed to required state. That way next + # time, they request "@user2:test", we see that we haven't sent + # it before and send the new state. (we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + StateValues.LAZY, + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + expected_without_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + { + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + ), + ), + ( + "state_key_expand_lazy_keep_previous_memberships", + """ + Test expanding the `required_state` to lazy-loading room members. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: {"@user2:test", "@user3:test"} + }, + request_required_state_map={EventTypes.Member: {StateValues.LAZY}}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Since `StateValues.LAZY` was added, we should persist the + # changed required state config. + # + # Also remove "@user2:test" since that state has changed and is no + # longer being requested anymore. Since something was removed, + # we also should persist the changed to required state. That way next + # time, they request "@user2:test", we see that we haven't sent + # it before and send the new state. (we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + StateValues.LAZY, + "@user3:test", + } + }, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # Since `StateValues.LAZY` was added, we should persist the + # changed required state config. + { + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_retract_lazy_keep_previous_memberships_no_new_memberships", + """ + Test retracting the `required_state` to no longer lazy-loading room members. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Remove `EventTypes.Member` since there's been a change to that + # state, (persist the change to required state). That way next + # time, they request `EventTypes.Member`, we see that we haven't + # sent it before and send the new state. 
(if we were tracking + # that we sent any other state, we should still keep track + # that). + # + # This acts the same as the `simple_remove_type` test. It's + # possible that we could remember the specific `state_keys` that + # we have sent down before but this currently just acts the same + # as if a whole `type` was removed. Perhaps it's good that we + # "garbage collect" and forget what we've sent before for a + # given `type` when the client stops caring about a certain + # `type`. + {}, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + expected_without_state_deltas=( + # `EventTypes.Member` is no longer requested but since that + # state hasn't changed, nothing should change (we should still + # keep track that we've sent `EventTypes.Member` before). + None, + # We don't need to request anything more if they are requesting + # less state now + StateFilter.none(), + ), + ), + ), + ( + "state_key_retract_lazy_keep_previous_memberships_with_new_memberships", + """ + Test retracting the `required_state` to no longer lazy-loading room members. + """, + RequiredStateChangesTestParameters( + previous_required_state_map={ + EventTypes.Member: { + StateValues.LAZY, + "@user2:test", + "@user3:test", + } + }, + request_required_state_map={EventTypes.Member: {"@user4:test"}}, + state_deltas={(EventTypes.Member, "@user2:test"): "$event_id"}, + expected_with_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + # + # Also remove "@user2:test" since that state has changed and is no + # longer being requested anymore. Since something was removed, + # we also should persist the changed to required state. That way next + # time, they request "@user2:test", we see that we haven't sent + # it before and send the new state. (we should still keep track + # that we've sent specific `EventTypes.Member` before) + { + EventTypes.Member: { + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + expected_without_state_deltas=( + # Since "@user4:test" was added, we should persist the changed + # required state config. + { + EventTypes.Member: { + "@user2:test", + "@user3:test", + "@user4:test", + } + }, + # We should see the new state_keys added + StateFilter.from_types([(EventTypes.Member, "@user4:test")]), + ), + ), + ), ( "type_wildcard_with_state_key_wildcard_to_explicit_state_keys", """ @@ -3824,7 +4094,7 @@ class RequiredStateChangesTestCase(unittest.TestCase): ), ), ( - "state_key_wildcard_to_explicit_state_keys", + "explicit_state_keys_to_wildcard_state_key", """Test switching from a wildcard to explicit state keys with a concrete type""", RequiredStateChangesTestParameters( previous_required_state_map={ @@ -3837,11 +4107,18 @@ class RequiredStateChangesTestCase(unittest.TestCase): # request. And we need to request all of the state for that type # because we previously, only sent down a few keys. 
expected_with_state_deltas=( - {"type1": {StateValues.WILDCARD}}, + {"type1": {StateValues.WILDCARD, "state_key2", "state_key3"}}, StateFilter.from_types([("type1", None)]), ), expected_without_state_deltas=( - {"type1": {StateValues.WILDCARD}}, + { + "type1": { + StateValues.WILDCARD, + "state_key1", + "state_key2", + "state_key3", + } + }, StateFilter.from_types([("type1", None)]), ), ), @@ -3857,14 +4134,8 @@ def test_xxx( # Without `state_deltas` changed_required_state_map, added_state_filter = _required_state_changes( user_id="@user:test", - previous_room_config=RoomSyncConfig( - timeline_limit=0, - required_state_map=test_parameters.previous_required_state_map, - ), - room_sync_config=RoomSyncConfig( - timeline_limit=0, - required_state_map=test_parameters.request_required_state_map, - ), + prev_required_state_map=test_parameters.previous_required_state_map, + request_required_state_map=test_parameters.request_required_state_map, state_deltas={}, ) @@ -3882,14 +4153,8 @@ def test_xxx( # With `state_deltas` changed_required_state_map, added_state_filter = _required_state_changes( user_id="@user:test", - previous_room_config=RoomSyncConfig( - timeline_limit=0, - required_state_map=test_parameters.previous_required_state_map, - ), - room_sync_config=RoomSyncConfig( - timeline_limit=0, - required_state_map=test_parameters.request_required_state_map, - ), + prev_required_state_map=test_parameters.previous_required_state_map, + request_required_state_map=test_parameters.request_required_state_map, state_deltas=test_parameters.state_deltas, ) @@ -3903,3 +4168,121 @@ def test_xxx( test_parameters.expected_with_state_deltas[1], "added_state_filter does not match (with state_deltas)", ) + + @parameterized.expand( + [ + # Test with a normal arbitrary type (no special meaning) + ("arbitrary_type", "type", set()), + # Test with membership + ("membership", EventTypes.Member, set()), + # Test with lazy-loading room members + ("lazy_loading_membership", EventTypes.Member, {StateValues.LAZY}), + ] + ) + def test_limit_retained_previous_state_keys( + self, + _test_label: str, + event_type: str, + extra_state_keys: Set[str], + ) -> None: + """ + Test that we limit the number of state_keys that we remember but always include + the state_keys that we've just requested. + """ + previous_required_state_map = { + event_type: { + # Prefix the state_keys we've "prev_"iously sent so they are easier to + # identify in our assertions. + f"prev_state_key{i}" + for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - 30) + } + | extra_state_keys + } + request_required_state_map = { + event_type: {f"state_key{i}" for i in range(50)} | extra_state_keys + } + + # (function under test) + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + prev_required_state_map=previous_required_state_map, + request_required_state_map=request_required_state_map, + state_deltas={}, + ) + assert changed_required_state_map is not None + + # We should only remember up to the maximum number of state keys + self.assertGreaterEqual( + len(changed_required_state_map[event_type]), + # Most of the time this will be `MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER` but + # because we are just naively selecting enough previous state_keys to fill + # the limit, there might be some overlap in what's added back which means we + # might have slightly less than the limit. + # + # `extra_state_keys` overlaps in the previous and requested + # `required_state_map` so we might see this this scenario. 
+ MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - len(extra_state_keys), + ) + + # Should include all of the requested state + self.assertIncludes( + changed_required_state_map[event_type], + request_required_state_map[event_type], + ) + # And the rest is filled with the previous state keys + # + # We can't assert the exact state_keys since we don't know the order so we just + # check that they all start with "prev_" and that we have the correct amount. + remaining_state_keys = ( + changed_required_state_map[event_type] + - request_required_state_map[event_type] + ) + self.assertGreater( + len(remaining_state_keys), + 0, + ) + assert all( + state_key.startswith("prev_") for state_key in remaining_state_keys + ), "Remaining state_keys should be the previous state_keys" + + def test_request_more_state_keys_than_remember_limit(self) -> None: + """ + Test requesting more state_keys than fit in our limit to remember from previous + requests. + """ + previous_required_state_map = { + "type": { + # Prefix the state_keys we've "prev_"iously sent so they are easier to + # identify in our assertions. + f"prev_state_key{i}" + for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER - 30) + } + } + request_required_state_map = { + "type": { + f"state_key{i}" + # Requesting more than the MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER + for i in range(MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER + 20) + } + } + # Ensure that we are requesting more than the limit + self.assertGreater( + len(request_required_state_map["type"]), + MAX_NUMBER_PREVIOUS_STATE_KEYS_TO_REMEMBER, + ) + + # (function under test) + changed_required_state_map, added_state_filter = _required_state_changes( + user_id="@user:test", + prev_required_state_map=previous_required_state_map, + request_required_state_map=request_required_state_map, + state_deltas={}, + ) + assert changed_required_state_map is not None + + # Should include all of the requested state + self.assertIncludes( + changed_required_state_map["type"], + request_required_state_map["type"], + exact=True, + ) diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index 7da51d4954a8..ecea5f2d5b32 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -381,10 +381,10 @@ def test_rooms_required_state_wildcard_state_key(self) -> None: ) self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) - def test_rooms_required_state_lazy_loading_room_members(self) -> None: + def test_rooms_required_state_lazy_loading_room_members_initial_sync(self) -> None: """ - Test `rooms.required_state` returns people relevant to the timeline when - lazy-loading room members, `["m.room.member","$LAZY"]`. + On initial sync, test `rooms.required_state` returns people relevant to the + timeline when lazy-loading room members, `["m.room.member","$LAZY"]`. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -432,6 +432,255 @@ def test_rooms_required_state_lazy_loading_room_members(self) -> None: ) self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + def test_rooms_required_state_lazy_loading_room_members_incremental_sync( + self, + ) -> None: + """ + On incremental sync, test `rooms.required_state` returns people relevant to the + timeline when lazy-loading room members, `["m.room.member","$LAZY"]`. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request with lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + self.helper.send(room_id1, "6", tok=user4_tok) + + # Make an incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # but since we've seen user2 in the last sync (and their membership hasn't + # changed), we should only see user4 here. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user4_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_expand_lazy_loading_room_members_incremental_sync( + self, + ) -> None: + """ + Test that when we expand the `required_state` to include lazy-loading room + members, it returns people relevant to the timeline. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request *without* lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + self.helper.send(room_id1, "6", tok=user4_tok) + + # Expand `required_state` and make an incremental Sliding Sync request *with* + # lazy-loading room members + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # and we haven't seen any membership before this sync so we should see both + # users. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user2_id)], + state_map[(EventTypes.Member, user4_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "7", tok=user2_tok) + self.helper.send(room_id1, "8", tok=user4_tok) + self.helper.send(room_id1, "9", tok=user4_tok) + + # Make another incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # but since we've seen both memberships in the last sync, they shouldn't appear + # again. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1].get("required_state", []), + set(), + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + def test_rooms_required_state_expand_retract_expand_lazy_loading_room_members_incremental_sync( + self, + ) -> None: + """ + Test that when we expand the `required_state` to include lazy-loading room + members, it returns people relevant to the timeline. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request *without* lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + self.helper.send(room_id1, "6", tok=user4_tok) + + # Expand `required_state` and make an incremental Sliding Sync request *with* + # lazy-loading room members + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ] + response_body, from_token = self.do_sync( + sync_body, since=from_token, tok=user1_tok + ) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2 and user4 sent events in the last 3 events we see in the `timeline` + # and we haven't seen any membership before this sync so we should see both + # users because we're lazy-loading the room members. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user2_id)], + state_map[(EventTypes.Member, user4_id)], + }, + exact=True, + ) + + # Send a message so the room comes down sync. + self.helper.send(room_id1, "msg", tok=user4_tok) + + # Retract `required_state` and make an incremental Sliding Sync request + # requesting a few memberships + sync_body["lists"]["foo-list"]["required_state"] = [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.ME], + [EventTypes.Member, user2_id], + ] + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # We've seen user2's membership in the last sync so we shouldn't see it here + # even though it's requested. We should only see user1's membership. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + state_map[(EventTypes.Member, user1_id)], + }, + exact=True, + ) + def test_rooms_required_state_me(self) -> None: """ Test `rooms.required_state` correctly handles $ME. 
@@ -561,7 +810,7 @@ def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: ) self.helper.leave(room_id1, user3_id, tok=user3_tok) - # Make the Sliding Sync request with lazy loading for the room members + # Make an incremental Sliding Sync request response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) # Only user2 and user3 sent events in the 3 events we see in the `timeline` From 9c0a3963bc6381007d7df9aaed939d5ab4e9ec08 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 17:21:05 +0000 Subject: [PATCH 060/147] Bump phonenumbers from 8.13.48 to 8.13.49 (#17899) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 787041b5b39f..6a5845fd1ec6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1451,13 +1451,13 @@ dev = ["jinja2"] [[package]] name = "phonenumbers" -version = "8.13.48" +version = "8.13.49" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.48-py2.py3-none-any.whl", hash = "sha256:5c51939acefa390eb74119750afb10a85d3c628dc83fd62c52d6f532fcf5d205"}, - {file = "phonenumbers-8.13.48.tar.gz", hash = "sha256:62d8df9b0f3c3c41571c6b396f044ddd999d61631534001b8be7fdf7ba1b18f3"}, + {file = "phonenumbers-8.13.49-py2.py3-none-any.whl", hash = "sha256:e17140955ab3d8f9580727372ea64c5ada5327932d6021ef6fd203c3db8c8139"}, + {file = "phonenumbers-8.13.49.tar.gz", hash = "sha256:e608ccb61f0bd42e6db1d2c421f7c22186b88f494870bf40aa31d1a2718ab0ae"}, ] [[package]] From 2c9ed5e5106ffa223366e437095a662cd7a63b82 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 4 Nov 2024 12:20:07 -0600 Subject: [PATCH 061/147] Remove usage of internal header encoding API (#17894) ```py from twisted.web.http_headers import Headers Headers()._canonicalNameCaps Headers()._encodeName ``` Introduced in https://github.com/matrix-org/synapse/pull/15913 <- https://github.com/matrix-org/synapse/pull/15773 --- changelog.d/17894.misc | 1 + synapse/http/proxy.py | 40 +++++++++-------------- tests/http/test_matrixfederationclient.py | 23 ++++++++++--- tests/http/test_proxy.py | 32 +++++++++++++----- 4 files changed, 60 insertions(+), 36 deletions(-) create mode 100644 changelog.d/17894.misc diff --git a/changelog.d/17894.misc b/changelog.d/17894.misc new file mode 100644 index 000000000000..dc1a7577abfd --- /dev/null +++ b/changelog.d/17894.misc @@ -0,0 +1 @@ +Remove usage of internal header encoding API. diff --git a/synapse/http/proxy.py b/synapse/http/proxy.py index 97aa429e7d46..5cd990b0d076 100644 --- a/synapse/http/proxy.py +++ b/synapse/http/proxy.py @@ -51,25 +51,17 @@ # "Hop-by-hop" headers (as opposed to "end-to-end" headers) as defined by RFC2616 # section 13.5.1 and referenced in RFC9110 section 7.6.1. These are meant to only be # consumed by the immediate recipient and not be forwarded on. 
-HOP_BY_HOP_HEADERS = { - "Connection", - "Keep-Alive", - "Proxy-Authenticate", - "Proxy-Authorization", - "TE", - "Trailers", - "Transfer-Encoding", - "Upgrade", +HOP_BY_HOP_HEADERS_LOWERCASE = { + "connection", + "keep-alive", + "proxy-authenticate", + "proxy-authorization", + "te", + "trailers", + "transfer-encoding", + "upgrade", } - -if hasattr(Headers, "_canonicalNameCaps"): - # Twisted < 24.7.0rc1 - _canonicalHeaderName = Headers()._canonicalNameCaps # type: ignore[attr-defined] -else: - # Twisted >= 24.7.0rc1 - # But note that `_encodeName` still exists on prior versions, - # it just encodes differently - _canonicalHeaderName = Headers()._encodeName +assert all(header.lower() == header for header in HOP_BY_HOP_HEADERS_LOWERCASE) def parse_connection_header_value( @@ -92,12 +84,12 @@ def parse_connection_header_value( Returns: The set of header names that should not be copied over from the remote response. - The keys are capitalized in canonical capitalization. + The keys are lowercased. """ extra_headers_to_remove: Set[str] = set() if connection_header_value: extra_headers_to_remove = { - _canonicalHeaderName(connection_option.strip()).decode("ascii") + connection_option.decode("ascii").strip().lower() for connection_option in connection_header_value.split(b",") } @@ -194,7 +186,7 @@ def _send_response( # The `Connection` header also defines which headers should not be copied over. connection_header = response_headers.getRawHeaders(b"connection") - extra_headers_to_remove = parse_connection_header_value( + extra_headers_to_remove_lowercase = parse_connection_header_value( connection_header[0] if connection_header else None ) @@ -202,10 +194,10 @@ def _send_response( for k, v in response_headers.getAllRawHeaders(): # Do not copy over any hop-by-hop headers. These are meant to only be # consumed by the immediate recipient and not be forwarded on. 
- header_key = k.decode("ascii") + header_key_lowercase = k.decode("ascii").lower() if ( - header_key in HOP_BY_HOP_HEADERS - or header_key in extra_headers_to_remove + header_key_lowercase in HOP_BY_HOP_HEADERS_LOWERCASE + or header_key_lowercase in extra_headers_to_remove_lowercase ): continue diff --git a/tests/http/test_matrixfederationclient.py b/tests/http/test_matrixfederationclient.py index 6588695e3734..e34df54e13c9 100644 --- a/tests/http/test_matrixfederationclient.py +++ b/tests/http/test_matrixfederationclient.py @@ -903,12 +903,19 @@ def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None: headers=Headers( { "Content-Type": ["application/json"], - "Connection": ["close, X-Foo, X-Bar"], + "X-Test": ["test"], + # Define some hop-by-hop headers (try with varying casing to + # make sure we still match-up the headers) + "Connection": ["close, X-fOo, X-Bar, X-baz"], # Should be removed because it's defined in the `Connection` header "X-Foo": ["foo"], "X-Bar": ["bar"], + # (not in canonical case) + "x-baZ": ["baz"], # Should be removed because it's a hop-by-hop header "Proxy-Authorization": "abcdef", + # Should be removed because it's a hop-by-hop header (not in canonical case) + "transfer-EnCoDiNg": "abcdef", } ), ) @@ -938,9 +945,17 @@ def test_proxy_requests_and_discards_hop_by_hop_headers(self) -> None: header_names = set(headers.keys()) # Make sure the response does not include the hop-by-hop headers - self.assertNotIn(b"X-Foo", header_names) - self.assertNotIn(b"X-Bar", header_names) - self.assertNotIn(b"Proxy-Authorization", header_names) + self.assertIncludes( + header_names, + { + b"Content-Type", + b"X-Test", + # Default headers from Twisted + b"Date", + b"Server", + }, + exact=True, + ) # Make sure the response is as expected back on the main worker self.assertEqual(res, {"foo": "bar"}) diff --git a/tests/http/test_proxy.py b/tests/http/test_proxy.py index 589527049489..7110dcf9f94b 100644 --- a/tests/http/test_proxy.py +++ b/tests/http/test_proxy.py @@ -22,27 +22,42 @@ from parameterized import parameterized -from synapse.http.proxy import parse_connection_header_value +from synapse.http.proxy import ( + HOP_BY_HOP_HEADERS_LOWERCASE, + parse_connection_header_value, +) from tests.unittest import TestCase +def mix_case(s: str) -> str: + """ + Mix up the case of each character in the string (upper or lower case) + """ + return "".join(c.upper() if i % 2 == 0 else c.lower() for i, c in enumerate(s)) + + class ProxyTests(TestCase): @parameterized.expand( [ - [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}], + [b"close, X-Foo, X-Bar", {"close", "x-foo", "x-bar"}], # No whitespace - [b"close,X-Foo,X-Bar", {"Close", "X-Foo", "X-Bar"}], + [b"close,X-Foo,X-Bar", {"close", "x-foo", "x-bar"}], # More whitespace - [b"close, X-Foo, X-Bar", {"Close", "X-Foo", "X-Bar"}], + [b"close, X-Foo, X-Bar", {"close", "x-foo", "x-bar"}], # "close" directive in not the first position - [b"X-Foo, X-Bar, close", {"X-Foo", "X-Bar", "Close"}], + [b"X-Foo, X-Bar, close", {"x-foo", "x-bar", "close"}], # Normalizes header capitalization - [b"keep-alive, x-fOo, x-bAr", {"Keep-Alive", "X-Foo", "X-Bar"}], + [b"keep-alive, x-fOo, x-bAr", {"keep-alive", "x-foo", "x-bar"}], # Handles header names with whitespace [ b"keep-alive, x foo, x bar", - {"Keep-Alive", "X foo", "X bar"}, + {"keep-alive", "x foo", "x bar"}, + ], + # Make sure we handle all of the hop-by-hop headers + [ + mix_case(", ".join(HOP_BY_HOP_HEADERS_LOWERCASE)).encode("ascii"), + HOP_BY_HOP_HEADERS_LOWERCASE, ], ] ) @@ -54,7 
+69,8 @@ def test_parse_connection_header_value( """ Tests that the connection header value is parsed correctly """ - self.assertEqual( + self.assertIncludes( expected_extra_headers_to_remove, parse_connection_header_value(connection_header_value), + exact=True, ) From 1c2b18a704ec81e9f4aefe3e87d120d7a38066ce Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 5 Nov 2024 13:15:10 +0000 Subject: [PATCH 062/147] Bump Synapse Dockerfile default to Python 3.12 (#17887) --- changelog.d/17887.misc | 1 + docker/Dockerfile | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17887.misc diff --git a/changelog.d/17887.misc b/changelog.d/17887.misc new file mode 100644 index 000000000000..6be32caee6a4 --- /dev/null +++ b/changelog.d/17887.misc @@ -0,0 +1 @@ +Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. \ No newline at end of file diff --git a/docker/Dockerfile b/docker/Dockerfile index 1da196b12e76..a4931011a78c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -20,7 +20,7 @@ # `poetry export | pip install -r /dev/stdin`, but beware: we have experienced bugs in # in `poetry export` in the past. -ARG PYTHON_VERSION=3.11 +ARG PYTHON_VERSION=3.12 ### ### Stage 0: generate requirements.txt From 361bdafb877e4303497591a1612aa02f0b00c472 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Nov 2024 14:45:57 +0000 Subject: [PATCH 063/147] Add experimental support for MSC4222 (#17888) Basically, if the client sets a special query param on `/sync` v2 instead of responding with `state` at the *start* of the timeline, we instead respond with `state_after` at the *end* of the timeline. We do this by using the `current_state_delta_stream` table, which is actually reliable, rather than messing around with "state at" points on the timeline. c.f. MSC4222 --- changelog.d/17888.feature | 1 + docs/admin_api/experimental_features.md | 1 + synapse/config/experimental.py | 3 + synapse/handlers/sync.py | 128 +++++++++- synapse/rest/admin/experimental_features.py | 3 + synapse/rest/client/sync.py | 45 +++- tests/handlers/test_sync.py | 270 ++++++++++++++++---- 7 files changed, 395 insertions(+), 56 deletions(-) create mode 100644 changelog.d/17888.feature diff --git a/changelog.d/17888.feature b/changelog.d/17888.feature new file mode 100644 index 000000000000..3ede8886abdb --- /dev/null +++ b/changelog.d/17888.feature @@ -0,0 +1 @@ +Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222). diff --git a/docs/admin_api/experimental_features.md b/docs/admin_api/experimental_features.md index ef1b58c9ba05..e32728e56d3d 100644 --- a/docs/admin_api/experimental_features.md +++ b/docs/admin_api/experimental_features.md @@ -5,6 +5,7 @@ basis. The currently supported features are: - [MSC3881](https://github.com/matrix-org/matrix-spec-proposals/pull/3881): enable remotely toggling push notifications for another client - [MSC3575](https://github.com/matrix-org/matrix-spec-proposals/pull/3575): enable experimental sliding sync support +- [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222): adding `state_after` to sync v2 To use it, you will need to authenticate by providing an `access_token` for a server admin: see [Admin API](../usage/administration/admin_api/). 
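With the flag registered, MSC4222 can be toggled per user through the experimental features admin API that this documentation file covers. A rough sketch using the `PUT /_synapse/admin/v1/experimental_features/<user_id>` endpoint documented elsewhere in that file; the server URL, admin token, and user ID below are placeholders:

```py
# Sketch: enable the new `msc4222` feature for a single user via the admin
# API. SERVER, ADMIN_TOKEN and the user ID are placeholders.
import json
import urllib.request

SERVER = "https://synapse.example.com"
ADMIN_TOKEN = "syt_admin_token"
user_id = "@alice:example.com"

request = urllib.request.Request(
    f"{SERVER}/_synapse/admin/v1/experimental_features/{user_id}",
    data=json.dumps({"features": {"msc4222": True}}).encode("utf-8"),
    headers={
        "Authorization": f"Bearer {ADMIN_TOKEN}",
        "Content-Type": "application/json",
    },
    method="PUT",
)
with urllib.request.urlopen(request) as response:
    print(response.status)  # 200 on success
```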
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index fd14db0d0244..b26ce25d715d 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -450,3 +450,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC4210: Remove legacy mentions self.msc4210_enabled: bool = experimental.get("msc4210_enabled", False) + + # MSC4222: Adding `state_after` to sync v2 + self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index f4ea90fbd78d..df9a088063fb 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -143,6 +143,7 @@ class SyncConfig: filter_collection: FilterCollection is_guest: bool device_id: Optional[str] + use_state_after: bool @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -1141,6 +1142,7 @@ async def compute_state_delta( since_token: Optional[StreamToken], end_token: StreamToken, full_state: bool, + joined: bool, ) -> MutableStateMap[EventBase]: """Works out the difference in state between the end of the previous sync and the start of the timeline. @@ -1155,6 +1157,7 @@ async def compute_state_delta( the point just after their leave event. full_state: Whether to force returning the full state. `lazy_load_members` still applies when `full_state` is `True`. + joined: whether the user is currently joined to the room Returns: The state to return in the sync response for the room. @@ -1230,11 +1233,12 @@ async def compute_state_delta( if full_state: state_ids = await self._compute_state_delta_for_full_sync( room_id, - sync_config.user, + sync_config, batch, end_token, members_to_fetch, timeline_state, + joined, ) else: # If this is an initial sync then full_state should be set, and @@ -1244,6 +1248,7 @@ async def compute_state_delta( state_ids = await self._compute_state_delta_for_incremental_sync( room_id, + sync_config, batch, since_token, end_token, @@ -1316,20 +1321,24 @@ async def compute_state_delta( async def _compute_state_delta_for_full_sync( self, room_id: str, - syncing_user: UserID, + sync_config: SyncConfig, batch: TimelineBatch, end_token: StreamToken, members_to_fetch: Optional[Set[str]], timeline_state: StateMap[str], + joined: bool, ) -> StateMap[str]: """Calculate the state events to be included in a full sync response. As with `_compute_state_delta_for_incremental_sync`, the result will include the membership events for the senders of each event in `members_to_fetch`. + Note that whether this returns the state at the start or the end of the + batch depends on `sync_config.use_state_after` (c.f. MSC4222). + Args: room_id: The room we are calculating for. - syncing_user: The user that is calling `/sync`. + sync_confg: The user that is calling `/sync`. batch: The timeline batch for the room that will be sent to the user. end_token: Token of the end of the current batch. Normally this will be the same as the global "now_token", but if the user has left the room, @@ -1338,10 +1347,11 @@ async def _compute_state_delta_for_full_sync( events in the timeline. timeline_state: The contribution to the room state from state events in `batch`. Only contains the last event for any given state key. + joined: whether the user is currently joined to the room Returns: A map from (type, state_key) to event_id, for each event that we believe - should be included in the `state` part of the sync response. + should be included in the `state` or `state_after` part of the sync response. 
""" if members_to_fetch is not None: # Lazy-loading of membership events is enabled. @@ -1359,7 +1369,7 @@ async def _compute_state_delta_for_full_sync( # is no guarantee that our membership will be in the auth events of # timeline events when the room is partial stated. state_filter = StateFilter.from_lazy_load_member_list( - members_to_fetch.union((syncing_user.to_string(),)) + members_to_fetch.union((sync_config.user.to_string(),)) ) # We are happy to use partial state to compute the `/sync` response. @@ -1373,6 +1383,61 @@ async def _compute_state_delta_for_full_sync( await_full_state = True lazy_load_members = False + # Check if we are wanting to return the state at the start or end of the + # timeline. If at the end we can just use the current state. + if sync_config.use_state_after: + # If we're getting the state at the end of the timeline, we can just + # use the current state of the room (and roll back any changes + # between when we fetched the current state and `end_token`). + # + # For rooms we're not joined to, there might be a very large number + # of deltas between `end_token` and "now", and so instead we fetch + # the state at the end of the timeline. + if joined: + state_ids = await self._state_storage_controller.get_current_state_ids( + room_id, + state_filter=state_filter, + await_full_state=await_full_state, + ) + + # Now roll back the state by looking at the state deltas between + # end_token and now. + deltas = await self.store.get_current_state_deltas_for_room( + room_id, + from_token=end_token.room_key, + to_token=self.store.get_room_max_token(), + ) + if deltas: + mutable_state_ids = dict(state_ids) + + # We iterate over the deltas backwards so that if there are + # multiple changes of the same type/state_key we'll + # correctly pick the earliest delta. + for delta in reversed(deltas): + if delta.prev_event_id: + mutable_state_ids[(delta.event_type, delta.state_key)] = ( + delta.prev_event_id + ) + elif (delta.event_type, delta.state_key) in mutable_state_ids: + mutable_state_ids.pop((delta.event_type, delta.state_key)) + + state_ids = mutable_state_ids + + return state_ids + + else: + # Just use state groups to get the state at the end of the + # timeline, i.e. the state at the leave/etc event. + state_at_timeline_end = ( + await self._state_storage_controller.get_state_ids_at( + room_id, + stream_position=end_token, + state_filter=state_filter, + await_full_state=await_full_state, + ) + ) + return state_at_timeline_end + state_at_timeline_end = await self._state_storage_controller.get_state_ids_at( room_id, stream_position=end_token, @@ -1405,6 +1470,7 @@ async def _compute_state_delta_for_full_sync( async def _compute_state_delta_for_incremental_sync( self, room_id: str, + sync_config: SyncConfig, batch: TimelineBatch, since_token: StreamToken, end_token: StreamToken, @@ -1419,8 +1485,12 @@ async def _compute_state_delta_for_incremental_sync( (`compute_state_delta`) is responsible for keeping track of which membership events we have already sent to the client, and hence ripping them out. + Note that whether this returns the state at the start or the end of the + batch depends on `sync_config.use_state_after` (c.f. MSC4222). + Args: room_id: The room we are calculating for. + sync_config batch: The timeline batch for the room that will be sent to the user. since_token: Token of the end of the previous batch. end_token: Token of the end of the current batch. 
Normally this will be @@ -1433,7 +1503,7 @@ async def _compute_state_delta_for_incremental_sync( Returns: A map from (type, state_key) to event_id, for each event that we believe - should be included in the `state` part of the sync response. + should be included in the `state` or `state_after` part of the sync response. """ if members_to_fetch is not None: # Lazy-loading is enabled. Only return the state that is needed. @@ -1445,6 +1515,51 @@ async def _compute_state_delta_for_incremental_sync( await_full_state = True lazy_load_members = False + # Check whether we want to return the state at the start or at the end of + # the timeline. If at the end, we can just use the current state delta stream. + if sync_config.use_state_after: + delta_state_ids: MutableStateMap[str] = {} + + if members_to_fetch is not None: + # We're lazy-loading, so the client might need some more member + # events to understand the events in this timeline. So we always + # fish out all the member events corresponding to the timeline + # here. The caller will then dedupe any redundant ones. + member_ids = await self._state_storage_controller.get_current_state_ids( + room_id=room_id, + state_filter=StateFilter.from_types( + (EventTypes.Member, member) for member in members_to_fetch + ), + await_full_state=await_full_state, + ) + delta_state_ids.update(member_ids) + + # We don't do LL filtering for incremental syncs - see + # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346 + # N.B. this slows down incr syncs as we are now processing way more + # state in the server than if we were LLing. + # + # i.e. we return all state deltas, including membership changes that + # we'd normally exclude due to LL. + deltas = await self.store.get_current_state_deltas_for_room( + room_id=room_id, + from_token=since_token.room_key, + to_token=end_token.room_key, + ) + for delta in deltas: + if delta.event_id is None: + # There was a state reset and this state entry is no longer + # present, but we have no way of informing the client about + # this, so we just skip it for now. + continue + + # Note that deltas are in stream ordering, so if there are + # multiple deltas for a given type/state_key we'll always pick + # the latest one. + delta_state_ids[(delta.event_type, delta.state_key)] = delta.event_id + + return delta_state_ids + # For a non-gappy sync if the events in the timeline are simply a linear # chain (i.e. no merging/branching of the graph), then we know the state # delta between the end of the previous sync and start of the new one is @@ -2867,6 +2982,7 @@ async def _generate_room_entry( since_token, room_builder.end_token, full_state=full_state, + joined=room_builder.rtype == "joined", ) else: # An out of band room won't have any state changes.
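The two sync.py paths above both lean on the current state delta stream, and the trick is easier to see stripped of Synapse's plumbing. Below is a rough, self-contained sketch of the idea — `StateDelta`, `rollback_to_token`, and `roll_forward_between_tokens` are illustrative names invented here, not Synapse APIs:

```python
from typing import Dict, List, Optional, Tuple

import attr

StateKey = Tuple[str, str]  # (event_type, state_key)


@attr.s(slots=True, frozen=True, auto_attribs=True)
class StateDelta:
    """Simplified stand-in for a row of the current state delta stream."""

    event_type: str
    state_key: str
    event_id: Optional[str]  # state after the delta; None if the entry was removed
    prev_event_id: Optional[str]  # state before the delta; None if it was created


def rollback_to_token(
    current_state: Dict[StateKey, str], deltas_after_token: List[StateDelta]
) -> Dict[StateKey, str]:
    """Full-sync path: start from the room's current state and undo every
    delta that landed after the sync token. Deltas are walked newest-first,
    so for a repeated key the *earliest* prev_event_id wins."""
    state = dict(current_state)
    for delta in reversed(deltas_after_token):
        key = (delta.event_type, delta.state_key)
        if delta.prev_event_id:
            state[key] = delta.prev_event_id
        else:
            # Created after the token, so it did not exist at the token.
            state.pop(key, None)
    return state


def roll_forward_between_tokens(
    deltas_in_window: List[StateDelta],
) -> Dict[StateKey, str]:
    """Incremental-sync path: replay deltas between the two tokens in stream
    order, so for a repeated key the *latest* event_id wins. Removals are
    skipped, since /sync has no way to signal a deleted state entry."""
    state: Dict[StateKey, str] = {}
    for delta in deltas_in_window:
        if delta.event_id is None:
            continue
        state[(delta.event_type, delta.state_key)] = delta.event_id
    return state
```

The newest-first walk is the same invariant the `reversed(deltas)` loop in `_compute_state_delta_for_full_sync` preserves, and the forward replay matches the stream-ordered loop in `_compute_state_delta_for_incremental_sync`.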
diff --git a/synapse/rest/admin/experimental_features.py b/synapse/rest/admin/experimental_features.py index d7913896d910..afb71f4a0fca 100644 --- a/synapse/rest/admin/experimental_features.py +++ b/synapse/rest/admin/experimental_features.py @@ -43,12 +43,15 @@ class ExperimentalFeature(str, Enum): MSC3881 = "msc3881" MSC3575 = "msc3575" + MSC4222 = "msc4222" def is_globally_enabled(self, config: "HomeServerConfig") -> bool: if self is ExperimentalFeature.MSC3881: return config.experimental.msc3881_enabled if self is ExperimentalFeature.MSC3575: return config.experimental.msc3575_enabled + if self is ExperimentalFeature.MSC4222: + return config.experimental.msc4222_enabled assert_never(self) diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py index 122708e933ca..5c62a74f41c4 100644 --- a/synapse/rest/client/sync.py +++ b/synapse/rest/client/sync.py @@ -152,6 +152,14 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: filter_id = parse_string(request, "filter") full_state = parse_boolean(request, "full_state", default=False) + use_state_after = False + if await self.store.is_feature_enabled( + user.to_string(), ExperimentalFeature.MSC4222 + ): + use_state_after = parse_boolean( + request, "org.matrix.msc4222.use_state_after", default=False + ) + logger.debug( "/sync: user=%r, timeout=%r, since=%r, " "set_presence=%r, filter_id=%r, device_id=%r", @@ -184,6 +192,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: full_state, device_id, last_ignore_accdata_streampos, + use_state_after, ) if filter_id is None: @@ -220,6 +229,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: filter_collection=filter_collection, is_guest=requester.is_guest, device_id=device_id, + use_state_after=use_state_after, ) since_token = None @@ -258,7 +268,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: # We know that the the requester has an access token since appservices # cannot use sync. 
response_content = await self.encode_response( - time_now, sync_result, requester, filter_collection + time_now, sync_config, sync_result, requester, filter_collection ) logger.debug("Event formatting complete") @@ -268,6 +278,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: async def encode_response( self, time_now: int, + sync_config: SyncConfig, sync_result: SyncResult, requester: Requester, filter: FilterCollection, @@ -292,7 +303,7 @@ async def encode_response( ) joined = await self.encode_joined( - sync_result.joined, time_now, serialize_options + sync_config, sync_result.joined, time_now, serialize_options ) invited = await self.encode_invited( @@ -304,7 +315,7 @@ async def encode_response( ) archived = await self.encode_archived( - sync_result.archived, time_now, serialize_options + sync_config, sync_result.archived, time_now, serialize_options ) logger.debug("building sync response dict") @@ -372,6 +383,7 @@ def encode_presence(events: List[UserPresenceState], time_now: int) -> JsonDict: @trace_with_opname("sync.encode_joined") async def encode_joined( self, + sync_config: SyncConfig, rooms: List[JoinedSyncResult], time_now: int, serialize_options: SerializeEventConfig, @@ -380,6 +392,7 @@ async def encode_joined( Encode the joined rooms in a sync result Args: + sync_config rooms: list of sync results for rooms this user is joined to time_now: current time - used as a baseline for age calculations serialize_options: Event serializer options @@ -389,7 +402,11 @@ async def encode_joined( joined = {} for room in rooms: joined[room.room_id] = await self.encode_room( - room, time_now, joined=True, serialize_options=serialize_options + sync_config, + room, + time_now, + joined=True, + serialize_options=serialize_options, ) return joined @@ -477,6 +494,7 @@ async def encode_knocked( @trace_with_opname("sync.encode_archived") async def encode_archived( self, + sync_config: SyncConfig, rooms: List[ArchivedSyncResult], time_now: int, serialize_options: SerializeEventConfig, @@ -485,6 +503,7 @@ async def encode_archived( Encode the archived rooms in a sync result Args: + sync_config rooms: list of sync results for rooms this user is joined to time_now: current time - used as a baseline for age calculations serialize_options: Event serializer options @@ -494,13 +513,18 @@ async def encode_archived( joined = {} for room in rooms: joined[room.room_id] = await self.encode_room( - room, time_now, joined=False, serialize_options=serialize_options + sync_config, + room, + time_now, + joined=False, + serialize_options=serialize_options, ) return joined async def encode_room( self, + sync_config: SyncConfig, room: Union[JoinedSyncResult, ArchivedSyncResult], time_now: int, joined: bool, @@ -508,6 +532,7 @@ async def encode_room( ) -> JsonDict: """ Args: + sync_config room: sync result for a single room time_now: current time - used as a baseline for age calculations token_id: ID of the user's auth token - used for namespacing @@ -548,13 +573,20 @@ async def encode_room( account_data = room.account_data + # We either include a `state` or `state_after` field depending on + # whether the client has opted in to the newer `state_after` behavior. 
+ if sync_config.use_state_after: + state_key_name = "org.matrix.msc4222.state_after" + else: + state_key_name = "state" + result: JsonDict = { "timeline": { "events": serialized_timeline, "prev_batch": await room.timeline.prev_batch.to_string(self.store), "limited": room.timeline.limited, }, - "state": {"events": serialized_state}, + state_key_name: {"events": serialized_state}, "account_data": {"events": account_data}, } @@ -688,6 +720,7 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: filter_collection=self.only_member_events_filter_collection, is_guest=requester.is_guest, device_id=device_id, + use_state_after=False, # We don't return any rooms so this flag is a no-op ) since_token = None diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index d7bbc6803731..1960d2f0e108 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -20,7 +20,7 @@ from typing import Collection, ContextManager, List, Optional from unittest.mock import AsyncMock, Mock, patch -from parameterized import parameterized +from parameterized import parameterized, parameterized_class from twisted.internet import defer from twisted.test.proto_helpers import MemoryReactor @@ -32,7 +32,13 @@ from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.federation.federation_base import event_from_pdu_json -from synapse.handlers.sync import SyncConfig, SyncRequestKey, SyncResult, SyncVersion +from synapse.handlers.sync import ( + SyncConfig, + SyncRequestKey, + SyncResult, + SyncVersion, + TimelineBatch, +) from synapse.rest import admin from synapse.rest.client import knock, login, room from synapse.server import HomeServer @@ -58,9 +64,21 @@ def generate_request_key() -> SyncRequestKey: return ("request_key", _request_key) +@parameterized_class( + ("use_state_after",), + [ + (True,), + (False,), + ], + class_name_func=lambda cls, + num, + params_dict: f"{cls.__name__}_{'state_after' if params_dict['use_state_after'] else 'state'}", +) class SyncTestCase(tests.unittest.HomeserverTestCase): """Tests Sync Handler.""" + use_state_after: bool + servlets = [ admin.register_servlets, knock.register_servlets, @@ -79,7 +97,9 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: def test_wait_for_sync_for_user_auth_blocking(self) -> None: user_id1 = "@user1:test" user_id2 = "@user2:test" - sync_config = generate_sync_config(user_id1) + sync_config = generate_sync_config( + user_id1, use_state_after=self.use_state_after + ) requester = create_requester(user_id1) self.reactor.advance(100) # So we get not 0 time @@ -112,7 +132,9 @@ def test_wait_for_sync_for_user_auth_blocking(self) -> None: self.auth_blocking._hs_disabled = False - sync_config = generate_sync_config(user_id2) + sync_config = generate_sync_config( + user_id2, use_state_after=self.use_state_after + ) requester = create_requester(user_id2) e = self.get_failure( @@ -141,7 +163,9 @@ def test_unknown_room_version(self) -> None: initial_result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user, device_id="dev"), + sync_config=generate_sync_config( + user, device_id="dev", use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -175,7 +199,9 @@ def test_unknown_room_version(self) -> None: result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user), + 
sync_config=generate_sync_config( + user, use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -188,7 +214,9 @@ def test_unknown_room_version(self) -> None: result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user, device_id="dev"), + sync_config=generate_sync_config( + user, device_id="dev", use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=initial_result.next_batch, @@ -220,7 +248,9 @@ def test_unknown_room_version(self) -> None: result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user), + sync_config=generate_sync_config( + user, use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -233,7 +263,9 @@ def test_unknown_room_version(self) -> None: result = self.get_success( self.sync_handler.wait_for_sync_for_user( requester, - sync_config=generate_sync_config(user, device_id="dev"), + sync_config=generate_sync_config( + user, device_id="dev", use_state_after=self.use_state_after + ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=initial_result.next_batch, @@ -276,7 +308,7 @@ def test_ban_wins_race_with_join(self) -> None: alice_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(owner), - generate_sync_config(owner), + generate_sync_config(owner, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -296,7 +328,9 @@ def test_ban_wins_race_with_join(self) -> None: # Eve syncs. eve_requester = create_requester(eve) - eve_sync_config = generate_sync_config(eve) + eve_sync_config = generate_sync_config( + eve, use_state_after=self.use_state_after + ) eve_sync_after_ban: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( eve_requester, @@ -367,7 +401,7 @@ def test_state_includes_changes_on_forks(self) -> None: initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -396,6 +430,7 @@ def test_state_includes_changes_on_forks(self) -> None: filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 2}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -442,7 +477,7 @@ def test_state_includes_changes_on_forks_when_events_excluded(self) -> None: initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -481,6 +516,7 @@ def test_state_includes_changes_on_forks_when_events_excluded(self) -> None: } }, ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -518,6 +554,8 @@ def test_state_includes_changes_on_long_lived_forks(self) -> None: ... and a filter that means we only return 1 event, represented by the dashed horizontal lines: `S2` must be included in the `state` section on the second sync. 
+ + When `use_state_after` is enabled, then we expect to see `s2` in the first sync. """ alice = self.register_user("alice", "password") alice_tok = self.login(alice, "password") @@ -528,7 +566,7 @@ def test_state_includes_changes_on_long_lived_forks(self) -> None: initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -554,6 +592,7 @@ def test_state_includes_changes_on_long_lived_forks(self) -> None: filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 1}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -567,10 +606,18 @@ def test_state_includes_changes_on_long_lived_forks(self) -> None: [e.event_id for e in room_sync.timeline.events], [e3_event], ) - self.assertEqual( - [e.event_id for e in room_sync.state.values()], - [], - ) + + if self.use_state_after: + # When using `state_after` we get told about s2 immediately + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [s2_event], + ) + else: + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [], + ) # Now send another event that points to S2, but not E3. with self._patch_get_latest_events([s2_event]): @@ -585,6 +632,7 @@ def test_state_includes_changes_on_long_lived_forks(self) -> None: filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 1}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -598,10 +646,19 @@ def test_state_includes_changes_on_long_lived_forks(self) -> None: [e.event_id for e in room_sync.timeline.events], [e4_event], ) - self.assertEqual( - [e.event_id for e in room_sync.state.values()], - [s2_event], - ) + + if self.use_state_after: + # When using `state_after` we got told about s2 previously, so we + # don't again. + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [], + ) + else: + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [s2_event], + ) def test_state_includes_changes_on_ungappy_syncs(self) -> None: """Test `state` where the sync is not gappy. @@ -638,6 +695,8 @@ def test_state_includes_changes_on_ungappy_syncs(self) -> None: This is the last chance for us to tell the client about S2, so it *must* be included in the response. + + When `use_state_after` is enabled, then we expect to see `s2` in the first sync. 
""" alice = self.register_user("alice", "password") alice_tok = self.login(alice, "password") @@ -648,7 +707,7 @@ def test_state_includes_changes_on_ungappy_syncs(self) -> None: initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -673,6 +732,7 @@ def test_state_includes_changes_on_ungappy_syncs(self) -> None: filter_collection=FilterCollection( self.hs, {"room": {"timeline": {"limit": 1}}} ), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -684,7 +744,11 @@ def test_state_includes_changes_on_ungappy_syncs(self) -> None: [e.event_id for e in room_sync.timeline.events], [e3_event], ) - self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()]) + if self.use_state_after: + # When using `state_after` we get told about s2 immediately + self.assertIn(s2_event, [e.event_id for e in room_sync.state.values()]) + else: + self.assertNotIn(s2_event, [e.event_id for e in room_sync.state.values()]) # More events, E4 and E5 with self._patch_get_latest_events([e3_event]): @@ -695,7 +759,7 @@ def test_state_includes_changes_on_ungappy_syncs(self) -> None: incremental_sync = self.get_success( self.sync_handler.wait_for_sync_for_user( alice_requester, - generate_sync_config(alice), + generate_sync_config(alice, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=initial_sync_result.next_batch, @@ -710,10 +774,19 @@ def test_state_includes_changes_on_ungappy_syncs(self) -> None: [e.event_id for e in room_sync.timeline.events], [e4_event, e5_event], ) - self.assertEqual( - [e.event_id for e in room_sync.state.values()], - [s2_event], - ) + + if self.use_state_after: + # When using `state_after` we got told about s2 previously, so we + # don't again. 
+ self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [], + ) + else: + self.assertEqual( + [e.event_id for e in room_sync.state.values()], + [s2_event], + ) @parameterized.expand( [ @@ -721,7 +794,8 @@ def test_state_includes_changes_on_ungappy_syncs(self) -> None: (True, False), (False, True), (True, True), - ] + ], + name_func=lambda func, num, p: f"{func.__name__}_{p.args[0]}_{p.args[1]}", ) def test_archived_rooms_do_not_include_state_after_leave( self, initial_sync: bool, empty_timeline: bool @@ -749,7 +823,7 @@ def test_archived_rooms_do_not_include_state_after_leave( initial_sync_result = self.get_success( self.sync_handler.wait_for_sync_for_user( bob_requester, - generate_sync_config(bob), + generate_sync_config(bob, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -780,7 +854,9 @@ def test_archived_rooms_do_not_include_state_after_leave( self.sync_handler.wait_for_sync_for_user( bob_requester, generate_sync_config( - bob, filter_collection=FilterCollection(self.hs, filter_dict) + bob, + filter_collection=FilterCollection(self.hs, filter_dict), + use_state_after=self.use_state_after, ), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), @@ -791,7 +867,15 @@ def test_archived_rooms_do_not_include_state_after_leave( if empty_timeline: # The timeline should be empty self.assertEqual(sync_room_result.timeline.events, []) + else: + # The last three events in the timeline should be those leading up to the + # leave + self.assertEqual( + [e.event_id for e in sync_room_result.timeline.events[-3:]], + [before_message_event, before_state_event, leave_event], + ) + if empty_timeline or self.use_state_after: # And the state should include the leave event... self.assertEqual( sync_room_result.state[("m.room.member", bob)].event_id, leave_event @@ -801,12 +885,6 @@ def test_archived_rooms_do_not_include_state_after_leave( sync_room_result.state[("test_state", "")].event_id, before_state_event ) else: - # The last three events in the timeline should be those leading up to the - # leave - self.assertEqual( - [e.event_id for e in sync_room_result.timeline.events[-3:]], - [before_message_event, before_state_event, leave_event], - ) # ... 
And the state should be empty self.assertEqual(sync_room_result.state, {}) @@ -879,7 +957,7 @@ async def _check_sigs_and_hash_for_pulled_events_and_fetch( sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -928,7 +1006,7 @@ async def _check_sigs_and_hash_for_pulled_events_and_fetch( private_sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(user2), - generate_sync_config(user2), + generate_sync_config(user2, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -954,7 +1032,7 @@ def test_push_rules_with_bad_account_data(self) -> None: sync_result: SyncResult = self.get_success( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), ) @@ -991,7 +1069,7 @@ def test_wait_for_future_sync_token(self) -> None: sync_d = defer.ensureDeferred( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=since_token, @@ -1046,7 +1124,7 @@ def test_wait_for_invalid_future_sync_token( sync_d = defer.ensureDeferred( self.sync_handler.wait_for_sync_for_user( create_requester(user), - generate_sync_config(user), + generate_sync_config(user, use_state_after=self.use_state_after), sync_version=SyncVersion.SYNC_V2, request_key=generate_request_key(), since_token=since_token, @@ -1062,6 +1140,7 @@ def generate_sync_config( user_id: str, device_id: Optional[str] = "device_id", filter_collection: Optional[FilterCollection] = None, + use_state_after: bool = False, ) -> SyncConfig: """Generate a sync config (with a unique request key). @@ -1069,7 +1148,8 @@ def generate_sync_config( user_id: user who is syncing. device_id: device that is syncing. Defaults to "device_id". filter_collection: filter to apply. Defaults to the default filter (ie, - return everything, with a default limit) + return everything, with a default limit) + use_state_after: whether the `use_state_after` flag was set. """ if filter_collection is None: filter_collection = Filtering(Mock()).DEFAULT_FILTER_COLLECTION @@ -1079,4 +1159,106 @@ def generate_sync_config( filter_collection=filter_collection, is_guest=False, device_id=device_id, + use_state_after=use_state_after, ) + + +class SyncStateAfterTestCase(tests.unittest.HomeserverTestCase): + """Tests Sync Handler state behavior when using `use_state_after`.""" + + servlets = [ + admin.register_servlets, + knock.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.sync_handler = self.hs.get_sync_handler() + self.store = self.hs.get_datastores().main + + # AuthBlocking reads from the hs' config on initialization.
We need to + # modify its config instead of the hs' + self.auth_blocking = self.hs.get_auth_blocking() + + def test_initial_sync_multiple_deltas(self) -> None: + """Test that if multiple state deltas have happened during processing of + a full state sync we return the correct state""" + + user = self.register_user("user", "password") + tok = self.login("user", "password") + + # Create a room as the user and set some custom state. + joined_room = self.helper.create_room_as(user, tok=tok) + + first_state = self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 1}, tok=tok + ) + + # Take a snapshot of the stream token, to simulate doing an initial sync + # at this point. + end_stream_token = self.hs.get_event_sources().get_current_token() + + # Send some state *after* the stream token + self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 2}, tok=tok + ) + + # Calculating the full state will return the first state, and not the + # second. + state = self.get_success( + self.sync_handler._compute_state_delta_for_full_sync( + room_id=joined_room, + sync_config=generate_sync_config(user, use_state_after=True), + batch=TimelineBatch( + prev_batch=end_stream_token, events=[], limited=True + ), + end_token=end_stream_token, + members_to_fetch=None, + timeline_state={}, + joined=True, + ) + ) + self.assertEqual(state[("m.test_event", "")], first_state["event_id"]) + + def test_incremental_sync_multiple_deltas(self) -> None: + """Test that if multiple state deltas have happened since an incremental + state sync we return the correct state""" + + user = self.register_user("user", "password") + tok = self.login("user", "password") + + # Create a room as the user and set some custom state. + joined_room = self.helper.create_room_as(user, tok=tok) + + # Take a snapshot of the stream token, to simulate doing an incremental sync + # from this point. + since_token = self.hs.get_event_sources().get_current_token() + + self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 1}, tok=tok + ) + + # Send some state *after* the stream token + second_state = self.helper.send_state( + joined_room, event_type="m.test_event", body={"num": 2}, tok=tok + ) + + end_stream_token = self.hs.get_event_sources().get_current_token() + + # Calculating the incremental state will return the second state, and not the + # first. + state = self.get_success( + self.sync_handler._compute_state_delta_for_incremental_sync( + room_id=joined_room, + sync_config=generate_sync_config(user, use_state_after=True), + batch=TimelineBatch( + prev_batch=end_stream_token, events=[], limited=True + ), + since_token=since_token, + end_token=end_stream_token, + members_to_fetch=None, + timeline_state={}, + ) + ) + self.assertEqual(state[("m.test_event", "")], second_state["event_id"]) From 211c31dbd77281d114b9f08ef61c867d56316e8c Mon Sep 17 00:00:00 2001 From: Alexander Udovichenko Date: Tue, 5 Nov 2024 21:08:17 +0300 Subject: [PATCH 064/147] Fix WheelTimer implementation that can expire timeouts early (#17850) When entries were inserted at the end of the timer queue, an unnecessary entry (with a duplicated key) could be inserted. This could lead to some timeouts expiring early and to wasted memory.
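The bucketing semantics at stake here are easier to check in miniature. Below is a rough toy model — `TinyWheelTimer` is an invented name, and it deliberately keys buckets by tick in a dict (which makes duplicate keys impossible by construction) rather than mirroring the real implementation's contiguous bucket list, where the duplicate-key bug lived:

```python
from typing import Dict, Generic, List, Set, TypeVar

T = TypeVar("T")


class TinyWheelTimer(Generic[T]):
    """Toy model of the WheelTimer semantics the regression tests pin down.

    An object is filed under the bucket *after* its deadline's tick, so it
    can never be fetched early; fetch(now) drains every bucket up to now's
    tick.
    """

    def __init__(self, bucket_size: int = 5) -> None:
        self.bucket_size = bucket_size
        # Keying buckets by tick sidesteps the hazard of a contiguous list,
        # which must be extended from exactly `entries[-1].end_key + 1`.
        self.buckets: Dict[int, Set[T]] = {}

    def insert(self, now: int, obj: T, then: int) -> None:
        # Deadlines in the past are clamped to the tick just after `now`.
        key = max(then // self.bucket_size, now // self.bucket_size) + 1
        self.buckets.setdefault(key, set()).add(obj)

    def fetch(self, now: int) -> List[T]:
        now_key = now // self.bucket_size
        out: List[T] = []
        for key in sorted(k for k in self.buckets if k <= now_key):
            out.extend(self.buckets.pop(key))
        return out


timer: TinyWheelTimer[str] = TinyWheelTimer(bucket_size=5)
timer.insert(100, "1", 150)
assert timer.fetch(149) == []  # not ripe yet
assert timer.fetch(156) == ["1"]  # the bucket for tick 31 drains
assert timer.fetch(170) == []  # an object is returned at most once
```

The asserts mirror `test_single_insert_fetch` in the diff below: an object inserted at `now=100` with deadline `then=150` must stay invisible at 149 and drain exactly once at 156.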
--- changelog.d/17850.bugfix | 1 + synapse/util/wheel_timer.py | 6 ++-- tests/util/test_wheel_timer.py | 50 ++++++++++++++++++---------------- 3 files changed, 29 insertions(+), 28 deletions(-) create mode 100644 changelog.d/17850.bugfix diff --git a/changelog.d/17850.bugfix b/changelog.d/17850.bugfix new file mode 100644 index 000000000000..8ea99c4ef9ac --- /dev/null +++ b/changelog.d/17850.bugfix @@ -0,0 +1 @@ +Fix bug when some presence and typing timeouts can expire early. \ No newline at end of file diff --git a/synapse/util/wheel_timer.py b/synapse/util/wheel_timer.py index 44b109bdfd63..95eb1d718590 100644 --- a/synapse/util/wheel_timer.py +++ b/synapse/util/wheel_timer.py @@ -47,7 +47,6 @@ def __init__(self, bucket_size: int = 5000) -> None: """ self.bucket_size: int = bucket_size self.entries: List[_Entry[T]] = [] - self.current_tick: int = 0 def insert(self, now: int, obj: T, then: int) -> None: """Inserts object into timer. @@ -78,11 +77,10 @@ def insert(self, now: int, obj: T, then: int) -> None: self.entries[max(min_key, then_key) - min_key].elements.add(obj) return - next_key = now_key + 1 if self.entries: - last_key = self.entries[-1].end_key + last_key = self.entries[-1].end_key + 1 else: - last_key = next_key + last_key = now_key + 1 # Handle the case when `then` is in the past and `entries` is empty. then_key = max(last_key, then_key) diff --git a/tests/util/test_wheel_timer.py b/tests/util/test_wheel_timer.py index 173a7cfaeca2..6fa575a18e49 100644 --- a/tests/util/test_wheel_timer.py +++ b/tests/util/test_wheel_timer.py @@ -28,53 +28,55 @@ class WheelTimerTestCase(unittest.TestCase): def test_single_insert_fetch(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj = object() - wheel.insert(100, obj, 150) + wheel.insert(100, "1", 150) self.assertListEqual(wheel.fetch(101), []) self.assertListEqual(wheel.fetch(110), []) self.assertListEqual(wheel.fetch(120), []) self.assertListEqual(wheel.fetch(130), []) self.assertListEqual(wheel.fetch(149), []) - self.assertListEqual(wheel.fetch(156), [obj]) + self.assertListEqual(wheel.fetch(156), ["1"]) self.assertListEqual(wheel.fetch(170), []) def test_multi_insert(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj1 = object() - obj2 = object() - obj3 = object() - wheel.insert(100, obj1, 150) - wheel.insert(105, obj2, 130) - wheel.insert(106, obj3, 160) + wheel.insert(100, "1", 150) + wheel.insert(105, "2", 130) + wheel.insert(106, "3", 160) self.assertListEqual(wheel.fetch(110), []) - self.assertListEqual(wheel.fetch(135), [obj2]) + self.assertListEqual(wheel.fetch(135), ["2"]) self.assertListEqual(wheel.fetch(149), []) - self.assertListEqual(wheel.fetch(158), [obj1]) + self.assertListEqual(wheel.fetch(158), ["1"]) self.assertListEqual(wheel.fetch(160), []) - self.assertListEqual(wheel.fetch(200), [obj3]) + self.assertListEqual(wheel.fetch(200), ["3"]) self.assertListEqual(wheel.fetch(210), []) def test_insert_past(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj = object() - wheel.insert(100, obj, 50) - self.assertListEqual(wheel.fetch(120), [obj]) + wheel.insert(100, "1", 50) + self.assertListEqual(wheel.fetch(120), ["1"]) def test_insert_past_multi(self) -> None: wheel: WheelTimer[object] = WheelTimer(bucket_size=5) - obj1 = object() - obj2 = object() - obj3 = object() - wheel.insert(100, obj1, 150) - wheel.insert(100, obj2, 140) - wheel.insert(100, obj3, 50) - self.assertListEqual(wheel.fetch(110), [obj3]) + wheel.insert(100, "1", 150) + wheel.insert(100, 
"2", 140) + wheel.insert(100, "3", 50) + self.assertListEqual(wheel.fetch(110), ["3"]) self.assertListEqual(wheel.fetch(120), []) - self.assertListEqual(wheel.fetch(147), [obj2]) - self.assertListEqual(wheel.fetch(200), [obj1]) + self.assertListEqual(wheel.fetch(147), ["2"]) + self.assertListEqual(wheel.fetch(200), ["1"]) self.assertListEqual(wheel.fetch(240), []) + + def test_multi_insert_then_past(self) -> None: + wheel: WheelTimer[object] = WheelTimer(bucket_size=5) + + wheel.insert(100, "1", 150) + wheel.insert(100, "2", 160) + wheel.insert(100, "3", 155) + + self.assertListEqual(wheel.fetch(110), []) + self.assertListEqual(wheel.fetch(158), ["1"]) From eac170b21b89c86780e22bab7a2a74b33de50fc6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Tue, 5 Nov 2024 16:54:18 -0600 Subject: [PATCH 065/147] Use more correct changelog entries for refactoring `Generator` usage (#17890) Use more correct changelog entries for refactoring `Generator` usage - https://github.com/element-hq/synapse/pull/17813 - https://github.com/element-hq/synapse/pull/17814 - https://github.com/element-hq/synapse/pull/17815 - https://github.com/element-hq/synapse/pull/17816 - https://github.com/element-hq/synapse/pull/17817 ### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- changelog.d/17813.bugfix | 1 - changelog.d/17813.misc | 1 + changelog.d/17814.bugfix | 1 - changelog.d/17814.misc | 1 + changelog.d/17815.bugfix | 1 - changelog.d/17815.misc | 1 + changelog.d/17816.bugfix | 1 - changelog.d/17816.misc | 1 + changelog.d/17817.bugfix | 1 - changelog.d/17817.misc | 1 + changelog.d/17818.bugfix | 1 - changelog.d/17818.misc | 1 + changelog.d/17890.misc | 1 + 13 files changed, 7 insertions(+), 6 deletions(-) delete mode 100644 changelog.d/17813.bugfix create mode 100644 changelog.d/17813.misc delete mode 100644 changelog.d/17814.bugfix create mode 100644 changelog.d/17814.misc delete mode 100644 changelog.d/17815.bugfix create mode 100644 changelog.d/17815.misc delete mode 100644 changelog.d/17816.bugfix create mode 100644 changelog.d/17816.misc delete mode 100644 changelog.d/17817.bugfix create mode 100644 changelog.d/17817.misc delete mode 100644 changelog.d/17818.bugfix create mode 100644 changelog.d/17818.misc create mode 100644 changelog.d/17890.misc diff --git a/changelog.d/17813.bugfix b/changelog.d/17813.bugfix deleted file mode 100644 index 5dd276709b0b..000000000000 --- a/changelog.d/17813.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid lost data on some database query retries. 
diff --git a/changelog.d/17813.misc b/changelog.d/17813.misc new file mode 100644 index 000000000000..f8676aee599f --- /dev/null +++ b/changelog.d/17813.misc @@ -0,0 +1 @@ +Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17814.bugfix b/changelog.d/17814.bugfix deleted file mode 100644 index 5dd276709b0b..000000000000 --- a/changelog.d/17814.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid lost data on some database query retries. diff --git a/changelog.d/17814.misc b/changelog.d/17814.misc new file mode 100644 index 000000000000..f8676aee599f --- /dev/null +++ b/changelog.d/17814.misc @@ -0,0 +1 @@ +Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17815.bugfix b/changelog.d/17815.bugfix deleted file mode 100644 index 5dd276709b0b..000000000000 --- a/changelog.d/17815.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid lost data on some database query retries. diff --git a/changelog.d/17815.misc b/changelog.d/17815.misc new file mode 100644 index 000000000000..f8676aee599f --- /dev/null +++ b/changelog.d/17815.misc @@ -0,0 +1 @@ +Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17816.bugfix b/changelog.d/17816.bugfix deleted file mode 100644 index 5dd276709b0b..000000000000 --- a/changelog.d/17816.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid lost data on some database query retries. diff --git a/changelog.d/17816.misc b/changelog.d/17816.misc new file mode 100644 index 000000000000..f8676aee599f --- /dev/null +++ b/changelog.d/17816.misc @@ -0,0 +1 @@ +Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17817.bugfix b/changelog.d/17817.bugfix deleted file mode 100644 index 5dd276709b0b..000000000000 --- a/changelog.d/17817.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid lost data on some database query retries. diff --git a/changelog.d/17817.misc b/changelog.d/17817.misc new file mode 100644 index 000000000000..f8676aee599f --- /dev/null +++ b/changelog.d/17817.misc @@ -0,0 +1 @@ +Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17818.bugfix b/changelog.d/17818.bugfix deleted file mode 100644 index 5dd276709b0b..000000000000 --- a/changelog.d/17818.bugfix +++ /dev/null @@ -1 +0,0 @@ -Avoid lost data on some database query retries. diff --git a/changelog.d/17818.misc b/changelog.d/17818.misc new file mode 100644 index 000000000000..f8676aee599f --- /dev/null +++ b/changelog.d/17818.misc @@ -0,0 +1 @@ +Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17890.misc b/changelog.d/17890.misc new file mode 100644 index 000000000000..f8676aee599f --- /dev/null +++ b/changelog.d/17890.misc @@ -0,0 +1 @@ +Refactor database calls to remove `Generator` usage. From 46bd7e136dc8c4be967d9fcb571e07493d16a9fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 00:24:40 +0000 Subject: [PATCH 066/147] Bump actions/download-artifact from 3 to 4.1.7 in /.github/workflows (#17657) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 3 to 4.1.7.
Release notes

Sourced from actions/download-artifact's releases: entries for v4.1.0 through v4.1.7, each a brief "What's Changed" note linking to the full changelog on GitHub, e.g. https://github.com/actions/download-artifact/compare/v4.1.6...v4.1.7.

... (truncated)

Commits
  • 65a9edc Merge pull request #325 from bethanyj28/main
  • fdd1595 licensed
  • c13dba1 update @actions/artifact dependency
  • 0daa75e Merge pull request #324 from actions/eggyhead/use-artifact-v2.1.6
  • 9c19ed7 Merge branch 'main' into eggyhead/use-artifact-v2.1.6
  • 3d3ea87 updating license
  • 89af5db updating artifact package v2.1.6
  • b4aefff Merge pull request #323 from actions/eggyhead/update-artifact-v215
  • 8caf195 package lock update
  • d7a2ec4 updating package version
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/download-artifact&package-manager=github_actions&previous-version=3&new-version=4.1.7)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) You can trigger a rebase of this PR by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself) You can disable automated security fix PRs for this repo from the [Security Alerts page](https://github.com/element-hq/synapse/network/alerts).
> **Note** > Automatic rebases have been disabled on this pull request as it has been open for over 30 days. --------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Devon Hudson --- .github/workflows/release-artifacts.yml | 8 ++++---- changelog.d/17657.misc | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17657.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 9f0feffd9483..61e118c97331 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -92,7 +92,7 @@ jobs: mv /tmp/.buildx-cache-new /tmp/.buildx-cache - name: Upload debs as artifacts - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes + uses: actions/upload-artifact@v4 with: name: debs path: debs/* @@ -156,7 +156,7 @@ jobs: CARGO_NET_GIT_FETCH_WITH_CLI: true CIBW_ENVIRONMENT_PASS_LINUX: CARGO_NET_GIT_FETCH_WITH_CLI - - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes + - uses: actions/upload-artifact@v4 with: name: Wheel path: ./wheelhouse/*.whl @@ -177,7 +177,7 @@ jobs: - name: Build sdist run: python -m build --sdist - - uses: actions/upload-artifact@v3 # Don't upgrade to v4; broken: https://github.com/actions/upload-artifact#breaking-changes + - uses: actions/upload-artifact@v4 with: name: Sdist path: dist/*.tar.gz @@ -194,7 +194,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download all workflow run artifacts - uses: actions/download-artifact@v3 # Don't upgrade to v4, it should match upload-artifact + uses: actions/download-artifact@v4 - name: Build a tarball for the debs run: tar -cvJf debs.tar.xz debs - name: Attach to release diff --git a/changelog.d/17657.misc b/changelog.d/17657.misc new file mode 100644 index 000000000000..aff558adf792 --- /dev/null +++ b/changelog.d/17657.misc @@ -0,0 +1 @@ +Bump actions/download-artifact and actions/upload-artifact from v3 -> v4. From 75698a3e53c18e8baecf47bcc2084a6d167bdbde Mon Sep 17 00:00:00 2001 From: Sandro Date: Wed, 6 Nov 2024 15:03:46 +0100 Subject: [PATCH 067/147] Improve nix flake to use nixpkgs-unstable in lieu of master (#17852) --- changelog.d/17852.misc | 1 + flake.lock | 8 ++++---- flake.nix | 22 +++++++++++----------- 3 files changed, 16 insertions(+), 15 deletions(-) create mode 100644 changelog.d/17852.misc diff --git a/changelog.d/17852.misc b/changelog.d/17852.misc new file mode 100644 index 000000000000..b1b7ac97345c --- /dev/null +++ b/changelog.d/17852.misc @@ -0,0 +1 @@ +The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. 
diff --git a/flake.lock b/flake.lock index 9b360fa33eed..6b25cef3fc96 100644 --- a/flake.lock +++ b/flake.lock @@ -186,16 +186,16 @@ }, "nixpkgs_2": { "locked": { - "lastModified": 1690535733, - "narHash": "sha256-WgjUPscQOw3cB8yySDGlyzo6cZNihnRzUwE9kadv/5I=", + "lastModified": 1729265718, + "narHash": "sha256-4HQI+6LsO3kpWTYuVGIzhJs1cetFcwT7quWCk/6rqeo=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "8cacc05fbfffeaab910e8c2c9e2a7c6b32ce881a", + "rev": "ccc0c2126893dd20963580b6478d1a10a4512185", "type": "github" }, "original": { "owner": "NixOS", - "ref": "master", + "ref": "nixpkgs-unstable", "repo": "nixpkgs", "type": "github" } diff --git a/flake.nix b/flake.nix index 31f283293971..bc360ae44a2b 100644 --- a/flake.nix +++ b/flake.nix @@ -3,13 +3,13 @@ # (https://github.com/matrix-org/complement) Matrix homeserver test suites are also # installed automatically. # -# You must have already installed Nix (https://nixos.org) on your system to use this. -# Nix can be installed on Linux or MacOS; NixOS is not required. Windows is not -# directly supported, but Nix can be installed inside of WSL2 or even Docker +# You must have already installed Nix (https://nixos.org/download/) on your system to use this. +# Nix can be installed on any Linux distribution or MacOS; NixOS is not required. +# Windows is not directly supported, but Nix can be installed inside of WSL2 or even Docker # containers. Please refer to https://nixos.org/download for details. # # You must also enable support for flakes in Nix. See the following for how to -# do so permanently: https://nixos.wiki/wiki/Flakes#Enable_flakes +# do so permanently: https://wiki.nixos.org/wiki/Flakes#Other_Distros,_without_Home-Manager # # Be warned: you'll need over 3.75 GB of free space to download all the dependencies. # @@ -20,7 +20,7 @@ # locally from "services", such as PostgreSQL and Redis. # # You should now be dropped into a new shell with all programs and dependencies -# availabile to you! +# available to you! # # You can start up pre-configured local Synapse, PostgreSQL and Redis instances by # running: `devenv up`. To stop them, use Ctrl-C. @@ -39,9 +39,9 @@ { inputs = { - # Use the master/unstable branch of nixpkgs. Used to fetch the latest + # Use the rolling/unstable branch of nixpkgs. Used to fetch the latest # available versions of packages. - nixpkgs.url = "github:NixOS/nixpkgs/master"; + nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable"; # Output a development shell for x86_64/aarch64 Linux/Darwin (MacOS). systems.url = "github:nix-systems/default"; # A development environment manager built on Nix. See https://devenv.sh. @@ -50,7 +50,7 @@ rust-overlay.url = "github:oxalica/rust-overlay"; }; - outputs = { self, nixpkgs, devenv, systems, rust-overlay, ... } @ inputs: + outputs = { nixpkgs, devenv, systems, rust-overlay, ... } @ inputs: let forEachSystem = nixpkgs.lib.genAttrs (import systems); in { @@ -126,7 +126,7 @@ # Automatically activate the poetry virtualenv upon entering the shell. languages.python.poetry.activate.enable = true; # Install all extra Python dependencies; this is needed to run the unit - # tests and utilitise all Synapse features. + # tests and utilise all Synapse features. languages.python.poetry.install.arguments = ["--extras all"]; # Install the 'matrix-synapse' package from the local checkout. languages.python.poetry.install.installRootPackage = true; @@ -163,8 +163,8 @@ # Create a postgres user called 'synapse_user' which has ownership # over the 'synapse' database.
services.postgres.initialScript = '' - CREATE USER synapse_user; - ALTER DATABASE synapse OWNER TO synapse_user; + CREATE USER synapse_user; + ALTER DATABASE synapse OWNER TO synapse_user; ''; # Redis is needed in order to run Synapse in worker mode. From 61aadb158f10122039a78351b125cb5fd7293aba Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Wed, 6 Nov 2024 15:21:45 +0000 Subject: [PATCH 068/147] Use unique name for each os.arch variant when uploading Wheels (#17905) ### Pull Request Checklist * [X] Pull request is based on the develop branch * [X] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- .github/workflows/release-artifacts.yml | 2 +- changelog.d/17905.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17905.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 61e118c97331..1e2513b28991 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -158,7 +158,7 @@ jobs: - uses: actions/upload-artifact@v4 with: - name: Wheel + name: Wheel-${{ matrix.os }}-${{ matrix.arch }} path: ./wheelhouse/*.whl build-sdist: diff --git a/changelog.d/17905.misc b/changelog.d/17905.misc new file mode 100644 index 000000000000..32ef50dbacf8 --- /dev/null +++ b/changelog.d/17905.misc @@ -0,0 +1 @@ +Use unique name for each os.arch variant when uploading Wheel artifacts. 
From 9266ba72b517357bd78cc66d9b109db038a0372c Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Wed, 6 Nov 2024 09:03:06 -0700 Subject: [PATCH 069/147] 1.119.0rc1 --- CHANGES.md | 51 +++++++++++++++++++++++++++++++++++++++ changelog.d/17374.feature | 1 - changelog.d/17657.misc | 1 - changelog.d/17786.misc | 1 - changelog.d/17809.bugfix | 1 - changelog.d/17813.misc | 1 - changelog.d/17814.misc | 1 - changelog.d/17815.misc | 1 - changelog.d/17816.misc | 1 - changelog.d/17817.misc | 1 - changelog.d/17818.misc | 1 - changelog.d/17830.misc | 1 - changelog.d/17839.bugfix | 1 - changelog.d/17847.bugfix | 2 -- changelog.d/17850.bugfix | 1 - changelog.d/17852.misc | 1 - changelog.d/17861.bugfix | 1 - changelog.d/17884.misc | 1 - changelog.d/17887.misc | 1 - changelog.d/17888.feature | 1 - changelog.d/17890.misc | 1 - changelog.d/17894.misc | 1 - changelog.d/17905.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 25 files changed, 58 insertions(+), 24 deletions(-) delete mode 100644 changelog.d/17374.feature delete mode 100644 changelog.d/17657.misc delete mode 100644 changelog.d/17786.misc delete mode 100644 changelog.d/17809.bugfix delete mode 100644 changelog.d/17813.misc delete mode 100644 changelog.d/17814.misc delete mode 100644 changelog.d/17815.misc delete mode 100644 changelog.d/17816.misc delete mode 100644 changelog.d/17817.misc delete mode 100644 changelog.d/17818.misc delete mode 100644 changelog.d/17830.misc delete mode 100644 changelog.d/17839.bugfix delete mode 100644 changelog.d/17847.bugfix delete mode 100644 changelog.d/17850.bugfix delete mode 100644 changelog.d/17852.misc delete mode 100644 changelog.d/17861.bugfix delete mode 100644 changelog.d/17884.misc delete mode 100644 changelog.d/17887.misc delete mode 100644 changelog.d/17888.feature delete mode 100644 changelog.d/17890.misc delete mode 100644 changelog.d/17894.misc delete mode 100644 changelog.d/17905.misc diff --git a/CHANGES.md b/CHANGES.md index bcc834021041..6f263f7f7d32 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,54 @@ +# Synapse 1.119.0rc1 (2024-11-06) + +### Features + +- Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374)) +- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222). ([\#17888](https://github.com/element-hq/synapse/issues/17888)) + +### Bugfixes + +- Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. ([\#17809](https://github.com/element-hq/synapse/issues/17809)) +- Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. ([\#17839](https://github.com/element-hq/synapse/issues/17839)) +- Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in + the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847)) +- Fix bug where some presence and typing timeouts can expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850)) +- Fix detection when the built Rust library was outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861)) + +### Internal Changes + +- Bump actions/download-artifact and actions/upload-artifact from v3 -> v4. ([\#17657](https://github.com/element-hq/synapse/issues/17657)) +- Add a test for downloading and thumbnailing a CMYK JPEG. 
([\#17786](https://github.com/element-hq/synapse/issues/17786)) +- Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890)) +- Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830)) +- The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. ([\#17852](https://github.com/element-hq/synapse/issues/17852)) +- Minor speed-up of sliding sync by computing extensions results in parallel. ([\#17884](https://github.com/element-hq/synapse/issues/17884)) +- Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887)) +- Remove usage of internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894)) +- Use unique name for each os.arch variant when uploading Wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905)) + + + +### Updates to locked dependencies + +* Bump actions/download-artifact from 3 to 4.1.7 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657)) +* Bump anyhow from 1.0.89 to 1.0.90. ([\#17858](https://github.com/element-hq/synapse/issues/17858)) +* Bump anyhow from 1.0.90 to 1.0.91. ([\#17876](https://github.com/element-hq/synapse/issues/17876)) +* Bump anyhow from 1.0.91 to 1.0.92. ([\#17901](https://github.com/element-hq/synapse/issues/17901)) +* Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877)) +* Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853)) +* Bump mypy-zope from 1.0.7 to 1.0.8. ([\#17898](https://github.com/element-hq/synapse/issues/17898)) +* Bump phonenumbers from 8.13.47 to 8.13.48. ([\#17880](https://github.com/element-hq/synapse/issues/17880)) +* Bump phonenumbers from 8.13.48 to 8.13.49. ([\#17899](https://github.com/element-hq/synapse/issues/17899)) +* Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879)) +* Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874)) +* Bump ruff from 0.6.9 to 0.7.1. ([\#17868](https://github.com/element-hq/synapse/issues/17868)) +* Bump ruff from 0.7.1 to 0.7.2. ([\#17897](https://github.com/element-hq/synapse/issues/17897)) +* Bump serde from 1.0.210 to 1.0.213. ([\#17875](https://github.com/element-hq/synapse/issues/17875)) +* Bump serde from 1.0.213 to 1.0.214. ([\#17900](https://github.com/element-hq/synapse/issues/17900)) +* Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857)) +* Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855)) +* Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856)) + # Synapse 1.118.0 (2024-10-29) No significant changes since 1.118.0rc1. 
diff --git a/changelog.d/17374.feature b/changelog.d/17374.feature deleted file mode 100644 index 3321f1894726..000000000000 --- a/changelog.d/17374.feature +++ /dev/null @@ -1 +0,0 @@ -Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. \ No newline at end of file diff --git a/changelog.d/17657.misc b/changelog.d/17657.misc deleted file mode 100644 index aff558adf792..000000000000 --- a/changelog.d/17657.misc +++ /dev/null @@ -1 +0,0 @@ -Bump actions/download-artifact and actions/upload-artifact from v3 -> v4. diff --git a/changelog.d/17786.misc b/changelog.d/17786.misc deleted file mode 100644 index 59eb3f4dbdb4..000000000000 --- a/changelog.d/17786.misc +++ /dev/null @@ -1 +0,0 @@ -Add a test for downloading and thumbnailing a CMYK JPEG. \ No newline at end of file diff --git a/changelog.d/17809.bugfix b/changelog.d/17809.bugfix deleted file mode 100644 index e244a36bd36a..000000000000 --- a/changelog.d/17809.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug with sliding sync where `$LAZY`-loading room members would not return `required_state` membership in incremental syncs. diff --git a/changelog.d/17813.misc b/changelog.d/17813.misc deleted file mode 100644 index f8676aee599f..000000000000 --- a/changelog.d/17813.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17814.misc b/changelog.d/17814.misc deleted file mode 100644 index f8676aee599f..000000000000 --- a/changelog.d/17814.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17815.misc b/changelog.d/17815.misc deleted file mode 100644 index f8676aee599f..000000000000 --- a/changelog.d/17815.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17816.misc b/changelog.d/17816.misc deleted file mode 100644 index f8676aee599f..000000000000 --- a/changelog.d/17816.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17817.misc b/changelog.d/17817.misc deleted file mode 100644 index f8676aee599f..000000000000 --- a/changelog.d/17817.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17818.misc b/changelog.d/17818.misc deleted file mode 100644 index f8676aee599f..000000000000 --- a/changelog.d/17818.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17830.misc b/changelog.d/17830.misc deleted file mode 100644 index b2cd91c0ad01..000000000000 --- a/changelog.d/17830.misc +++ /dev/null @@ -1 +0,0 @@ -Include the destination in the error of 'Destination mismatch' on federation requests. diff --git a/changelog.d/17839.bugfix b/changelog.d/17839.bugfix deleted file mode 100644 index 57667a6df5d7..000000000000 --- a/changelog.d/17839.bugfix +++ /dev/null @@ -1 +0,0 @@ -Check if user has membership in a room before tagging it. Contributed by Lama Alosaimi. \ No newline at end of file diff --git a/changelog.d/17847.bugfix b/changelog.d/17847.bugfix deleted file mode 100644 index 0ba39df94dfd..000000000000 --- a/changelog.d/17847.bugfix +++ /dev/null @@ -1,2 +0,0 @@ -Fix a bug in the admin redact endpoint where the background task would not run if a worker was specified in -the config option `run_background_tasks_on`. 
\ No newline at end of file diff --git a/changelog.d/17850.bugfix b/changelog.d/17850.bugfix deleted file mode 100644 index 8ea99c4ef9ac..000000000000 --- a/changelog.d/17850.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug when some presence and typing timeouts can expire early. \ No newline at end of file diff --git a/changelog.d/17852.misc b/changelog.d/17852.misc deleted file mode 100644 index b1b7ac97345c..000000000000 --- a/changelog.d/17852.misc +++ /dev/null @@ -1 +0,0 @@ -The nix flake inside the repository no longer tracks nixpkgs/master to not catch the latest bugs from a PR merged 5 minutes ago. diff --git a/changelog.d/17861.bugfix b/changelog.d/17861.bugfix deleted file mode 100644 index abee7a30f700..000000000000 --- a/changelog.d/17861.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix detection when the built Rust library was outdated when using source installations. diff --git a/changelog.d/17884.misc b/changelog.d/17884.misc deleted file mode 100644 index 9dfa13f853c0..000000000000 --- a/changelog.d/17884.misc +++ /dev/null @@ -1 +0,0 @@ -Minor speed-up of sliding sync by computing extensions results in parallel. diff --git a/changelog.d/17887.misc b/changelog.d/17887.misc deleted file mode 100644 index 6be32caee6a4..000000000000 --- a/changelog.d/17887.misc +++ /dev/null @@ -1 +0,0 @@ -Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. \ No newline at end of file diff --git a/changelog.d/17888.feature b/changelog.d/17888.feature deleted file mode 100644 index 3ede8886abdb..000000000000 --- a/changelog.d/17888.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222). diff --git a/changelog.d/17890.misc b/changelog.d/17890.misc deleted file mode 100644 index f8676aee599f..000000000000 --- a/changelog.d/17890.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor database calls to remove `Generator` usage. diff --git a/changelog.d/17894.misc b/changelog.d/17894.misc deleted file mode 100644 index dc1a7577abfd..000000000000 --- a/changelog.d/17894.misc +++ /dev/null @@ -1 +0,0 @@ -Remove usage of internal header encoding API. diff --git a/changelog.d/17905.misc b/changelog.d/17905.misc deleted file mode 100644 index 32ef50dbacf8..000000000000 --- a/changelog.d/17905.misc +++ /dev/null @@ -1 +0,0 @@ -Use unique name for each os.arch variant when uploading Wheel artifacts. diff --git a/debian/changelog b/debian/changelog index 384887888fba..173bcd63a603 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium + + * New Synapse release 1.119.0rc1. + + -- Synapse Packaging team <packages@matrix.org>  Wed, 06 Nov 2024 08:59:43 -0700 + matrix-synapse-py3 (1.118.0) stable; urgency=medium * New Synapse release 1.118.0.
diff --git a/pyproject.toml b/pyproject.toml index 3ec01701c3ba..33acff004da1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.118.0" +version = "1.119.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors <packages@matrix.org>"] license = "AGPL-3.0-or-later" From a4438c9bc10f704d76a6b500b16ebd59bf9a74b3 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Wed, 6 Nov 2024 09:15:59 -0700 Subject: [PATCH 070/147] Cleanup changelog --- CHANGES.md | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6f263f7f7d32..4d34adc7e1df 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -3,7 +3,7 @@ ### Features - Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374)) -- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222). ([\#17888](https://github.com/element-hq/synapse/issues/17888)) +- Add experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2). ([\#17888](https://github.com/element-hq/synapse/issues/17888)) ### Bugfixes @@ -16,7 +16,6 @@ ### Internal Changes -- Bump actions/download-artifact and actions/upload-artifact from v3 -> v4. ([\#17657](https://github.com/element-hq/synapse/issues/17657)) - Add a test for downloading and thumbnailing a CMYK JPEG. ([\#17786](https://github.com/element-hq/synapse/issues/17786)) - Refactor database calls to remove `Generator` usage. ([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890)) - Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830)) @@ -30,21 +29,16 @@ ### Updates to locked dependencies -* Bump actions/download-artifact from 3 to 4.1.7 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657)) -* Bump anyhow from 1.0.89 to 1.0.90. ([\#17858](https://github.com/element-hq/synapse/issues/17858)) -* Bump anyhow from 1.0.90 to 1.0.91. ([\#17876](https://github.com/element-hq/synapse/issues/17876)) -* Bump anyhow from 1.0.91 to 1.0.92. ([\#17901](https://github.com/element-hq/synapse/issues/17901)) +* Bump actions/download-artifact & actions/upload-artifact from 3 to 4 in /.github/workflows. ([\#17657](https://github.com/element-hq/synapse/issues/17657)) +* Bump anyhow from 1.0.89 to 1.0.92. ([\#17858](https://github.com/element-hq/synapse/issues/17858), [\#17876](https://github.com/element-hq/synapse/issues/17876), [\#17901](https://github.com/element-hq/synapse/issues/17901)) * Bump bytes from 1.7.2 to 1.8.0. ([\#17877](https://github.com/element-hq/synapse/issues/17877)) * Bump cryptography from 43.0.1 to 43.0.3. ([\#17853](https://github.com/element-hq/synapse/issues/17853)) * Bump mypy-zope from 1.0.7 to 1.0.8.
([\#17898](https://github.com/element-hq/synapse/issues/17898)) -* Bump phonenumbers from 8.13.47 to 8.13.48. ([\#17880](https://github.com/element-hq/synapse/issues/17880)) -* Bump phonenumbers from 8.13.48 to 8.13.49. ([\#17899](https://github.com/element-hq/synapse/issues/17899)) +* Bump phonenumbers from 8.13.47 to 8.13.49. ([\#17880](https://github.com/element-hq/synapse/issues/17880), [\#17899](https://github.com/element-hq/synapse/issues/17899)) * Bump python-multipart from 0.0.12 to 0.0.16. ([\#17879](https://github.com/element-hq/synapse/issues/17879)) * Bump regex from 1.11.0 to 1.11.1. ([\#17874](https://github.com/element-hq/synapse/issues/17874)) -* Bump ruff from 0.6.9 to 0.7.1. ([\#17868](https://github.com/element-hq/synapse/issues/17868)) -* Bump ruff from 0.7.1 to 0.7.2. ([\#17897](https://github.com/element-hq/synapse/issues/17897)) -* Bump serde from 1.0.210 to 1.0.213. ([\#17875](https://github.com/element-hq/synapse/issues/17875)) -* Bump serde from 1.0.213 to 1.0.214. ([\#17900](https://github.com/element-hq/synapse/issues/17900)) +* Bump ruff from 0.6.9 to 0.7.2. ([\#17868](https://github.com/element-hq/synapse/issues/17868), [\#17897](https://github.com/element-hq/synapse/issues/17897)) +* Bump serde from 1.0.210 to 1.0.214. ([\#17875](https://github.com/element-hq/synapse/issues/17875), [\#17900](https://github.com/element-hq/synapse/issues/17900)) * Bump serde_json from 1.0.128 to 1.0.132. ([\#17857](https://github.com/element-hq/synapse/issues/17857)) * Bump types-psycopg2 from 2.9.21.20240819 to 2.9.21.20241019. ([\#17855](https://github.com/element-hq/synapse/issues/17855)) * Bump types-setuptools from 75.1.0.20241014 to 75.2.0.20241019. ([\#17856](https://github.com/element-hq/synapse/issues/17856)) From e1f5da65e104f15bbe0ed51ab0ff7bc8c7d71613 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Wed, 6 Nov 2024 10:51:19 -0600 Subject: [PATCH 071/147] Update version constraint to allow the latest `poetry-core` `1.9.1` (#17902) Update version constraint to allow the latest `poetry-core` `1.9.1` Context: > I am working on updating poetry-core in Fedora and synapse is one of affected packages. Please run a CI to see if it works properly. Thank you. Mergeable version of https://github.com/element-hq/synapse/pull/17848 --- changelog.d/17902.misc | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17902.misc diff --git a/changelog.d/17902.misc b/changelog.d/17902.misc new file mode 100644 index 000000000000..f094f57c2fcc --- /dev/null +++ b/changelog.d/17902.misc @@ -0,0 +1 @@ +Update version constraint to allow the latest poetry-core 1.9.1. diff --git a/pyproject.toml b/pyproject.toml index 3ec01701c3ba..af096a2cd4d1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -370,7 +370,7 @@ tomli = ">=1.2.3" # runtime errors caused by build system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). 
-requires = ["poetry-core>=1.1.0,<=1.9.0", "setuptools_rust>=1.3,<=1.8.1"] +requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.8.1"] build-backend = "poetry.core.masonry.api" From eda735e4bb472b4239c8d3656c64054cb46750c4 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Wed, 6 Nov 2024 19:36:01 +0000 Subject: [PATCH 072/147] Remove support for python 3.8 (#17908) ### Pull Request Checklist * [X] Pull request is based on the develop branch * [X] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- .ci/scripts/calculate_jobs.py | 24 +++++++-------- .ci/scripts/prepare_old_deps.sh | 2 +- .github/workflows/latest_deps.yml | 4 +-- .github/workflows/release-artifacts.yml | 4 +-- .github/workflows/tests.yml | 10 +++---- .github/workflows/twisted_trunk.yml | 4 +-- changelog.d/17908.misc | 1 + docs/development/contributing_guide.md | 2 +- docs/setup/installation.md | 2 +- docs/upgrade.md | 11 +++++++ mypy.ini | 2 +- poetry.lock | 26 ++-------------- pyproject.toml | 14 ++++----- scripts-dev/build_debian_packages.py | 5 ++-- synapse/__init__.py | 4 +-- synapse/storage/databases/main/room.py | 4 ++- synapse/storage/databases/main/state.py | 4 ++- tests/handlers/test_federation.py | 27 +++++++++++------ tests/handlers/test_room_member.py | 33 ++++++++++++--------- tests/push/test_bulk_push_rule_evaluator.py | 8 +++-- tests/storage/test_stream.py | 33 ++++++++++++--------- tests/util/test_check_dependencies.py | 22 +++++++++----- tox.ini | 2 +- 23 files changed, 135 insertions(+), 113 deletions(-) create mode 100644 changelog.d/17908.misc diff --git a/.ci/scripts/calculate_jobs.py b/.ci/scripts/calculate_jobs.py index 15f2d94a810f..ea278173db38 100755 --- a/.ci/scripts/calculate_jobs.py +++ b/.ci/scripts/calculate_jobs.py @@ -36,11 +36,11 @@ def set_output(key: str, value: str): # First calculate the various trial jobs. 
# # For PRs, we only run each type of test with the oldest Python version supported (which -# is Python 3.8 right now) +# is Python 3.9 right now) trial_sqlite_tests = [ { - "python-version": "3.8", + "python-version": "3.9", "database": "sqlite", "extras": "all", } @@ -53,12 +53,12 @@ def set_output(key: str, value: str): "database": "sqlite", "extras": "all", } - for version in ("3.9", "3.10", "3.11", "3.12", "3.13") + for version in ("3.10", "3.11", "3.12", "3.13") ) trial_postgres_tests = [ { - "python-version": "3.8", + "python-version": "3.9", "database": "postgres", "postgres-version": "11", "extras": "all", @@ -77,7 +77,7 @@ def set_output(key: str, value: str): trial_no_extra_tests = [ { - "python-version": "3.8", + "python-version": "3.9", "database": "sqlite", "extras": "", } @@ -99,24 +99,24 @@ def set_output(key: str, value: str): # First calculate the various sytest jobs. # -# For each type of test we only run on focal on PRs +# For each type of test we only run on bullseye on PRs sytest_tests = [ { - "sytest-tag": "focal", + "sytest-tag": "bullseye", }, { - "sytest-tag": "focal", + "sytest-tag": "bullseye", "postgres": "postgres", }, { - "sytest-tag": "focal", + "sytest-tag": "bullseye", "postgres": "multi-postgres", "workers": "workers", }, { - "sytest-tag": "focal", + "sytest-tag": "bullseye", "postgres": "multi-postgres", "workers": "workers", "reactor": "asyncio", @@ -127,11 +127,11 @@ def set_output(key: str, value: str): sytest_tests.extend( [ { - "sytest-tag": "focal", + "sytest-tag": "bullseye", "reactor": "asyncio", }, { - "sytest-tag": "focal", + "sytest-tag": "bullseye", "postgres": "postgres", "reactor": "asyncio", }, diff --git a/.ci/scripts/prepare_old_deps.sh b/.ci/scripts/prepare_old_deps.sh index 580f87bbdfb2..3589be26f8cc 100755 --- a/.ci/scripts/prepare_old_deps.sh +++ b/.ci/scripts/prepare_old_deps.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# this script is run by GitHub Actions in a plain `focal` container; it +# this script is run by GitHub Actions in a plain `jammy` container; it # - installs the minimal system requirements, and poetry; # - patches the project definition file to refer to old versions only; # - creates a venv with these old versions using poetry; and finally diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index b9e9a401b9eb..3884b6d402fe 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -132,9 +132,9 @@ jobs: fail-fast: false matrix: include: - - sytest-tag: focal + - sytest-tag: bullseye - - sytest-tag: focal + - sytest-tag: bullseye postgres: postgres workers: workers redis: redis diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 1e2513b28991..d77d7792f0f6 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -102,7 +102,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-20.04, macos-12] + os: [ubuntu-22.04, macos-12] arch: [x86_64, aarch64] # is_pr is a flag used to exclude certain jobs from the matrix on PRs. # It is not read by the rest of the workflow. 
@@ -144,7 +144,7 @@ jobs: - name: Only build a single wheel on PR if: startsWith(github.ref, 'refs/pull/') - run: echo "CIBW_BUILD="cp38-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV + run: echo "CIBW_BUILD="cp39-manylinux_${{ matrix.arch }}"" >> $GITHUB_ENV - name: Build wheels run: python -m cibuildwheel --output-dir wheelhouse diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 5586bd6d9472..27dac8922041 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -397,7 +397,7 @@ jobs: needs: - linting-done - changes - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v4 @@ -409,12 +409,12 @@ jobs: # their build dependencies - run: | sudo apt-get -qq update - sudo apt-get -qq install build-essential libffi-dev python-dev \ + sudo apt-get -qq install build-essential libffi-dev python3-dev \ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev - uses: actions/setup-python@v5 with: - python-version: '3.8' + python-version: '3.9' - name: Prepare old deps if: steps.cache-poetry-old-deps.outputs.cache-hit != 'true' @@ -458,7 +458,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: ["pypy-3.8"] + python-version: ["pypy-3.9"] extras: ["all"] steps: @@ -580,7 +580,7 @@ jobs: strategy: matrix: include: - - python-version: "3.8" + - python-version: "3.9" postgres-version: "11" - python-version: "3.11" diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 76609c211875..cdaa00ef90bf 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -99,11 +99,11 @@ jobs: if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest container: - # We're using ubuntu:focal because it uses Python 3.8 which is our minimum supported Python version. + # We're using debian:bullseye because it uses Python 3.9 which is our minimum supported Python version. # This job is a canary to warn us about unreleased twisted changes that would cause problems for us if # they were to be released immediately. For simplicity's sake (and to save CI runners) we use the oldest # version, assuming that any incompatibilities on newer versions would also be present on the oldest. - image: matrixdotorg/sytest-synapse:focal + image: matrixdotorg/sytest-synapse:bullseye volumes: - ${{ github.workspace }}:/src diff --git a/changelog.d/17908.misc b/changelog.d/17908.misc new file mode 100644 index 000000000000..8f1772914865 --- /dev/null +++ b/changelog.d/17908.misc @@ -0,0 +1 @@ +Remove support for python 3.8. diff --git a/docs/development/contributing_guide.md b/docs/development/contributing_guide.md index f079f61b484a..d6efab96cfb1 100644 --- a/docs/development/contributing_guide.md +++ b/docs/development/contributing_guide.md @@ -322,7 +322,7 @@ The following command will let you run the integration test with the most common configuration: ```sh -$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:focal +$ docker run --rm -it -v /path/where/you/have/cloned/the/repository\:/src:ro -v /path/to/where/you/want/logs\:/logs matrixdotorg/sytest-synapse:bullseye ``` (Note that the paths must be full paths! You could also write `$(realpath relative/path)` if needed.) 
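A hypothetical pre-flight check for operators (not part of this patch): the interpreter floor that `synapse/__init__.py` enforces at startup, and which this commit raises, can be verified ahead of the upgrade by running a small script with the virtualenv's own interpreter.

```python
# Hypothetical pre-upgrade check; run with the interpreter your Synapse
# virtualenv uses. It mirrors the startup guard this commit updates in
# synapse/__init__.py.
import sys

if sys.version_info < (3, 9):
    raise SystemExit(
        f"Synapse 1.119.0 requires Python 3.9+, found {sys.version.split()[0]}"
    )
```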
diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 9cebb89b4d94..d717880aa538 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -208,7 +208,7 @@ When following this route please make sure that the [Platform-specific prerequis System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.8 or later, up to Python 3.11. +- Python 3.9 or later, up to Python 3.13. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org If building on an uncommon architecture for which pre-built wheels are diff --git a/docs/upgrade.md b/docs/upgrade.md index 52b1adbe904f..ea9824a5ee94 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,17 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.119.0 + +## Minimum supported Python version + +The minimum supported Python version has been increased from v3.8 to v3.9. +You will need Python 3.9+ to run Synapse v1.119.0 (due out Nov 7th, 2024). + +If you use current versions of the Matrix.org-distributed Docker images, no action is required. +Please note that support for Ubuntu `focal` was dropped as well since it uses Python 3.8. + + # Upgrading to v1.111.0 ## New worker endpoints for authenticated client and federation media diff --git a/mypy.ini b/mypy.ini index 3fca15c01beb..cf64248cc52f 100644 --- a/mypy.ini +++ b/mypy.ini @@ -26,7 +26,7 @@ strict_equality = True # Run mypy type checking with the minimum supported Python version to catch new usage # that isn't backwards-compatible (types, overloads, etc). -python_version = 3.8 +python_version = 3.9 files = docker/, diff --git a/poetry.lock b/poetry.lock index 6a5845fd1ec6..16b7dc504eb4 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -11,9 +11,6 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] -[package.dependencies] -typing-extensions = {version = ">=4.0.0", markers = "python_version < \"3.9\""} - [[package]] name = "attrs" version = "24.2.0" @@ -874,9 +871,7 @@ files = [ [package.dependencies] attrs = ">=22.2.0" -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} jsonschema-specifications = ">=2023.03.6" -pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} referencing = ">=0.28.4" rpds-py = ">=0.7.1" @@ -896,7 +891,6 @@ files = [ ] [package.dependencies] -importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} referencing = ">=0.28.0" [[package]] @@ -912,7 +906,6 @@ files = [ [package.dependencies] importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} -importlib-resources = {version = "*", markers = "python_version < \"3.9\""} "jaraco.classes" = "*" jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} @@ -1571,17 +1564,6 @@ files = [ [package.extras] testing = ["pytest", "pytest-cov"] -[[package]] -name = "pkgutil-resolve-name" -version = "1.3.10" -description = "Resolve a name to an object." -optional = false -python-versions = ">=3.6" -files = [ - {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, - {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, -] - [[package]] name = "prometheus-client" version = "0.21.0" @@ -1948,7 +1930,6 @@ files = [ [package.dependencies] cryptography = ">=3.1" defusedxml = "*" -importlib-resources = {version = "*", markers = "python_version < \"3.9\""} pyopenssl = "*" python-dateutil = "*" pytz = "*" @@ -2164,7 +2145,6 @@ files = [ [package.dependencies] markdown-it-py = ">=2.2.0,<3.0.0" pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] @@ -3121,5 +3101,5 @@ user-search = ["pyicu"] [metadata] lock-version = "2.0" -python-versions = "^3.8.0" -content-hash = "eaded26b4770b9d19bfcee6dee8b96203df358ce51939d9b90fdbcf605e2f5fd" +python-versions = "^3.9.0" +content-hash = "0cd942a5193d01cbcef135a0bebd3fa0f12f7dbc63899d6f1c301e0649e9d902" diff --git a/pyproject.toml b/pyproject.toml index af096a2cd4d1..13de146b4ed0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -36,7 +36,7 @@ [tool.ruff] line-length = 88 -target-version = "py38" +target-version = "py39" [tool.ruff.lint] # See https://beta.ruff.rs/docs/rules/#error-e @@ -155,7 +155,7 @@ synapse_review_recent_signups = "synapse._scripts.review_recent_signups:main" update_synapse_database = "synapse._scripts.update_synapse_database:main" [tool.poetry.dependencies] -python = "^3.8.0" +python = "^3.9.0" # Mandatory Dependencies # ---------------------- @@ -178,7 +178,7 @@ Twisted = {extras = ["tls"], version = ">=18.9.0"} treq = ">=15.1" # Twisted has required pyopenssl 16.0 since about Twisted 16.6. 
pyOpenSSL = ">=16.0.0" -PyYAML = ">=3.13" +PyYAML = ">=5.3" pyasn1 = ">=0.1.9" pyasn1-modules = ">=0.0.7" bcrypt = ">=3.1.7" @@ -241,7 +241,7 @@ authlib = { version = ">=0.15.1", optional = true } # `contrib/systemd/log_config.yaml`. # Note: systemd-python 231 appears to have been yanked from pypi systemd-python = { version = ">=231", optional = true } -lxml = { version = ">=4.2.0", optional = true } +lxml = { version = ">=4.5.2", optional = true } sentry-sdk = { version = ">=0.7.2", optional = true } opentracing = { version = ">=2.2.0", optional = true } jaeger-client = { version = ">=4.0.0", optional = true } @@ -378,13 +378,13 @@ build-backend = "poetry.core.masonry.api" # Skip unsupported platforms (by us or by Rust). # See https://cibuildwheel.readthedocs.io/en/stable/options/#build-skip for the list of build targets. # We skip: -# - CPython 3.6 and 3.7: EOLed -# - PyPy 3.7: we only support Python 3.8+ +# - CPython 3.6, 3.7 and 3.8: EOLed +# - PyPy 3.7 and 3.8: we only support Python 3.9+ # - musllinux i686: excluded to reduce number of wheels we build. # c.f. https://github.com/matrix-org/synapse/pull/12595#discussion_r963107677 # - PyPy on Aarch64 and musllinux on aarch64: too slow to build. # c.f. https://github.com/matrix-org/synapse/pull/14259 -skip = "cp36* cp37* pp37* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64" +skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64" # We need a rust compiler before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal" diff --git a/scripts-dev/build_debian_packages.py b/scripts-dev/build_debian_packages.py index 88c84194009c..6ee695b2ba8f 100755 --- a/scripts-dev/build_debian_packages.py +++ b/scripts-dev/build_debian_packages.py @@ -28,9 +28,8 @@ # example) DISTS = ( "debian:bullseye", # (EOL ~2024-07) (our EOL forced by Python 3.9 is 2025-10-05) - "debian:bookworm", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24) - "debian:sid", # (EOL not specified yet) (our EOL forced by Python 3.11 is 2027-10-24) - "ubuntu:focal", # 20.04 LTS (EOL 2025-04) (our EOL forced by Python 3.8 is 2024-10-14) + "debian:bookworm", # (EOL 2026-06) (our EOL forced by Python 3.11 is 2027-10-24) + "debian:sid", # (rolling distro, no EOL) "ubuntu:jammy", # 22.04 LTS (EOL 2027-04) (our EOL forced by Python 3.10 is 2026-10-04) "ubuntu:noble", # 24.04 LTS (EOL 2029-06) "ubuntu:oracular", # 24.10 (EOL 2025-07) diff --git a/synapse/__init__.py b/synapse/__init__.py index 73b92f12beae..e7784ac5d753 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -39,8 +39,8 @@ # Note that we use an (unneeded) variable here so that pyupgrade doesn't nuke the # if-statement completely. py_version = sys.version_info -if py_version < (3, 8): - print("Synapse requires Python 3.8 or above.") +if py_version < (3, 9): + print("Synapse requires Python 3.9 or above.") sys.exit(1) # Allow using the asyncio reactor via env var. diff --git a/synapse/storage/databases/main/room.py b/synapse/storage/databases/main/room.py index 33569a4391e6..cc3ce0951e74 100644 --- a/synapse/storage/databases/main/room.py +++ b/synapse/storage/databases/main/room.py @@ -2550,7 +2550,9 @@ async def clear_partial_state_room(self, room_id: str) -> Optional[int]: still contains events with partial state. 
""" try: - async with self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id: + async with ( + self._un_partial_stated_rooms_stream_id_gen.get_next() as un_partial_state_room_stream_id + ): await self.db_pool.runInteraction( "clear_partial_state_room", self._clear_partial_state_room_txn, diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 60312d770d43..42b3638e1c8c 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -681,7 +681,9 @@ async def update_state_for_partial_state_event( context: EventContext, ) -> None: """Update the state group for a partial state event""" - async with self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id: + async with ( + self._un_partial_stated_events_stream_id_gen.get_next() as un_partial_state_event_stream_id + ): await self.db_pool.runInteraction( "update_state_for_partial_state_event", self._update_state_for_partial_state_event_txn, diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index 9847893fce74..b64a8a86a2b8 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -661,9 +661,12 @@ def test_failed_partial_join_is_clean(self) -> None: ) ) - with patch.object( - fed_client, "make_membership_event", mock_make_membership_event - ), patch.object(fed_client, "send_join", mock_send_join): + with ( + patch.object( + fed_client, "make_membership_event", mock_make_membership_event + ), + patch.object(fed_client, "send_join", mock_send_join), + ): # Join and check that our join event is rejected # (The join event is rejected because it doesn't have any signatures) join_exc = self.get_failure( @@ -708,9 +711,12 @@ async def sync_partial_state_room( fed_handler = self.hs.get_federation_handler() store = self.hs.get_datastores().main - with patch.object( - fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room - ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): + with ( + patch.object( + fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room + ), + patch.object(store, "is_partial_state_room", mock_is_partial_state_room), + ): # Start the partial state sync. fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) @@ -760,9 +766,12 @@ async def sync_partial_state_room( fed_handler = self.hs.get_federation_handler() store = self.hs.get_datastores().main - with patch.object( - fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room - ), patch.object(store, "is_partial_state_room", mock_is_partial_state_room): + with ( + patch.object( + fed_handler, "_sync_partial_state_room", mock_sync_partial_state_room + ), + patch.object(store, "is_partial_state_room", mock_is_partial_state_room), + ): # Start the partial state sync. 
fed_handler._start_partial_state_room_sync("hs1", {"hs2"}, "room_id") self.assertEqual(mock_sync_partial_state_room.call_count, 1) diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index ad77356ede7b..f43ce664839f 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -172,20 +172,25 @@ def test_remote_joins_contribute_to_rate_limit(self) -> None: ) ) - with patch.object( - self.handler.federation_handler.federation_client, - "make_membership_event", - mock_make_membership_event, - ), patch.object( - self.handler.federation_handler.federation_client, - "send_join", - mock_send_join, - ), patch( - "synapse.event_auth._is_membership_change_allowed", - return_value=None, - ), patch( - "synapse.handlers.federation_event.check_state_dependent_auth_rules", - return_value=None, + with ( + patch.object( + self.handler.federation_handler.federation_client, + "make_membership_event", + mock_make_membership_event, + ), + patch.object( + self.handler.federation_handler.federation_client, + "send_join", + mock_send_join, + ), + patch( + "synapse.event_auth._is_membership_change_allowed", + return_value=None, + ), + patch( + "synapse.handlers.federation_event.check_state_dependent_auth_rules", + return_value=None, + ), ): self.get_success( self.handler.update_membership( diff --git a/tests/push/test_bulk_push_rule_evaluator.py b/tests/push/test_bulk_push_rule_evaluator.py index fc73f3dc2ac0..16c12928128a 100644 --- a/tests/push/test_bulk_push_rule_evaluator.py +++ b/tests/push/test_bulk_push_rule_evaluator.py @@ -120,9 +120,11 @@ def test_action_for_event_by_user_handles_noninteger_room_power_levels( # # We have seen stringy and null values for "room" in the wild, so presumably # some of this validation was missing in the past. 
- with patch("synapse.events.validator.validate_canonicaljson"), patch( - "synapse.events.validator.jsonschema.validate" - ), patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"): + with ( + patch("synapse.events.validator.validate_canonicaljson"), + patch("synapse.events.validator.jsonschema.validate"), + patch("synapse.handlers.event_auth.check_state_dependent_auth_rules"), + ): pl_event_id = self.helper.send_state( self.room_id, "m.room.power_levels", diff --git a/tests/storage/test_stream.py b/tests/storage/test_stream.py index ed5f28624393..38a56419f373 100644 --- a/tests/storage/test_stream.py +++ b/tests/storage/test_stream.py @@ -1465,20 +1465,25 @@ def test_remote_join(self) -> None: ) ) - with patch.object( - self.room_member_handler.federation_handler.federation_client, - "make_membership_event", - mock_make_membership_event, - ), patch.object( - self.room_member_handler.federation_handler.federation_client, - "send_join", - mock_send_join, - ), patch( - "synapse.event_auth._is_membership_change_allowed", - return_value=None, - ), patch( - "synapse.handlers.federation_event.check_state_dependent_auth_rules", - return_value=None, + with ( + patch.object( + self.room_member_handler.federation_handler.federation_client, + "make_membership_event", + mock_make_membership_event, + ), + patch.object( + self.room_member_handler.federation_handler.federation_client, + "send_join", + mock_send_join, + ), + patch( + "synapse.event_auth._is_membership_change_allowed", + return_value=None, + ), + patch( + "synapse.handlers.federation_event.check_state_dependent_auth_rules", + return_value=None, + ), ): self.get_success( self.room_member_handler.update_membership( diff --git a/tests/util/test_check_dependencies.py b/tests/util/test_check_dependencies.py index 13a4e6ddaa5a..c052ba2b75f8 100644 --- a/tests/util/test_check_dependencies.py +++ b/tests/util/test_check_dependencies.py @@ -109,10 +109,13 @@ def test_version_reported_as_none(self) -> None: def test_checks_ignore_dev_dependencies(self) -> None: """Both generic and per-extra checks should ignore dev dependencies.""" - with patch( - "synapse.util.check_dependencies.metadata.requires", - return_value=["dummypkg >= 1; extra == 'mypy'"], - ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}): + with ( + patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1; extra == 'mypy'"], + ), + patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}), + ): # We're testing that none of these calls raise. 
with self.mock_installed_package(None): check_requirements() @@ -141,10 +144,13 @@ def test_generic_check_of_optional_dependency(self) -> None: def test_check_for_extra_dependencies(self) -> None: """Complain if a package required for an extra is missing or old.""" - with patch( - "synapse.util.check_dependencies.metadata.requires", - return_value=["dummypkg >= 1; extra == 'cool-extra'"], - ), patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}): + with ( + patch( + "synapse.util.check_dependencies.metadata.requires", + return_value=["dummypkg >= 1; extra == 'cool-extra'"], + ), + patch("synapse.util.check_dependencies.RUNTIME_EXTRAS", {"cool-extra"}), + ): with self.mock_installed_package(None): self.assertRaises(DependencyException, check_requirements, "cool-extra") with self.mock_installed_package(old): diff --git a/tox.ini b/tox.ini index 4cd9dfb966c7..a506b5034d77 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py37, py38, py39, py310 +envlist = py39, py310, py311, py312, py313 # we require tox>=2.3.2 for the fix to https://github.com/tox-dev/tox/issues/208 minversion = 2.3.2 From 2a321bac35b872d47d8ae8da4cba31d757e96a26 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 6 Nov 2024 22:21:06 +0000 Subject: [PATCH 073/147] Issue one time keys in upload order (#17903) Currently, one-time-keys are issued in a somewhat random order. (In practice, they are issued according to the lexicographical order of their key IDs.) That can lead to a situation where a client gives up hope of a given OTK ever being used, whilst it is still on the server. Related: https://github.com/element-hq/element-meta/issues/2356 --- changelog.d/17903.bugfix | 1 + synapse/handlers/e2e_keys.py | 2 +- .../storage/databases/main/end_to_end_keys.py | 25 +++++- .../delta/88/03_add_otk_ts_added_index.sql | 18 +++++ tests/handlers/test_e2e_keys.py | 78 +++++++++++++++++-- 5 files changed, 116 insertions(+), 8 deletions(-) create mode 100644 changelog.d/17903.bugfix create mode 100644 synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql diff --git a/changelog.d/17903.bugfix b/changelog.d/17903.bugfix new file mode 100644 index 000000000000..a4d02fc98307 --- /dev/null +++ b/changelog.d/17903.bugfix @@ -0,0 +1 @@ +Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index f78e66ad0a13..315461fefb37 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -615,7 +615,7 @@ async def claim_local_one_time_keys( 3. Attempt to fetch fallback keys from the database. Args: - local_query: An iterable of tuples of (user ID, device ID, algorithm). + local_query: An iterable of tuples of (user ID, device ID, algorithm, number of keys). always_include_fallback_keys: True to always include fallback keys. 
Returns: diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 575aaf498baf..1fbc49e7c5ac 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -99,6 +99,13 @@ def __init__( unique=True, ) + self.db_pool.updates.register_background_index_update( + update_name="add_otk_ts_added_index", + index_name="e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx", + table="e2e_one_time_keys_json", + columns=("user_id", "device_id", "algorithm", "ts_added_ms"), + ) + class EndToEndKeyWorkerStore(EndToEndKeyBackgroundStore, CacheInvalidationWorkerStore): def __init__( @@ -1122,7 +1129,7 @@ async def claim_e2e_one_time_keys( """Take a list of one time keys out of the database. Args: - query_list: An iterable of tuples of (user ID, device ID, algorithm). + query_list: An iterable of tuples of (user ID, device ID, algorithm, number of keys). Returns: A tuple (results, missing) of: @@ -1310,9 +1317,14 @@ def _claim_e2e_one_time_key_simple( OTK was found. """ + # Return the oldest keys from this device (based on `ts_added_ms`). + # Doing so means that keys are issued in the same order they were uploaded, + # which reduces the chances of a client expiring its copy of a (private) + # key while the public key is still on the server, waiting to be issued. sql = """ SELECT key_id, key_json FROM e2e_one_time_keys_json WHERE user_id = ? AND device_id = ? AND algorithm = ? + ORDER BY ts_added_ms LIMIT ? """ @@ -1354,13 +1366,22 @@ def _claim_e2e_one_time_keys_bulk( A list of tuples (user_id, device_id, algorithm, key_id, key_json) for each OTK claimed. """ + # Find, delete, and return the oldest keys from each device (based on + # `ts_added_ms`). + # + # Doing so means that keys are issued in the same order they were uploaded, + # which reduces the chances of a client expiring its copy of a (private) + # key while the public key is still on the server, waiting to be issued. sql = """ WITH claims(user_id, device_id, algorithm, claim_count) AS ( VALUES ? ), ranked_keys AS ( SELECT user_id, device_id, algorithm, key_id, claim_count, - ROW_NUMBER() OVER (PARTITION BY (user_id, device_id, algorithm)) AS r + ROW_NUMBER() OVER ( + PARTITION BY (user_id, device_id, algorithm) + ORDER BY ts_added_ms + ) AS r FROM e2e_one_time_keys_json JOIN claims USING (user_id, device_id, algorithm) ) diff --git a/synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql b/synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql new file mode 100644 index 000000000000..7712ea68ad67 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/03_add_otk_ts_added_index.sql @@ -0,0 +1,18 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + + +-- Add an index on (user_id, device_id, algorithm, ts_added_ms) on e2e_one_time_keys_json, so that OTKs can +-- efficiently be issued in the same order they were uploaded. 
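+-- (Illustrative only: the background update registered in end_to_end_keys.py
+-- above builds, on PostgreSQL, roughly
+--   CREATE INDEX CONCURRENTLY e2e_one_time_keys_json_user_id_device_id_algorithm_ts_added_idx
+--     ON e2e_one_time_keys_json (user_id, device_id, algorithm, ts_added_ms);
+-- the exact DDL is generated by Synapse's background updater.)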
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8803, 'add_otk_ts_added_index', '{}'); diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index 8a3dfdcf75c1..bca314db83c8 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -151,18 +151,30 @@ def test_change_one_time_keys(self) -> None: def test_claim_one_time_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" - keys = {"alg1:k1": "key1"} - res = self.get_success( self.handler.upload_keys_for_user( - local_user, device_id, {"one_time_keys": keys} + local_user, device_id, {"one_time_keys": {"alg1:k1": "key1"}} ) ) self.assertDictEqual( res, {"one_time_key_counts": {"alg1": 1, "signed_curve25519": 0}} ) - res2 = self.get_success( + # Keys should be returned in the order they were uploaded. To test, advance time + # a little, then upload a second key with an earlier key ID; it should get + # returned second. + self.reactor.advance(1) + res = self.get_success( + self.handler.upload_keys_for_user( + local_user, device_id, {"one_time_keys": {"alg1:k0": "key0"}} + ) + ) + self.assertDictEqual( + res, {"one_time_key_counts": {"alg1": 2, "signed_curve25519": 0}} + ) + + # now claim both keys back. They should be in the same order + res = self.get_success( self.handler.claim_one_time_keys( {local_user: {device_id: {"alg1": 1}}}, self.requester, @@ -171,12 +183,27 @@ def test_claim_one_time_key(self) -> None: ) ) self.assertEqual( - res2, + res, { "failures": {}, "one_time_keys": {local_user: {device_id: {"alg1:k1": "key1"}}}, }, ) + res = self.get_success( + self.handler.claim_one_time_keys( + {local_user: {device_id: {"alg1": 1}}}, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + self.assertEqual( + res, + { + "failures": {}, + "one_time_keys": {local_user: {device_id: {"alg1:k0": "key0"}}}, + }, + ) def test_claim_one_time_key_bulk(self) -> None: """Like test_claim_one_time_key but claims multiple keys in one handler call.""" @@ -336,6 +363,47 @@ def assertAllOtks(user_id: str, device_id: str, *alg_key_pairs: str) -> None: counts_by_alg, expected_counts_by_alg, f"{user_id}:{device_id}" ) + def test_claim_one_time_key_bulk_ordering(self) -> None: + """Keys returned by the bulk claim call should be returned in the correct order""" + + # Alice has lots of keys, uploaded in a specific order + alice = f"@alice:{self.hs.hostname}" + alice_dev = "alice_dev_1" + + self.get_success( + self.handler.upload_keys_for_user( + alice, + alice_dev, + {"one_time_keys": {"alg1:k20": 20, "alg1:k21": 21, "alg1:k22": 22}}, + ) + ) + # Advance time by 1s, to ensure that there is a difference in upload time. + self.reactor.advance(1) + self.get_success( + self.handler.upload_keys_for_user( + alice, + alice_dev, + {"one_time_keys": {"alg1:k10": 10, "alg1:k11": 11, "alg1:k12": 12}}, + ) + ) + + # Now claim some, and check we get the right ones. + claim_res = self.get_success( + self.handler.claim_one_time_keys( + {alice: {alice_dev: {"alg1": 2}}}, + self.requester, + timeout=None, + always_include_fallback_keys=False, + ) + ) + # We should get the first-uploaded keys, even though they have later key ids. + # We should get a random set of two of k20, k21, k22. 
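+        # (k20, k21 and k22 were uploaded in a single batch, so they share one
+        # ts_added_ms; the claim SQL orders by ts_added_ms alone, which leaves
+        # ties between them unspecified. That is why any two of the three may
+        # be returned, but never one of k10-k12.)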
+ self.assertEqual(claim_res["failures"], {}) + claimed_keys = claim_res["one_time_keys"]["@alice:test"]["alice_dev_1"] + self.assertEqual(len(claimed_keys), 2) + for key_id in claimed_keys.keys(): + self.assertIn(key_id, ["alg1:k20", "alg1:k21", "alg1:k22"]) + def test_fallback_key(self) -> None: local_user = "@boris:" + self.hs.hostname device_id = "xyz" From 77eafd47df9b34bfc2458b007280929ef82dedfe Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 7 Nov 2024 10:11:13 +0000 Subject: [PATCH 074/147] Fix other unit tests with latest twisted (#17907) There's also https://github.com/element-hq/synapse/pull/17906 --- changelog.d/17907.bugfix | 1 + synapse/logging/_remote.py | 4 +-- tests/server.py | 57 ++++++++++++++++++++++++++++++++++---- 3 files changed, 55 insertions(+), 7 deletions(-) create mode 100644 changelog.d/17907.bugfix diff --git a/changelog.d/17907.bugfix b/changelog.d/17907.bugfix new file mode 100644 index 000000000000..f38ce6a59032 --- /dev/null +++ b/changelog.d/17907.bugfix @@ -0,0 +1 @@ +Fix tests to run with latest Twisted. diff --git a/synapse/logging/_remote.py b/synapse/logging/_remote.py index f047edee8e16..ac34fa6525d5 100644 --- a/synapse/logging/_remote.py +++ b/synapse/logging/_remote.py @@ -39,7 +39,7 @@ ) from twisted.internet.interfaces import ( IPushProducer, - IReactorTCP, + IReactorTime, IStreamClientEndpoint, ) from twisted.internet.protocol import Factory, Protocol @@ -113,7 +113,7 @@ def __init__( port: int, maximum_buffer: int = 1000, level: int = logging.NOTSET, - _reactor: Optional[IReactorTCP] = None, + _reactor: Optional[IReactorTime] = None, ): super().__init__(level=level) self.host = host diff --git a/tests/server.py b/tests/server.py index 95aff6f66c41..23c81203a5a4 100644 --- a/tests/server.py +++ b/tests/server.py @@ -58,6 +58,7 @@ from twisted.enterprise import adbapi from twisted.internet import address, tcp, threads, udp from twisted.internet._resolver import SimpleResolverComplexifier +from twisted.internet.address import IPv4Address, IPv6Address from twisted.internet.defer import Deferred, fail, maybeDeferred, succeed from twisted.internet.error import DNSLookupError from twisted.internet.interfaces import ( @@ -73,6 +74,7 @@ IReactorPluggableNameResolver, IReactorTime, IResolverSimple, + ITCPTransport, ITransport, ) from twisted.internet.protocol import ClientFactory, DatagramProtocol, Factory @@ -780,7 +782,7 @@ def get_clock() -> Tuple[ThreadedMemoryReactorClock, Clock]: return clock, hs_clock -@implementer(ITransport) +@implementer(ITCPTransport) @attr.s(cmp=False, auto_attribs=True) class FakeTransport: """ @@ -809,12 +811,12 @@ class FakeTransport: will get called back for connectionLost() notifications etc. 
""" - _peer_address: IAddress = attr.Factory( + _peer_address: Union[IPv4Address, IPv6Address] = attr.Factory( lambda: address.IPv4Address("TCP", "127.0.0.1", 5678) ) """The value to be returned by getPeer""" - _host_address: IAddress = attr.Factory( + _host_address: Union[IPv4Address, IPv6Address] = attr.Factory( lambda: address.IPv4Address("TCP", "127.0.0.1", 1234) ) """The value to be returned by getHost""" @@ -826,10 +828,10 @@ class FakeTransport: producer: Optional[IPushProducer] = None autoflush: bool = True - def getPeer(self) -> IAddress: + def getPeer(self) -> Union[IPv4Address, IPv6Address]: return self._peer_address - def getHost(self) -> IAddress: + def getHost(self) -> Union[IPv4Address, IPv6Address]: return self._host_address def loseConnection(self) -> None: @@ -939,6 +941,51 @@ def flush(self, maxbytes: Optional[int] = None) -> None: logger.info("FakeTransport: Buffer now empty, completing disconnect") self.disconnected = True + ## ITCPTransport methods. ## + + def loseWriteConnection(self) -> None: + """ + Half-close the write side of a TCP connection. + + If the protocol instance this is attached to provides + IHalfCloseableProtocol, it will get notified when the operation is + done. When closing write connection, as with loseConnection this will + only happen when buffer has emptied and there is no registered + producer. + """ + raise NotImplementedError() + + def getTcpNoDelay(self) -> bool: + """ + Return if C{TCP_NODELAY} is enabled. + """ + return False + + def setTcpNoDelay(self, enabled: bool) -> None: + """ + Enable/disable C{TCP_NODELAY}. + + Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are + sent sooner, possibly at the expense of overall throughput. + """ + # Ignore setting this. + + def getTcpKeepAlive(self) -> bool: + """ + Return if C{SO_KEEPALIVE} is enabled. + """ + return False + + def setTcpKeepAlive(self, enabled: bool) -> None: + """ + Enable/disable C{SO_KEEPALIVE}. + + Enabling C{SO_KEEPALIVE} sends packets periodically when the connection + is otherwise idle, usually once every two hours. They are intended + to allow detection of lost peers in a non-infinite amount of time. + """ + # Ignore setting this. + def connect_client( reactor: ThreadedMemoryReactorClock, client_id: int From d0fc1e904a3060b0f459be9aa7df9b9f1501e294 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 7 Nov 2024 15:26:14 +0000 Subject: [PATCH 075/147] Fix cancellation tests with new Twisted. (#17906) The latest Twisted release changed how they implemented `__await__` on deferreds, which broke the machinery we used to test cancellation. This PR changes things a bit to instead patch the `__await__` method, which is a stable API. This mostly doesn't change the core logic, except for fixing two bugs: - We previously did not intercept all await points - After cancellation we now need to not only unblock currently blocked await points, but also make sure we don't block any future await points. c.f. https://github.com/twisted/twisted/pull/12226 --------- Co-authored-by: Devon Hudson --- changelog.d/17906.bugfix | 1 + tests/http/server/_base.py | 107 ++++++++++++++++++++++++++++--------- 2 files changed, 84 insertions(+), 24 deletions(-) create mode 100644 changelog.d/17906.bugfix diff --git a/changelog.d/17906.bugfix b/changelog.d/17906.bugfix new file mode 100644 index 000000000000..f38ce6a59032 --- /dev/null +++ b/changelog.d/17906.bugfix @@ -0,0 +1 @@ +Fix tests to run with latest Twisted. 
diff --git a/tests/http/server/_base.py b/tests/http/server/_base.py index 731b0c4e59a4..dff5a5d262d1 100644 --- a/tests/http/server/_base.py +++ b/tests/http/server/_base.py @@ -27,6 +27,7 @@ Callable, ContextManager, Dict, + Generator, List, Optional, Set, @@ -49,7 +50,10 @@ respond_with_json, ) from synapse.http.site import SynapseRequest -from synapse.logging.context import LoggingContext, make_deferred_yieldable +from synapse.logging.context import ( + LoggingContext, + make_deferred_yieldable, +) from synapse.types import JsonDict from tests.server import FakeChannel, make_request @@ -199,7 +203,7 @@ def make_request_with_cancellation_test( # # We would like to trigger a cancellation at the first `await`, re-run the # request and cancel at the second `await`, and so on. By patching - # `Deferred.__next__`, we can intercept `await`s, track which ones we have or + # `Deferred.__await__`, we can intercept `await`s, track which ones we have or # have not seen, and force them to block when they wouldn't have. # The set of previously seen `await`s. @@ -211,7 +215,7 @@ def make_request_with_cancellation_test( ) for request_number in itertools.count(1): - deferred_patch = Deferred__next__Patch(seen_awaits, request_number) + deferred_patch = Deferred__await__Patch(seen_awaits, request_number) try: with mock.patch( @@ -250,6 +254,8 @@ def make_request_with_cancellation_test( ) if respond_mock.called: + _log_for_request(request_number, "--- response finished ---") + # The request ran to completion and we are done with testing it. # `respond_with_json` writes the response asynchronously, so we @@ -311,8 +317,8 @@ def make_request_with_cancellation_test( assert False, "unreachable" # noqa: B011 -class Deferred__next__Patch: - """A `Deferred.__next__` patch that will intercept `await`s and force them +class Deferred__await__Patch: + """A `Deferred.__await__` patch that will intercept `await`s and force them to block once it sees a new `await`. When done with the patch, `unblock_awaits()` must be called to clean up after any @@ -322,7 +328,7 @@ class Deferred__next__Patch: Usage: seen_awaits = set() - deferred_patch = Deferred__next__Patch(seen_awaits, 1) + deferred_patch = Deferred__await__Patch(seen_awaits, 1) try: with deferred_patch.patch(): # do things @@ -335,14 +341,14 @@ def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int): """ Args: seen_awaits: The set of stack traces of `await`s that have been previously - seen. When the `Deferred.__next__` patch sees a new `await`, it will add + seen. When the `Deferred.__await__` patch sees a new `await`, it will add it to the set. request_number: The request number to log against. """ self._request_number = request_number self._seen_awaits = seen_awaits - self._original_Deferred___next__ = Deferred.__next__ # type: ignore[misc,unused-ignore] + self._original_Deferred__await__ = Deferred.__await__ # type: ignore[misc,unused-ignore] # The number of `await`s on `Deferred`s we have seen so far. self.awaits_seen = 0 @@ -350,8 +356,13 @@ def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int): # Whether we have seen a new `await` not in `seen_awaits`. self.new_await_seen = False + # Whether to block new await points we see. This gets set to False once + # we have cancelled the request to allow things to run after + # cancellation. 
+ self._block_new_awaits = True + # To force `await`s on resolved `Deferred`s to block, we make up a new - # unresolved `Deferred` and return it out of `Deferred.__next__` / + # unresolved `Deferred` and return it out of `Deferred.__await__` / # `coroutine.send()`. We have to resolve it later, in case the `await`ing # coroutine is part of some shared processing, such as `@cached`. self._to_unblock: Dict[Deferred, Union[object, Failure]] = {} @@ -360,15 +371,15 @@ def __init__(self, seen_awaits: Set[Tuple[str, ...]], request_number: int): self._previous_stack: List[inspect.FrameInfo] = [] def patch(self) -> ContextManager[Mock]: - """Returns a context manager which patches `Deferred.__next__`.""" + """Returns a context manager which patches `Deferred.__await__`.""" - def Deferred___next__( - deferred: "Deferred[T]", value: object = None - ) -> "Deferred[T]": - """Intercepts `await`s on `Deferred`s and rigs them to block once we have - seen enough of them. + def Deferred___await__( + deferred: "Deferred[T]", + ) -> Generator["Deferred[T]", None, T]: + """Intercepts calls to `__await__`, which returns a generator + yielding deferreds that we await on. - `Deferred.__next__` will normally: + The generator for `__await__` will normally: * return `self` if the `Deferred` is unresolved, in which case `coroutine.send()` will return the `Deferred`, and `_defer.inlineCallbacks` will stop running the coroutine until the @@ -376,9 +387,43 @@ def Deferred___next__( * raise a `StopIteration(result)`, containing the result of the `await`. * raise another exception, which will come out of the `await`. """ + + # Get the original generator. + gen = self._original_Deferred__await__(deferred) + + # Run the generator, handling each iteration to see if we need to + # block. + try: + while True: + # We've hit a new await point (or the deferred has + # completed), handle it. + handle_next_iteration(deferred) + + # Continue on. + yield gen.send(None) + except StopIteration as e: + # We need to convert `StopIteration` into a normal return. + return e.value + + def handle_next_iteration( + deferred: "Deferred[T]", + ) -> None: + """Intercepts `await`s on `Deferred`s and rigs them to block once we have + seen enough of them. + + Args: + deferred: The deferred that we've captured and are intercepting + `await` calls within. + """ + if not self._block_new_awaits: + # We're no longer blocking awaits points + return + self.awaits_seen += 1 - stack = _get_stack(skip_frames=1) + stack = _get_stack( + skip_frames=2 # Ignore this function and `Deferred___await__` in stack trace + ) stack_hash = _hash_stack(stack) if stack_hash not in self._seen_awaits: @@ -389,20 +434,29 @@ def Deferred___next__( if not self.new_await_seen: # This `await` isn't interesting. Let it proceed normally. + _log_await_stack( + stack, + self._previous_stack, + self._request_number, + "already seen", + ) + # Don't log the stack. It's been seen before in a previous run. self._previous_stack = stack - return self._original_Deferred___next__(deferred, value) + return # We want to block at the current `await`. if deferred.called and not deferred.paused: - # This `Deferred` already has a result. - # We return a new, unresolved, `Deferred` for `_inlineCallbacks` to wait - # on. This blocks the coroutine that did this `await`. + # This `Deferred` already has a result. We chain a new, + # unresolved, `Deferred` to the end of this Deferred that it + # will wait on. This blocks the coroutine that did this `await`. # We queue it up for unblocking later. 
new_deferred: "Deferred[T]" = Deferred() self._to_unblock[new_deferred] = deferred.result + deferred.addBoth(lambda _: make_deferred_yieldable(new_deferred)) + _log_await_stack( stack, self._previous_stack, @@ -411,7 +465,9 @@ def Deferred___next__( ) self._previous_stack = stack - return make_deferred_yieldable(new_deferred) + # Continue iterating on the deferred now that we've blocked it + # again. + return # This `Deferred` does not have a result yet. # The `await` will block normally, so we don't have to do anything. @@ -423,9 +479,9 @@ def Deferred___next__( ) self._previous_stack = stack - return self._original_Deferred___next__(deferred, value) + return - return mock.patch.object(Deferred, "__next__", new=Deferred___next__) + return mock.patch.object(Deferred, "__await__", new=Deferred___await__) def unblock_awaits(self) -> None: """Unblocks any shared processing that we forced to block. @@ -433,6 +489,9 @@ def unblock_awaits(self) -> None: Must be called when done, otherwise processing shared between multiple requests, such as database queries started by `@cached`, will become permanently stuck. """ + # Also disable blocking at future await points + self._block_new_awaits = False + to_unblock = self._to_unblock self._to_unblock = {} for deferred, result in to_unblock.items(): From c92639df2137a074f55f854406e7d4cf0db88ce4 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 7 Nov 2024 16:09:45 +0000 Subject: [PATCH 076/147] Switch portdb CI to python 3.13, pg 17 (#17909) --- .github/workflows/tests.yml | 4 ++-- changelog.d/17909.misc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17909.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 27dac8922041..d91f9c291876 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -583,8 +583,8 @@ jobs: - python-version: "3.9" postgres-version: "11" - - python-version: "3.11" - postgres-version: "15" + - python-version: "3.13" + postgres-version: "17" services: postgres: diff --git a/changelog.d/17909.misc b/changelog.d/17909.misc new file mode 100644 index 000000000000..f826aa794881 --- /dev/null +++ b/changelog.d/17909.misc @@ -0,0 +1 @@ +Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. \ No newline at end of file From c7a1d0aa1afc8349dd7839f5ba6ea6a0a2830013 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 7 Nov 2024 16:22:09 +0000 Subject: [PATCH 077/147] Fix Twisted tests with latest release (#17911) c.f. #17906 and #17907 --- changelog.d/17911.bugfix | 1 + tests/util/test_async_helpers.py | 15 +++++++++++---- 2 files changed, 12 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17911.bugfix diff --git a/changelog.d/17911.bugfix b/changelog.d/17911.bugfix new file mode 100644 index 000000000000..f38ce6a59032 --- /dev/null +++ b/changelog.d/17911.bugfix @@ -0,0 +1 @@ +Fix tests to run with latest Twisted. diff --git a/tests/util/test_async_helpers.py b/tests/util/test_async_helpers.py index 350a2b7c8cdd..cfd2882410e7 100644 --- a/tests/util/test_async_helpers.py +++ b/tests/util/test_async_helpers.py @@ -320,12 +320,19 @@ async def caller() -> None: await concurrently_execute(callback, [1], 2) except _TestException as e: tb = traceback.extract_tb(e.__traceback__) - # we expect to see "caller", "concurrently_execute", "callback", - # and some magic from inside ensureDeferred that happens when .fail - # is called. 
+ + # Remove twisted internals from the stack, as we don't care + # about the precise details. + tb = traceback.StackSummary( + t for t in tb if "/twisted/" not in t.filename + ) + + # we expect to see "caller", "concurrently_execute" at the top of the stack self.assertEqual(tb[0].name, "caller") self.assertEqual(tb[1].name, "concurrently_execute") - self.assertEqual(tb[-2].name, "callback") + # ... some stack frames from the implementation of `concurrently_execute` ... + # and at the bottom of the stack we expect to see "callback" + self.assertEqual(tb[-1].name, "callback") else: self.fail("No exception thrown") From cacd4fd7bd40465732fc302a69efa39dcb5eb118 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 Nov 2024 16:41:24 +0000 Subject: [PATCH 078/147] Fix MSC4222 returning full state (#17915) There was a bug that meant we would return the full state of the room on incremental syncs when using lazy loaded members and there were no entries in the timeline. This was due to trying to use `state_filter or state_filter.all()` as a short hand for handling `None` case, however `state_filter` implements `__bool__` so if the state filter was empty it would be set to full. c.f. MSC4222 and #17888 --- changelog.d/17915.bugfix | 1 + synapse/handlers/message.py | 4 +- synapse/handlers/sync.py | 2 +- synapse/storage/controllers/state.py | 56 ++++++++++++------- synapse/storage/databases/main/state.py | 8 +-- synapse/storage/databases/state/bg_updates.py | 4 +- synapse/storage/databases/state/store.py | 3 +- synapse/types/state.py | 12 +++- tests/handlers/test_sync.py | 32 +++++++++++ 9 files changed, 91 insertions(+), 31 deletions(-) create mode 100644 changelog.d/17915.bugfix diff --git a/changelog.d/17915.bugfix b/changelog.d/17915.bugfix new file mode 100644 index 000000000000..a5d82e486db0 --- /dev/null +++ b/changelog.d/17915.bugfix @@ -0,0 +1 @@ +Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 204965afeec9..df3010ecf689 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -196,7 +196,9 @@ async def get_state_events( AuthError (403) if the user doesn't have permission to view members of this room. """ - state_filter = state_filter or StateFilter.all() + if state_filter is None: + state_filter = StateFilter.all() + user_id = requester.user.to_string() if at_token: diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index df9a088063fb..350c3fa09a19 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -1520,7 +1520,7 @@ async def _compute_state_delta_for_incremental_sync( if sync_config.use_state_after: delta_state_ids: MutableStateMap[str] = {} - if members_to_fetch is not None: + if members_to_fetch: # We're lazy-loading, so the client might need some more member # events to understand the events in this timeline. 
So we always # fish out all the member events corresponding to the timeline diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index b50eb8868ec7..f28f5d7e0390 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -234,8 +234,11 @@ async def get_state_for_events( RuntimeError if we don't have a state group for one or more of the events (ie they are outliers or unknown) """ + if state_filter is None: + state_filter = StateFilter.all() + await_full_state = True - if state_filter and not state_filter.must_await_full_state(self._is_mine_id): + if not state_filter.must_await_full_state(self._is_mine_id): await_full_state = False event_to_groups = await self.get_state_group_for_events( @@ -244,7 +247,7 @@ async def get_state_for_events( groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter or StateFilter.all() + groups, state_filter ) state_event_map = await self.stores.main.get_events( @@ -292,10 +295,11 @@ async def get_state_ids_for_events( RuntimeError if we don't have a state group for one or more of the events (ie they are outliers or unknown) """ - if ( - await_full_state - and state_filter - and not state_filter.must_await_full_state(self._is_mine_id) + if state_filter is None: + state_filter = StateFilter.all() + + if await_full_state and not state_filter.must_await_full_state( + self._is_mine_id ): # Full state is not required if the state filter is restrictive enough. await_full_state = False @@ -306,7 +310,7 @@ async def get_state_ids_for_events( groups = set(event_to_groups.values()) group_to_state = await self.stores.state._get_state_for_groups( - groups, state_filter or StateFilter.all() + groups, state_filter ) event_to_state = { @@ -335,9 +339,10 @@ async def get_state_for_event( RuntimeError if we don't have a state group for the event (ie it is an outlier or is unknown) """ - state_map = await self.get_state_for_events( - [event_id], state_filter or StateFilter.all() - ) + if state_filter is None: + state_filter = StateFilter.all() + + state_map = await self.get_state_for_events([event_id], state_filter) return state_map[event_id] @trace @@ -365,9 +370,12 @@ async def get_state_ids_for_event( RuntimeError if we don't have a state group for the event (ie it is an outlier or is unknown) """ + if state_filter is None: + state_filter = StateFilter.all() + state_map = await self.get_state_ids_for_events( [event_id], - state_filter or StateFilter.all(), + state_filter, await_full_state=await_full_state, ) return state_map[event_id] @@ -388,9 +396,12 @@ async def get_state_after_event( at the event and `state_filter` is not satisfied by partial state. Defaults to `True`. """ + if state_filter is None: + state_filter = StateFilter.all() + state_ids = await self.get_state_ids_for_event( event_id, - state_filter=state_filter or StateFilter.all(), + state_filter=state_filter, await_full_state=await_full_state, ) @@ -426,6 +437,9 @@ async def get_state_ids_at( at the last event in the room before `stream_position` and `state_filter` is not satisfied by partial state. Defaults to `True`. """ + if state_filter is None: + state_filter = StateFilter.all() + # FIXME: This gets the state at the latest event before the stream ordering, # which might not be the same as the "current state" of the room at the time # of the stream token if there were multiple forward extremities at the time. 
@@ -442,7 +456,7 @@ async def get_state_ids_at( if last_event_id: state = await self.get_state_after_event( last_event_id, - state_filter=state_filter or StateFilter.all(), + state_filter=state_filter, await_full_state=await_full_state, ) @@ -500,9 +514,10 @@ async def get_state_for_groups( Returns: Dict of state group to state map. """ - return await self.stores.state._get_state_for_groups( - groups, state_filter or StateFilter.all() - ) + if state_filter is None: + state_filter = StateFilter.all() + + return await self.stores.state._get_state_for_groups(groups, state_filter) @trace @tag_args @@ -583,12 +598,13 @@ async def get_current_state_ids( Returns: The current state of the room. """ - if await_full_state and ( - not state_filter or state_filter.must_await_full_state(self._is_mine_id) - ): + if state_filter is None: + state_filter = StateFilter.all() + + if await_full_state and state_filter.must_await_full_state(self._is_mine_id): await self._partial_state_room_tracker.await_full_state(room_id) - if state_filter and not state_filter.is_full(): + if state_filter is not None and not state_filter.is_full(): return await self.stores.main.get_partial_filtered_current_state_ids( room_id, state_filter ) diff --git a/synapse/storage/databases/main/state.py b/synapse/storage/databases/main/state.py index 42b3638e1c8c..788f7d1e325a 100644 --- a/synapse/storage/databases/main/state.py +++ b/synapse/storage/databases/main/state.py @@ -572,10 +572,10 @@ async def get_partial_filtered_current_state_ids( Returns: Map from type/state_key to event ID. """ + if state_filter is None: + state_filter = StateFilter.all() - where_clause, where_args = ( - state_filter or StateFilter.all() - ).make_sql_filter_clause() + where_clause, where_args = (state_filter).make_sql_filter_clause() if not where_clause: # We delegate to the cached version @@ -584,7 +584,7 @@ async def get_partial_filtered_current_state_ids( def _get_filtered_current_state_ids_txn( txn: LoggingTransaction, ) -> StateMap[str]: - results = StateMapWrapper(state_filter=state_filter or StateFilter.all()) + results = StateMapWrapper(state_filter=state_filter) sql = """ SELECT type, state_key, event_id FROM current_state_events diff --git a/synapse/storage/databases/state/bg_updates.py b/synapse/storage/databases/state/bg_updates.py index ea7d8199a7d5..f7824cba0f21 100644 --- a/synapse/storage/databases/state/bg_updates.py +++ b/synapse/storage/databases/state/bg_updates.py @@ -112,8 +112,8 @@ def _get_state_groups_from_groups_txn( Returns: Map from state_group to a StateMap at that point. """ - - state_filter = state_filter or StateFilter.all() + if state_filter is None: + state_filter = StateFilter.all() results: Dict[int, MutableStateMap[str]] = {group: {} for group in groups} diff --git a/synapse/storage/databases/state/store.py b/synapse/storage/databases/state/store.py index 875dba33496c..f7a59c8992db 100644 --- a/synapse/storage/databases/state/store.py +++ b/synapse/storage/databases/state/store.py @@ -284,7 +284,8 @@ async def _get_state_for_groups( Returns: Dict of state group to state map. 
""" - state_filter = state_filter or StateFilter.all() + if state_filter is None: + state_filter = StateFilter.all() member_filter, non_member_filter = state_filter.get_member_split() diff --git a/synapse/types/state.py b/synapse/types/state.py index 67d1c3fe9722..e641215f1840 100644 --- a/synapse/types/state.py +++ b/synapse/types/state.py @@ -68,15 +68,23 @@ class StateFilter: include_others: bool = False def __attrs_post_init__(self) -> None: - # If `include_others` is set we canonicalise the filter by removing - # wildcards from the types dictionary if self.include_others: + # If `include_others` is set we canonicalise the filter by removing + # wildcards from the types dictionary + # this is needed to work around the fact that StateFilter is frozen object.__setattr__( self, "types", immutabledict({k: v for k, v in self.types.items() if v is not None}), ) + else: + # Otherwise we remove entries where the value is the empty set. + object.__setattr__( + self, + "types", + immutabledict({k: v for k, v in self.types.items() if v is None or v}), + ) @staticmethod def all() -> "StateFilter": diff --git a/tests/handlers/test_sync.py b/tests/handlers/test_sync.py index 1960d2f0e108..9dd0e98971bf 100644 --- a/tests/handlers/test_sync.py +++ b/tests/handlers/test_sync.py @@ -1262,3 +1262,35 @@ def test_incremental_sync_multiple_deltas(self) -> None: ) ) self.assertEqual(state[("m.test_event", "")], second_state["event_id"]) + + def test_incremental_sync_lazy_loaded_no_timeline(self) -> None: + """Test that lazy-loading with an empty timeline doesn't return the full + state. + + There was a bug where an empty state filter would cause the DB to return + the full state, rather than an empty set. + """ + user = self.register_user("user", "password") + tok = self.login("user", "password") + + # Create a room as the user and set some custom state. 
+ joined_room = self.helper.create_room_as(user, tok=tok) + + since_token = self.hs.get_event_sources().get_current_token() + end_stream_token = self.hs.get_event_sources().get_current_token() + + state = self.get_success( + self.sync_handler._compute_state_delta_for_incremental_sync( + room_id=joined_room, + sync_config=generate_sync_config(user, use_state_after=True), + batch=TimelineBatch( + prev_batch=end_stream_token, events=[], limited=True + ), + since_token=since_token, + end_token=end_stream_token, + members_to_fetch=set(), + timeline_state={}, + ) + ) + + self.assertEqual(state, {}) From 2f41f6d9478f924b4ef238f9b1daca68df645232 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Fri, 8 Nov 2024 10:23:07 -0700 Subject: [PATCH 079/147] Update changelog for release --- CHANGES.md | 16 +++++++++++++--- changelog.d/17902.misc | 1 - changelog.d/17903.bugfix | 1 - changelog.d/17906.bugfix | 1 - changelog.d/17907.bugfix | 1 - changelog.d/17908.misc | 1 - changelog.d/17909.misc | 1 - changelog.d/17911.bugfix | 1 - changelog.d/17915.bugfix | 1 - 9 files changed, 13 insertions(+), 11 deletions(-) delete mode 100644 changelog.d/17902.misc delete mode 100644 changelog.d/17903.bugfix delete mode 100644 changelog.d/17906.bugfix delete mode 100644 changelog.d/17907.bugfix delete mode 100644 changelog.d/17908.misc delete mode 100644 changelog.d/17909.misc delete mode 100644 changelog.d/17911.bugfix delete mode 100644 changelog.d/17915.bugfix diff --git a/CHANGES.md b/CHANGES.md index 4d34adc7e1df..766b90dd4c64 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,10 @@ -# Synapse 1.119.0rc1 (2024-11-06) +# Synapse 1.119.0rc1 (2024-11-11) + +### Python 3.8 support dropped + +Python 3.8 is no longer supported by Synapse. The minimum supported Python version is now 3.9. + +If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse. ### Features @@ -13,9 +19,12 @@ the config option `run_background_tasks_on`. ([\#17847](https://github.com/element-hq/synapse/issues/17847)) - Fix bug where some presence and typing timeouts can expire early. ([\#17850](https://github.com/element-hq/synapse/issues/17850)) - Fix detection when the built Rust library was outdated when using source installations. ([\#17861](https://github.com/element-hq/synapse/issues/17861)) +- Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. ([\#17903](https://github.com/element-hq/synapse/pull/17903)) +- Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) (Adding `state_after` to sync v2) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. ([\#17915](https://github.com/element-hq/synapse/pull/17915)) ### Internal Changes +- Remove support for python 3.8. ([\#17908](https://github.com/element-hq/synapse/issues/17908)) - Add a test for downloading and thumbnailing a CMYK JPEG. ([\#17786](https://github.com/element-hq/synapse/issues/17786)) - Refactor database calls to remove `Generator` usage. 
([\#17813](https://github.com/element-hq/synapse/issues/17813), [\#17814](https://github.com/element-hq/synapse/issues/17814), [\#17815](https://github.com/element-hq/synapse/issues/17815), [\#17816](https://github.com/element-hq/synapse/issues/17816), [\#17817](https://github.com/element-hq/synapse/issues/17817), [\#17818](https://github.com/element-hq/synapse/issues/17818), [\#17890](https://github.com/element-hq/synapse/issues/17890)) - Include the destination in the error of 'Destination mismatch' on federation requests. ([\#17830](https://github.com/element-hq/synapse/issues/17830)) @@ -24,8 +33,9 @@ - Bump the default Python version in the Synapse Dockerfile from 3.11 -> 3.12. ([\#17887](https://github.com/element-hq/synapse/issues/17887)) - Remove usage of internal header encoding API. ([\#17894](https://github.com/element-hq/synapse/issues/17894)) - Use unique name for each os.arch variant when uploading Wheel artifacts. ([\#17905](https://github.com/element-hq/synapse/issues/17905)) - - +- Fix tests to run with latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911)) +- Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902)) +- Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909)) ### Updates to locked dependencies diff --git a/changelog.d/17902.misc b/changelog.d/17902.misc deleted file mode 100644 index f094f57c2fcc..000000000000 --- a/changelog.d/17902.misc +++ /dev/null @@ -1 +0,0 @@ -Update version constraint to allow the latest poetry-core 1.9.1. diff --git a/changelog.d/17903.bugfix b/changelog.d/17903.bugfix deleted file mode 100644 index a4d02fc98307..000000000000 --- a/changelog.d/17903.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a long-standing bug in Synapse which could cause one-time keys to be issued in the incorrect order, causing message decryption failures. diff --git a/changelog.d/17906.bugfix b/changelog.d/17906.bugfix deleted file mode 100644 index f38ce6a59032..000000000000 --- a/changelog.d/17906.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tests to run with latest Twisted. diff --git a/changelog.d/17907.bugfix b/changelog.d/17907.bugfix deleted file mode 100644 index f38ce6a59032..000000000000 --- a/changelog.d/17907.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tests to run with latest Twisted. diff --git a/changelog.d/17908.misc b/changelog.d/17908.misc deleted file mode 100644 index 8f1772914865..000000000000 --- a/changelog.d/17908.misc +++ /dev/null @@ -1 +0,0 @@ -Remove support for python 3.8. diff --git a/changelog.d/17909.misc b/changelog.d/17909.misc deleted file mode 100644 index f826aa794881..000000000000 --- a/changelog.d/17909.misc +++ /dev/null @@ -1 +0,0 @@ -Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. \ No newline at end of file diff --git a/changelog.d/17911.bugfix b/changelog.d/17911.bugfix deleted file mode 100644 index f38ce6a59032..000000000000 --- a/changelog.d/17911.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix tests to run with latest Twisted. 
diff --git a/changelog.d/17915.bugfix b/changelog.d/17915.bugfix deleted file mode 100644 index a5d82e486db0..000000000000 --- a/changelog.d/17915.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix experimental support for [MSC4222](https://github.com/matrix-org/matrix-spec-proposals/pull/4222) where we would return the full state on incremental syncs when using lazy loaded members and there were no new events in the timeline. From 20fc9fcc338a8c9b36cb077b0bb3372a7065ce65 Mon Sep 17 00:00:00 2001 From: reivilibre Date: Mon, 11 Nov 2024 10:44:47 +0000 Subject: [PATCH 080/147] Clarify the semantics of the `enable_authenticated_media` configuration option. (#17913) Signed-off-by: Olivier 'reivilibre --- changelog.d/17913.doc | 1 + .../configuration/config_documentation.md | 20 +++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 changelog.d/17913.doc diff --git a/changelog.d/17913.doc b/changelog.d/17913.doc new file mode 100644 index 000000000000..39f59795625a --- /dev/null +++ b/changelog.d/17913.doc @@ -0,0 +1 @@ +Clarify the semantics of the `enable_authenticated_media` configuration option. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 47e3ef12870c..deb04570bbae 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1890,6 +1890,26 @@ unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_mat after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but this will change to true in a future Synapse release. +In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this +case-by-case breakdown describes whether media downloads are permitted: + +* `enable_authenticated_media = False`: + * unauthenticated client or homeserver requesting local media: allowed + * unauthenticated client or homeserver requesting remote media: allowed as long as the media is in the cache, + or as long as the remote homeserver does not require authentication to retrieve the media +* `enable_authenticated_media = True`: + * unauthenticated client or homeserver requesting local media: + allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); + otherwise denied. + * unauthenticated client or homeserver requesting remote media: the same as for local media; + allowed if the media was stored on the server whilst `enable_authenticated_media` was `False` (or in a previous Synapse version where this option did not exist); + otherwise denied. + +It is especially notable that media downloaded before this option existed (in older Synapse versions), or whilst this option was set to `False`, +will perpetually be available over the legacy, unauthenticated endpoint, even after this option is set to `True`. +This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media; +those older clients or homeservers will not be cut off from media they can already see. 
+ Example configuration: ```yaml enable_authenticated_media: true From c486ec8bc24460d86e23e174b13839875c382ed4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 Nov 2024 10:45:46 +0000 Subject: [PATCH 081/147] Add index to `current_state_delta_stream` (#17912) As we're now using it in the sync APIs to get state changes within a room --- changelog.d/17912.misc | 1 + .../storage/databases/main/state_deltas.py | 27 +++++++++++++++++-- .../delta/88/04_current_state_delta_index.sql | 18 +++++++++++++ 3 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17912.misc create mode 100644 synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql diff --git a/changelog.d/17912.misc b/changelog.d/17912.misc new file mode 100644 index 000000000000..f6f661476ae1 --- /dev/null +++ b/changelog.d/17912.misc @@ -0,0 +1 @@ +Add an index to `current_state_delta_stream` table. diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index ba52fff652f2..117ee89d0a00 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -20,18 +20,26 @@ # import logging -from typing import List, Optional, Tuple +from typing import TYPE_CHECKING, List, Optional, Tuple import attr from synapse.logging.opentracing import trace from synapse.storage._base import SQLBaseStore -from synapse.storage.database import LoggingTransaction, make_in_list_sql_clause +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, + make_in_list_sql_clause, +) from synapse.storage.databases.main.stream import _filter_results_by_stream from synapse.types import RoomStreamToken, StrCollection from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.iterutils import batch_iter +if TYPE_CHECKING: + from synapse.server import HomeServer + logger = logging.getLogger(__name__) @@ -54,6 +62,21 @@ class StateDeltasStore(SQLBaseStore): # attribute. TODO: can we get static analysis to enforce this? _curr_state_delta_stream_cache: StreamChangeCache + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + self.db_pool.updates.register_background_index_update( + update_name="current_state_delta_stream_room_index", + index_name="current_state_delta_stream_room_idx", + table="current_state_delta_stream", + columns=("room_id", "stream_id"), + ) + async def get_partial_current_state_deltas( self, prev_stream_id: int, max_stream_id: int ) -> Tuple[int, List[StateDelta]]: diff --git a/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql b/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql new file mode 100644 index 000000000000..ad54302a8f9f --- /dev/null +++ b/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql @@ -0,0 +1,18 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . 
+
+
+-- Add an index on (room_id, stream_id) on current_state_delta_stream, so that
+-- the state deltas within a given room can be queried efficiently.
+INSERT INTO background_updates (ordering, update_name, progress_json) VALUES
+  (8804, 'current_state_delta_stream_room_index', '{}');

From 92fcca8ed7ddaa28b661edaa93bf8c84c2dd152b Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 11 Nov 2024 10:46:34 +0000
Subject: [PATCH 082/147] Update changelog

---
 CHANGES.md             | 1 +
 changelog.d/17912.misc | 1 -
 2 files changed, 1 insertion(+), 1 deletion(-)
 delete mode 100644 changelog.d/17912.misc

diff --git a/CHANGES.md b/CHANGES.md
index 766b90dd4c64..61d826a06b33 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -36,6 +36,7 @@ If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or gre
 - Fix tests to run with latest Twisted. ([\#17906](https://github.com/element-hq/synapse/pull/17906), [\#17907](https://github.com/element-hq/synapse/pull/17907), [\#17911](https://github.com/element-hq/synapse/pull/17911))
 - Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902))
 - Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909))
+- Add an index to `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912))
 
 ### Updates to locked dependencies
 
diff --git a/changelog.d/17912.misc b/changelog.d/17912.misc
deleted file mode 100644
index f6f661476ae1..000000000000
--- a/changelog.d/17912.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add an index to `current_state_delta_stream` table.

From f4943b875b9fa5ac45583de6b883a335095e21d2 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Mon, 11 Nov 2024 11:37:09 +0000
Subject: [PATCH 083/147] Update changelog

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index 61d826a06b33..898cf51d466d 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -2,7 +2,7 @@
 
 ### Python 3.8 support dropped
 
-Python 3.8 is no longer supported by Synapse. The minimum supported Python version is now 3.9.
+Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no longer supported by Synapse. The minimum supported Python version is now 3.9.
 
 If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse.
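The truthiness pitfall behind [PATCH 078/147] above is worth spelling out, since it is easy to reintroduce. An object that defines `__bool__` can be falsy while still being a perfectly valid value, so `value or default` silently discards it in exactly the case where it matters; only `None` should trigger the default. A self-contained illustration with a hypothetical stand-in class (not Synapse's actual `StateFilter`):

```python
from typing import FrozenSet, Optional


class Filter:
    """Stand-in filter whose truthiness reflects emptiness, loosely
    modelled on the behaviour described in that commit message."""

    def __init__(self, types: FrozenSet[str]) -> None:
        self.types = types

    def __bool__(self) -> bool:
        # An empty filter is falsy, even though it is valid: it simply
        # selects nothing.
        return bool(self.types)


FULL = Filter(frozenset({"*"}))


def resolve_buggy(f: Optional[Filter]) -> Filter:
    # Bug: `or` treats a falsy-but-valid empty filter the same as None,
    # silently upgrading "select nothing" to "select everything".
    return f or FULL


def resolve_fixed(f: Optional[Filter]) -> Filter:
    # Fix: substitute the default only when the value is actually None.
    if f is None:
        f = FULL
    return f


empty = Filter(frozenset())
assert resolve_buggy(empty) is FULL   # wrong: empty became "everything"
assert resolve_fixed(empty) is empty  # right: empty stays empty
```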
From 9916932e9898a6068c95c1d760f1e27862fa9c36 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:51:36 +0000 Subject: [PATCH 084/147] Bump anyhow from 1.0.92 to 1.0.93 (#17920) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2e23b95f85a1..e9bc05159e2d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.92" +version = "1.0.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f37166d7d48a0284b99dd824694c26119c700b53bf0d1540cdb147dbdaaf13" +checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" [[package]] name = "arc-swap" From 54e0086abd6f410875ac15dc78ad3d678404e2e8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:51:47 +0000 Subject: [PATCH 085/147] Bump ruff from 0.7.2 to 0.7.3 (#17919) --- poetry.lock | 42 +++++++++++++++++++++--------------------- pyproject.toml | 2 +- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/poetry.lock b/poetry.lock index 16b7dc504eb4..6618296688c0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. [[package]] name = "annotated-types" @@ -2257,29 +2257,29 @@ files = [ [[package]] name = "ruff" -version = "0.7.2" +version = "0.7.3" description = "An extremely fast Python linter and code formatter, written in Rust." optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.7.2-py3-none-linux_armv6l.whl", hash = "sha256:b73f873b5f52092e63ed540adefc3c36f1f803790ecf2590e1df8bf0a9f72cb8"}, - {file = "ruff-0.7.2-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5b813ef26db1015953daf476202585512afd6a6862a02cde63f3bafb53d0b2d4"}, - {file = "ruff-0.7.2-py3-none-macosx_11_0_arm64.whl", hash = "sha256:853277dbd9675810c6826dad7a428d52a11760744508340e66bf46f8be9701d9"}, - {file = "ruff-0.7.2-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21aae53ab1490a52bf4e3bf520c10ce120987b047c494cacf4edad0ba0888da2"}, - {file = "ruff-0.7.2-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ccc7e0fc6e0cb3168443eeadb6445285abaae75142ee22b2b72c27d790ab60ba"}, - {file = "ruff-0.7.2-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fd77877a4e43b3a98e5ef4715ba3862105e299af0c48942cc6d51ba3d97dc859"}, - {file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:e00163fb897d35523c70d71a46fbaa43bf7bf9af0f4534c53ea5b96b2e03397b"}, - {file = "ruff-0.7.2-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f3c54b538633482dc342e9b634d91168fe8cc56b30a4b4f99287f4e339103e88"}, - {file = "ruff-0.7.2-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7b792468e9804a204be221b14257566669d1db5c00d6bb335996e5cd7004ba80"}, - {file = "ruff-0.7.2-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dba53ed84ac19ae4bfb4ea4bf0172550a2285fa27fbb13e3746f04c80f7fa088"}, - {file = "ruff-0.7.2-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:b19fafe261bf741bca2764c14cbb4ee1819b67adb63ebc2db6401dcd652e3748"}, - {file = "ruff-0.7.2-py3-none-musllinux_1_2_armv7l.whl", hash = 
"sha256:28bd8220f4d8f79d590db9e2f6a0674f75ddbc3847277dd44ac1f8d30684b828"}, - {file = "ruff-0.7.2-py3-none-musllinux_1_2_i686.whl", hash = "sha256:9fd67094e77efbea932e62b5d2483006154794040abb3a5072e659096415ae1e"}, - {file = "ruff-0.7.2-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:576305393998b7bd6c46018f8104ea3a9cb3fa7908c21d8580e3274a3b04b691"}, - {file = "ruff-0.7.2-py3-none-win32.whl", hash = "sha256:fa993cfc9f0ff11187e82de874dfc3611df80852540331bc85c75809c93253a8"}, - {file = "ruff-0.7.2-py3-none-win_amd64.whl", hash = "sha256:dd8800cbe0254e06b8fec585e97554047fb82c894973f7ff18558eee33d1cb88"}, - {file = "ruff-0.7.2-py3-none-win_arm64.whl", hash = "sha256:bb8368cd45bba3f57bb29cbb8d64b4a33f8415d0149d2655c5c8539452ce7760"}, - {file = "ruff-0.7.2.tar.gz", hash = "sha256:2b14e77293380e475b4e3a7a368e14549288ed2931fce259a6f99978669e844f"}, + {file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"}, + {file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"}, + {file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"}, + {file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"}, + {file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"}, + {file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"}, + {file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"}, ] [[package]] @@ -3102,4 +3102,4 @@ user-search = ["pyicu"] [metadata] lock-version = 
"2.0" python-versions = "^3.9.0" -content-hash = "0cd942a5193d01cbcef135a0bebd3fa0f12f7dbc63899d6f1c301e0649e9d902" +content-hash = "d71159b19349fdc0b7cd8e06e8c8778b603fc37b941c6df34ddc31746783d94d" diff --git a/pyproject.toml b/pyproject.toml index 13de146b4ed0..90bd688207d0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -320,7 +320,7 @@ all = [ # failing on new releases. Keeping lower bounds loose here means that dependabot # can bump versions without having to update the content-hash in the lockfile. # This helps prevents merge conflicts when running a batch of dependabot updates. -ruff = "0.7.2" +ruff = "0.7.3" # Type checking only works with the pydantic.v1 compat module from pydantic v2 pydantic = "^2" From 7feb07c3e903abe773da17904b7b14d80be49b6d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 13:52:14 +0000 Subject: [PATCH 086/147] Bump pygithub from 2.4.0 to 2.5.0 (#17917) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 6618296688c0..1b42938058d1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1785,13 +1785,13 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" [[package]] name = "pygithub" -version = "2.4.0" +version = "2.5.0" description = "Use the full Github API v3" optional = false python-versions = ">=3.8" files = [ - {file = "PyGithub-2.4.0-py3-none-any.whl", hash = "sha256:81935aa4bdc939fba98fee1cb47422c09157c56a27966476ff92775602b9ee24"}, - {file = "pygithub-2.4.0.tar.gz", hash = "sha256:6601e22627e87bac192f1e2e39c6e6f69a43152cfb8f307cee575879320b3051"}, + {file = "PyGithub-2.5.0-py3-none-any.whl", hash = "sha256:b0b635999a658ab8e08720bdd3318893ff20e2275f6446fcf35bf3f44f2c0fd2"}, + {file = "pygithub-2.5.0.tar.gz", hash = "sha256:e1613ac508a9be710920d26eb18b1905ebd9926aa49398e88151c1b526aad3cf"}, ] [package.dependencies] From db59067e786044e31b950bacdd9efbcbffe81ba4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 14:15:17 +0000 Subject: [PATCH 087/147] Bump bleach from 6.1.0 to 6.2.0 (#17918) --- poetry.lock | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1b42938058d1..cf0ddc0cc8b8 100644 --- a/poetry.lock +++ b/poetry.lock @@ -104,21 +104,20 @@ typecheck = ["mypy"] [[package]] name = "bleach" -version = "6.1.0" +version = "6.2.0" description = "An easy safelist-based HTML-sanitizing tool." 
optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" files = [ - {file = "bleach-6.1.0-py3-none-any.whl", hash = "sha256:3225f354cfc436b9789c66c4ee030194bee0568fbf9cbdad3bc8b5c26c5f12b6"}, - {file = "bleach-6.1.0.tar.gz", hash = "sha256:0a31f1837963c41d46bbf1331b8778e1308ea0791db03cc4e7357b97cf42a8fe"}, + {file = "bleach-6.2.0-py3-none-any.whl", hash = "sha256:117d9c6097a7c3d22fd578fcd8d35ff1e125df6736f554da4e432fdd63f31e5e"}, + {file = "bleach-6.2.0.tar.gz", hash = "sha256:123e894118b8a599fd80d3ec1a6d4cc7ce4e5882b1317a7e1ba69b56e95f991f"}, ] [package.dependencies] -six = ">=1.9.0" webencodings = "*" [package.extras] -css = ["tinycss2 (>=1.1.0,<1.3)"] +css = ["tinycss2 (>=1.1.0,<1.5)"] [[package]] name = "canonicaljson" From 2637b26cfebe020123084d8a8535ae9fb0532371 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 Nov 2024 14:32:45 +0000 Subject: [PATCH 088/147] Fix building and attaching release artifacts (#17921) Broke in #17905 due to upgrading the `upload-artifact` action, as we didn't rename debs. I think we also need to change how we download the artefacts and attach them to a release, as they'll download to a different place. Docs: - https://github.com/actions/upload-artifact/tree/v4/ - https://github.com/actions/download-artifact/tree/v4/ --- .github/workflows/release-artifacts.yml | 20 +++++++++++++++++--- changelog.d/17921.misc | 1 + 2 files changed, 18 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17921.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index d77d7792f0f6..14092a307a89 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -91,10 +91,19 @@ jobs: rm -rf /tmp/.buildx-cache mv /tmp/.buildx-cache-new /tmp/.buildx-cache + - name: Artifact name + id: artifact-name + # We can't have colons in the upload name of the artifact, so we convert + # e.g. `debian:sid` to `sid`. + env: + DISTRO: ${{ matrix.distro }} + run: | + echo "ARTIFACT_NAME=${DISTRO#*:}" >> "$GITHUB_OUTPUT" + - name: Upload debs as artifacts uses: actions/upload-artifact@v4 with: - name: debs + name: debs-${{ steps.artifact-name.outputs.ARTIFACT_NAME }} path: debs/* build-wheels: @@ -196,7 +205,12 @@ jobs: - name: Download all workflow run artifacts uses: actions/download-artifact@v4 - name: Build a tarball for the debs - run: tar -cvJf debs.tar.xz debs + # We need to merge all the debs uploads into one folder, then compress + # that. + run: | + mkdir debs + mv debs*/* debs/ + tar -cvJf debs.tar.xz debs - name: Attach to release uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109 env: @@ -204,7 +218,7 @@ jobs: with: files: | Sdist/* - Wheel/* + Wheel*/* debs.tar.xz # if it's not already published, keep the release as a draft. draft: true diff --git a/changelog.d/17921.misc b/changelog.d/17921.misc new file mode 100644 index 000000000000..4c6faa1f5ba9 --- /dev/null +++ b/changelog.d/17921.misc @@ -0,0 +1 @@ +Fix building and attaching release artifacts during the release process. 
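For readers who do not parse shell parameter expansions on sight, here is a rough Python equivalent of the two shell steps this workflow change adds (hypothetical helper names; the workflow itself stays in shell):

```python
import glob
import shutil
from pathlib import Path


def artifact_name(distro: str) -> str:
    # Mirrors `${DISTRO#*:}`: drop everything up to and including the
    # first colon, so "debian:sid" becomes "sid"; names without a colon
    # pass through unchanged.
    return distro.split(":", 1)[-1]


def merge_deb_uploads() -> None:
    # Mirrors the workflow's `mkdir debs; mv debs*/* debs/` step: each
    # per-distro artifact now downloads into its own `debs-<name>/`
    # folder, so gather the files back into a single `debs/` directory
    # before building the tarball.
    Path("debs").mkdir(exist_ok=True)
    for deb in glob.glob("debs-*/*"):
        shutil.move(deb, "debs/")


assert artifact_name("debian:sid") == "sid"
```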
From a4c503674f5ba3f2853f463b05143d760115111b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 11 Nov 2024 14:33:37 +0000 Subject: [PATCH 089/147] 1.119.0rc2 --- CHANGES.md | 5 ++++- changelog.d/17921.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/17921.misc diff --git a/CHANGES.md b/CHANGES.md index 898cf51d466d..5fcdde4846ba 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,6 @@ -# Synapse 1.119.0rc1 (2024-11-11) +# Synapse 1.119.0rc2 (2024-11-11) + +Note that due to packaging issues there was no v1.119.0rc1. ### Python 3.8 support dropped @@ -37,6 +39,7 @@ If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or gre - Update version constraint to allow the latest poetry-core 1.9.1. ([\#17902](https://github.com/element-hq/synapse/pull/17902)) - Update the portdb CI to use Python 3.13 and Postgres 17 as latest dependencies. ([\#17909](https://github.com/element-hq/synapse/pull/17909)) - Add an index to `current_state_delta_stream` table. ([\#17912](https://github.com/element-hq/synapse/issues/17912)) +- Fix building and attaching release artifacts during the release process. ([\#17921](https://github.com/element-hq/synapse/issues/17921)) ### Updates to locked dependencies diff --git a/changelog.d/17921.misc b/changelog.d/17921.misc deleted file mode 100644 index 4c6faa1f5ba9..000000000000 --- a/changelog.d/17921.misc +++ /dev/null @@ -1 +0,0 @@ -Fix building and attaching release artifacts during the release process. diff --git a/debian/changelog b/debian/changelog index 173bcd63a603..10ca8fbb2036 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium + + * New Synapse release 1.119.0rc2. + + -- Synapse Packaging team Mon, 11 Nov 2024 14:33:02 +0000 + matrix-synapse-py3 (1.119.0~rc1) stable; urgency=medium * New Synapse release 1.119.0rc1. diff --git a/pyproject.toml b/pyproject.toml index 2cf4ffb54874..04827c0acafa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.119.0rc1" +version = "1.119.0rc2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From bfb197c596f751bc9c4ed5c807b406a31385ae77 Mon Sep 17 00:00:00 2001 From: Benjamin Bouvier Date: Tue, 12 Nov 2024 17:41:14 +0100 Subject: [PATCH 090/147] Fix typo in error message when a media ID isn't known (#17865) --- changelog.d/17865.misc | 1 + synapse/media/media_repository.py | 2 +- synapse/rest/media/upload_resource.py | 2 +- tests/rest/admin/test_federation.py | 2 +- tests/rest/admin/test_statistics.py | 2 +- tests/rest/admin/test_user.py | 4 ++-- 6 files changed, 7 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17865.misc diff --git a/changelog.d/17865.misc b/changelog.d/17865.misc new file mode 100644 index 000000000000..2303a7e1b74e --- /dev/null +++ b/changelog.d/17865.misc @@ -0,0 +1 @@ +Addressed some typos in docs and returned error message for unknown MXC ID. 
diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 0b7420923228..f4d25a7b8b94 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -259,7 +259,7 @@ async def verify_can_upload(self, media_id: str, auth_user: UserID) -> None: """ media = await self.store.get_local_media(media_id) if media is None: - raise SynapseError(404, "Unknow media ID", errcode=Codes.NOT_FOUND) + raise NotFoundError("Unknown media ID") if media.user_id != auth_user.to_string(): raise SynapseError( diff --git a/synapse/rest/media/upload_resource.py b/synapse/rest/media/upload_resource.py index 5ef6bf883641..359d006f0414 100644 --- a/synapse/rest/media/upload_resource.py +++ b/synapse/rest/media/upload_resource.py @@ -94,7 +94,7 @@ def _get_file_metadata( # if headers.hasHeader(b"Content-Disposition"): # disposition = headers.getRawHeaders(b"Content-Disposition")[0] - # TODO(markjh): parse content-dispostion + # TODO(markjh): parse content-disposition return content_length, upload_name, media_type diff --git a/tests/rest/admin/test_federation.py b/tests/rest/admin/test_federation.py index c2015774a132..d5ae3345f50e 100644 --- a/tests/rest/admin/test_federation.py +++ b/tests/rest/admin/test_federation.py @@ -96,7 +96,7 @@ def test_invalid_parameter(self) -> None: self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - # unkown order_by + # unknown order_by channel = self.make_request( "GET", self.url + "?order_by=bar", diff --git a/tests/rest/admin/test_statistics.py b/tests/rest/admin/test_statistics.py index 5f60e19e5614..07ec49c4e59e 100644 --- a/tests/rest/admin/test_statistics.py +++ b/tests/rest/admin/test_statistics.py @@ -82,7 +82,7 @@ def test_invalid_parameter(self) -> None: """ If parameters are invalid, an error is returned. """ - # unkown order_by + # unknown order_by channel = self.make_request( "GET", self.url + "?order_by=bar", diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index f9ae50f40a85..668ccb89ff1d 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -719,7 +719,7 @@ def test_invalid_parameter(self) -> None: self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual(Codes.INVALID_PARAM, channel.json_body["errcode"]) - # unkown order_by + # unknown order_by channel = self.make_request( "GET", self.url + "?order_by=bar", @@ -3696,7 +3696,7 @@ def test_limit_and_from_DELETE(self) -> None: @parameterized.expand(["GET", "DELETE"]) def test_invalid_parameter(self, method: str) -> None: """If parameters are invalid, an error is returned.""" - # unkown order_by + # unknown order_by channel = self.make_request( method, self.url + "?order_by=bar", From 73dc05c99366bd8e591f5b961fb6697b880871aa Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 12 Nov 2024 16:52:00 +0000 Subject: [PATCH 091/147] Unpin the upload release GHA action (#17923) We were pinned to an old version that had deprecation warnings. In new versions of the action leaving off properties (i.e. `draft` and `prerelease`) tells the action to not modify those properties of the release. 
--- .github/workflows/release-artifacts.yml | 6 +----- changelog.d/17923.misc | 1 + 2 files changed, 2 insertions(+), 5 deletions(-) create mode 100644 changelog.d/17923.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 14092a307a89..8e393a90d160 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -212,7 +212,7 @@ jobs: mv debs*/* debs/ tar -cvJf debs.tar.xz debs - name: Attach to release - uses: softprops/action-gh-release@a929a66f232c1b11af63782948aa2210f981808a # PR#109 + uses: softprops/action-gh-release@v2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: @@ -220,7 +220,3 @@ jobs: Sdist/* Wheel*/* debs.tar.xz - # if it's not already published, keep the release as a draft. - draft: true - # mark it as a prerelease if the tag contains 'rc'. - prerelease: ${{ contains(github.ref, 'rc') }} diff --git a/changelog.d/17923.misc b/changelog.d/17923.misc new file mode 100644 index 000000000000..4d74e7e1849c --- /dev/null +++ b/changelog.d/17923.misc @@ -0,0 +1 @@ +Unpin the upload release GHA action. From e0fdb862cbbddc920a30233024eb99038ee2fb28 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Nov 2024 11:30:04 +0000 Subject: [PATCH 092/147] Bump macos version used to build wheels (#17924) MacOS 12 is end-of-life and GitHub is deprecating support for it (including doing brown outs). Let's bump to MacOS 13. --- .github/workflows/release-artifacts.yml | 6 +++--- changelog.d/17924.misc | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17924.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 8e393a90d160..c0aff7914134 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -111,7 +111,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-22.04, macos-12] + os: [ubuntu-22.04, macos-13] arch: [x86_64, aarch64] # is_pr is a flag used to exclude certain jobs from the matrix on PRs. # It is not read by the rest of the workflow. @@ -121,9 +121,9 @@ jobs: exclude: # Don't build macos wheels on PR CI. - is_pr: true - os: "macos-12" + os: "macos-13" # Don't build aarch64 wheels on mac. - - os: "macos-12" + - os: "macos-13" arch: aarch64 # Don't build aarch64 wheels on PR CI. - is_pr: true diff --git a/changelog.d/17924.misc b/changelog.d/17924.misc new file mode 100644 index 000000000000..c7cc502360e0 --- /dev/null +++ b/changelog.d/17924.misc @@ -0,0 +1 @@ +Bump macos version used to build wheels during release, as current version used is end-of-life. From 850ff14613040d733ab28373a54c95e20182cd1c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 13 Nov 2024 13:58:18 +0000 Subject: [PATCH 093/147] 1.119.0 --- CHANGES.md | 10 ++++++++-- debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 5fcdde4846ba..be5c18c84baf 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,6 +1,6 @@ -# Synapse 1.119.0rc2 (2024-11-11) +# Synapse 1.119.0 (2024-11-13) -Note that due to packaging issues there was no v1.119.0rc1. +No significant changes since 1.119.0rc2. ### Python 3.8 support dropped @@ -8,6 +8,12 @@ Python 3.8 is [end-of-life](https://devguide.python.org/versions/) and is no lon If you are running Synapse with Python 3.8, please upgrade to Python 3.9 (or greater) before upgrading Synapse. 
+ +# Synapse 1.119.0rc2 (2024-11-11) + +Note that due to packaging issues there was no v1.119.0rc1. + + ### Features - Support [MSC4151](https://github.com/matrix-org/matrix-spec-proposals/pull/4151)'s stable report room API. ([\#17374](https://github.com/element-hq/synapse/issues/17374)) diff --git a/debian/changelog b/debian/changelog index 10ca8fbb2036..bacd453cb4f4 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.119.0) stable; urgency=medium + + * New Synapse release 1.119.0. + + -- Synapse Packaging team Wed, 13 Nov 2024 13:57:51 +0000 + matrix-synapse-py3 (1.119.0~rc2) stable; urgency=medium * New Synapse release 1.119.0rc2. diff --git a/pyproject.toml b/pyproject.toml index 04827c0acafa..e0afcdee5c1f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.119.0rc2" +version = "1.119.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From c812a794224b0fcc61394217a03cad180ed4160b Mon Sep 17 00:00:00 2001 From: Poruri Sai Rahul Date: Wed, 13 Nov 2024 19:40:20 +0530 Subject: [PATCH 094/147] Removal: Remove support for experimental msc3886 (#17638) --- changelog.d/17638.removal | 1 + docs/upgrade.md | 11 +++++++ synapse/config/experimental.py | 5 --- synapse/config/server.py | 4 --- synapse/http/server.py | 9 ------ synapse/http/site.py | 5 --- synapse/rest/client/rendezvous.py | 48 ---------------------------- synapse/rest/client/versions.py | 3 -- tests/logging/test_terse_json.py | 1 - tests/rest/client/test_rendezvous.py | 9 ------ tests/server.py | 2 -- tests/test_server.py | 41 +----------------------- 12 files changed, 13 insertions(+), 126 deletions(-) create mode 100644 changelog.d/17638.removal diff --git a/changelog.d/17638.removal b/changelog.d/17638.removal new file mode 100644 index 000000000000..1bb09e976ec5 --- /dev/null +++ b/changelog.d/17638.removal @@ -0,0 +1 @@ +Remove support for closed [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886). \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index ea9824a5ee94..9f12d7c34f32 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,17 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.120.0 + +## Removal of experimental MSC3886 feature + +[MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) +has been closed (and will not enter the Matrix spec). As such, we are +removing the experimental support for it in this release. + +The `experimental_features.msc3886_endpoint` configuration option has +been removed. + # Upgrading to v1.119.0 ## Minimum supported Python version diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index b26ce25d715d..3411179a2a31 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -365,11 +365,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC3874: Filtering /messages with rel_types / not_rel_types. 
self.msc3874_enabled: bool = experimental.get("msc3874_enabled", False) - # MSC3886: Simple client rendezvous capability - self.msc3886_endpoint: Optional[str] = experimental.get( - "msc3886_endpoint", None - ) - # MSC3890: Remotely silence local notifications # Note: This option requires "experimental_features.msc3391_enabled" to be # set to "true", in order to communicate account data deletions to clients. diff --git a/synapse/config/server.py b/synapse/config/server.py index 6a8c7cb1c93a..ad7331de4288 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -215,9 +215,6 @@ class HttpListenerConfig: additional_resources: Dict[str, dict] = attr.Factory(dict) tag: Optional[str] = None request_id_header: Optional[str] = None - # If true, the listener will return CORS response headers compatible with MSC3886: - # https://github.com/matrix-org/matrix-spec-proposals/pull/3886 - experimental_cors_msc3886: bool = False @attr.s(slots=True, frozen=True, auto_attribs=True) @@ -1004,7 +1001,6 @@ def parse_listener_def(num: int, listener: Any) -> ListenerConfig: additional_resources=listener.get("additional_resources", {}), tag=listener.get("tag"), request_id_header=listener.get("request_id_header"), - experimental_cors_msc3886=listener.get("experimental_cors_msc3886", False), ) if socket_path: diff --git a/synapse/http/server.py b/synapse/http/server.py index 3e2d94d399a2..792961a14761 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -921,15 +921,6 @@ def set_cors_headers(request: "SynapseRequest") -> None: b"Access-Control-Expose-Headers", b"Synapse-Trace-Id, Server, ETag", ) - elif request.experimental_cors_msc3886: - request.setHeader( - b"Access-Control-Allow-Headers", - b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match", - ) - request.setHeader( - b"Access-Control-Expose-Headers", - b"ETag, Location, X-Max-Bytes", - ) else: request.setHeader( b"Access-Control-Allow-Headers", diff --git a/synapse/http/site.py b/synapse/http/site.py index 8bf63edd3622..1cd90cb9b72b 100644 --- a/synapse/http/site.py +++ b/synapse/http/site.py @@ -94,7 +94,6 @@ def __init__( self.reactor = site.reactor self._channel = channel # this is used by the tests self.start_time = 0.0 - self.experimental_cors_msc3886 = site.experimental_cors_msc3886 # The requester, if authenticated. For federation requests this is the # server name, for client requests this is the Requester object. @@ -666,10 +665,6 @@ def __init__( request_id_header = config.http_options.request_id_header - self.experimental_cors_msc3886: bool = ( - config.http_options.experimental_cors_msc3886 - ) - def request_factory(channel: HTTPChannel, queued: bool) -> Request: return request_class( channel, diff --git a/synapse/rest/client/rendezvous.py b/synapse/rest/client/rendezvous.py index 27bf53314a83..02f166b4ea43 100644 --- a/synapse/rest/client/rendezvous.py +++ b/synapse/rest/client/rendezvous.py @@ -34,51 +34,6 @@ logger = logging.getLogger(__name__) -# n.b [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) has now been closed. -# However, we want to keep this implementation around for some time. -# TODO: define an end-of-life date for this implementation. -class MSC3886RendezvousServlet(RestServlet): - """ - This is a placeholder implementation of [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886) - simple client rendezvous capability that is used by the "Sign in with QR" functionality. 
- - This implementation only serves as a 307 redirect to a configured server rather than being a full implementation. - - A module that implements the full functionality is available at: https://pypi.org/project/matrix-http-rendezvous-synapse/. - - Request: - - POST /rendezvous HTTP/1.1 - Content-Type: ... - - ... - - Response: - - HTTP/1.1 307 - Location: - """ - - PATTERNS = client_patterns( - "/org.matrix.msc3886/rendezvous$", releases=[], v1=False, unstable=True - ) - - def __init__(self, hs: "HomeServer"): - super().__init__() - redirection_target: Optional[str] = hs.config.experimental.msc3886_endpoint - assert ( - redirection_target is not None - ), "Servlet is only registered if there is a redirection target" - self.endpoint = redirection_target.encode("utf-8") - - async def on_POST(self, request: SynapseRequest) -> None: - respond_with_redirect( - request, self.endpoint, statusCode=TEMPORARY_REDIRECT, cors=True - ) - - # PUT, GET and DELETE are not implemented as they should be fulfilled by the redirect target. - - class MSC4108DelegationRendezvousServlet(RestServlet): PATTERNS = client_patterns( "/org.matrix.msc4108/rendezvous$", releases=[], v1=False, unstable=True @@ -114,9 +69,6 @@ def on_POST(self, request: SynapseRequest) -> None: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - if hs.config.experimental.msc3886_endpoint is not None: - MSC3886RendezvousServlet(hs).register(http_server) - if hs.config.experimental.msc4108_enabled: MSC4108RendezvousServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index 8028cf8ad2cb..ba1141bbe597 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -149,9 +149,6 @@ async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]: "org.matrix.msc3881": msc3881_enabled, # Adds support for filtering /messages by event relation. "org.matrix.msc3874": self.config.experimental.msc3874_enabled, - # Adds support for simple HTTP rendezvous as per MSC3886 - "org.matrix.msc3886": self.config.experimental.msc3886_endpoint - is not None, # Adds support for relation-based redactions as per MSC3912. "org.matrix.msc3912": self.config.experimental.msc3912_enabled, # Whether recursively provide relations is supported. 
diff --git a/tests/logging/test_terse_json.py b/tests/logging/test_terse_json.py index ff85e067b78e..33b94cf9fa36 100644 --- a/tests/logging/test_terse_json.py +++ b/tests/logging/test_terse_json.py @@ -164,7 +164,6 @@ def test_with_request_context(self) -> None: site.site_tag = "test-site" site.server_version_string = "Server v1" site.reactor = Mock() - site.experimental_cors_msc3886 = False request = SynapseRequest( cast(HTTPChannel, FakeChannel(site, self.reactor)), site ) diff --git a/tests/rest/client/test_rendezvous.py b/tests/rest/client/test_rendezvous.py index 0ab754a11aa2..ab701680a6a4 100644 --- a/tests/rest/client/test_rendezvous.py +++ b/tests/rest/client/test_rendezvous.py @@ -34,7 +34,6 @@ from tests.unittest import override_config from tests.utils import HAS_AUTHLIB -msc3886_endpoint = "/_matrix/client/unstable/org.matrix.msc3886/rendezvous" msc4108_endpoint = "/_matrix/client/unstable/org.matrix.msc4108/rendezvous" @@ -54,17 +53,9 @@ def create_resource_dict(self) -> Dict[str, Resource]: } def test_disabled(self) -> None: - channel = self.make_request("POST", msc3886_endpoint, {}, access_token=None) - self.assertEqual(channel.code, 404) channel = self.make_request("POST", msc4108_endpoint, {}, access_token=None) self.assertEqual(channel.code, 404) - @override_config({"experimental_features": {"msc3886_endpoint": "/asd"}}) - def test_msc3886_redirect(self) -> None: - channel = self.make_request("POST", msc3886_endpoint, {}, access_token=None) - self.assertEqual(channel.code, 307) - self.assertEqual(channel.headers.getRawHeaders("Location"), ["/asd"]) - @unittest.skip_unless(HAS_AUTHLIB, "requires authlib") @override_config( { diff --git a/tests/server.py b/tests/server.py index 23c81203a5a4..84ed9f68ebdf 100644 --- a/tests/server.py +++ b/tests/server.py @@ -343,7 +343,6 @@ def __init__( self, resource: IResource, reactor: IReactorTime, - experimental_cors_msc3886: bool = False, ): """ @@ -352,7 +351,6 @@ def __init__( """ self._resource = resource self.reactor = reactor - self.experimental_cors_msc3886 = experimental_cors_msc3886 def getResourceFor(self, request: Request) -> IResource: return self._resource diff --git a/tests/test_server.py b/tests/test_server.py index 9ff2589497c1..9cb6766b5fca 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -233,9 +233,7 @@ def render(self, request: SynapseRequest) -> bytes: self.resource = OptionsResource() self.resource.putChild(b"res", DummyResource()) - def _make_request( - self, method: bytes, path: bytes, experimental_cors_msc3886: bool = False - ) -> FakeChannel: + def _make_request(self, method: bytes, path: bytes) -> FakeChannel: """Create a request from the method/path and return a channel with the response.""" # Create a site and query for the resource. 
site = SynapseSite( @@ -246,7 +244,6 @@ def _make_request(self, method: bytes, path: bytes) -> FakeChannel: { "type": "http", "port": 0, - "experimental_cors_msc3886": experimental_cors_msc3886, }, ), self.resource, @@ -283,32 +280,6 @@ def _check_cors_standard_headers(self, channel: FakeChannel) -> None: [b"Synapse-Trace-Id, Server"], ) - def _check_cors_msc3886_headers(self, channel: FakeChannel) -> None: - # Ensure the correct CORS headers have been added - # as per https://github.com/matrix-org/matrix-spec-proposals/blob/hughns/simple-rendezvous-capability/proposals/3886-simple-rendezvous-capability.md#cors - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Allow-Origin"), - [b"*"], - "has correct CORS Origin header", - ) - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Allow-Methods"), - [b"GET, HEAD, POST, PUT, DELETE, OPTIONS"], # HEAD isn't in the spec - "has correct CORS Methods header", - ) - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Allow-Headers"), - [ - b"X-Requested-With, Content-Type, Authorization, Date, If-Match, If-None-Match" - ], - "has correct CORS Headers header", - ) - self.assertEqual( - channel.headers.getRawHeaders(b"Access-Control-Expose-Headers"), - [b"ETag, Location, X-Max-Bytes"], - "has correct CORS Expose Headers header", - ) - def test_unknown_options_request(self) -> None: """An OPTIONS request to an unknown URL still returns 204 No Content.""" channel = self._make_request(b"OPTIONS", b"/foo/") @@ -325,16 +296,6 @@ def test_known_options_request(self) -> None: self._check_cors_standard_headers(channel) - def test_known_options_request_msc3886(self) -> None: - """An OPTIONS request to a known URL still returns 204 No Content.""" - channel = self._make_request( - b"OPTIONS", b"/res/", experimental_cors_msc3886=True - ) - self.assertEqual(channel.code, 204) - self.assertNotIn("body", channel.result) - - self._check_cors_msc3886_headers(channel) - def test_unknown_request(self) -> None: """A non-OPTIONS request to an unknown URL should 404.""" channel = self._make_request(b"GET", b"/foo/") From e80dad5fa9ccc9fb7645c043a1e1995065c4bb2a Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Thu, 14 Nov 2024 16:18:24 +0000 Subject: [PATCH 095/147] Move server event filtering logic to rust (#17928) ### Pull Request Checklist * [X] Pull request is based on the develop branch * [X] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry.
* [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- changelog.d/17928.misc | 1 + rust/src/events/filter.rs | 107 ++++++++++++++++++++++++++++++++ rust/src/events/mod.rs | 4 +- rust/src/identifier.rs | 86 +++++++++++++++++++++++++ rust/src/lib.rs | 2 + rust/src/matrix_const.rs | 28 +++++++++ rust/src/push/utils.rs | 1 - synapse/synapse_rust/events.pyi | 28 ++++++++- synapse/visibility.py | 66 ++++---------------- 9 files changed, 265 insertions(+), 58 deletions(-) create mode 100644 changelog.d/17928.misc create mode 100644 rust/src/events/filter.rs create mode 100644 rust/src/identifier.rs create mode 100644 rust/src/matrix_const.rs diff --git a/changelog.d/17928.misc b/changelog.d/17928.misc new file mode 100644 index 000000000000..b5aef4457a40 --- /dev/null +++ b/changelog.d/17928.misc @@ -0,0 +1 @@ +Move server event filtering logic to rust. diff --git a/rust/src/events/filter.rs b/rust/src/events/filter.rs new file mode 100644 index 000000000000..7e39972c62d3 --- /dev/null +++ b/rust/src/events/filter.rs @@ -0,0 +1,107 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2024 New Vector, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * . + */ + +use std::collections::HashMap; + +use pyo3::{exceptions::PyValueError, pyfunction, PyResult}; + +use crate::{ + identifier::UserID, + matrix_const::{ + HISTORY_VISIBILITY_INVITED, HISTORY_VISIBILITY_JOINED, MEMBERSHIP_INVITE, MEMBERSHIP_JOIN, + }, +}; + +#[pyfunction(name = "event_visible_to_server")] +pub fn event_visible_to_server_py( + sender: String, + target_server_name: String, + history_visibility: String, + erased_senders: HashMap, + partial_state_invisible: bool, + memberships: Vec<(String, String)>, // (state_key, membership) +) -> PyResult { + event_visible_to_server( + sender, + target_server_name, + history_visibility, + erased_senders, + partial_state_invisible, + memberships, + ) + .map_err(|e| PyValueError::new_err(format!("{e}"))) +} + +/// Return whether the target server is allowed to see the event. +/// +/// For a fully stated room, the target server is allowed to see an event E if: +/// - the state at E has world readable or shared history vis, OR +/// - the state at E says that the target server is in the room. +/// +/// For a partially stated room, the target server is allowed to see E if: +/// - E was created by this homeserver, AND: +/// - the partial state at E has world readable or shared history vis, OR +/// - the partial state at E says that the target server is in the room. 
+pub fn event_visible_to_server( + sender: String, + target_server_name: String, + history_visibility: String, + erased_senders: HashMap, + partial_state_invisible: bool, + memberships: Vec<(String, String)>, // (state_key, membership) +) -> anyhow::Result { + if let Some(&erased) = erased_senders.get(&sender) { + if erased { + return Ok(false); + } + } + + if partial_state_invisible { + return Ok(false); + } + + if history_visibility != HISTORY_VISIBILITY_INVITED + && history_visibility != HISTORY_VISIBILITY_JOINED + { + return Ok(true); + } + + let mut visible = false; + for (state_key, membership) in memberships { + let state_key = UserID::try_from(state_key.as_ref()) + .map_err(|e| anyhow::anyhow!(format!("invalid user_id ({state_key}): {e}")))?; + if state_key.server_name() != target_server_name { + return Err(anyhow::anyhow!( + "state_key.server_name ({}) does not match target_server_name ({target_server_name})", + state_key.server_name() + )); + } + + match membership.as_str() { + MEMBERSHIP_INVITE => { + if history_visibility == HISTORY_VISIBILITY_INVITED { + visible = true; + break; + } + } + MEMBERSHIP_JOIN => { + visible = true; + break; + } + _ => continue, + } + } + + Ok(visible) +} diff --git a/rust/src/events/mod.rs b/rust/src/events/mod.rs index a4ade1a1786d..0bb6cdb181c5 100644 --- a/rust/src/events/mod.rs +++ b/rust/src/events/mod.rs @@ -22,15 +22,17 @@ use pyo3::{ types::{PyAnyMethods, PyModule, PyModuleMethods}, - Bound, PyResult, Python, + wrap_pyfunction, Bound, PyResult, Python, }; +pub mod filter; mod internal_metadata; /// Called when registering modules with python. pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { let child_module = PyModule::new_bound(py, "events")?; child_module.add_class::()?; + child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?; m.add_submodule(&child_module)?; diff --git a/rust/src/identifier.rs b/rust/src/identifier.rs new file mode 100644 index 000000000000..b199c5838eb6 --- /dev/null +++ b/rust/src/identifier.rs @@ -0,0 +1,86 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2024 New Vector, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * . + */ + +//! # Matrix Identifiers +//! +//! This module contains definitions and utilities for working with matrix identifiers. + +use std::{fmt, ops::Deref}; + +/// Errors that can occur when parsing a matrix identifier. +#[derive(Clone, Debug, PartialEq)] +pub enum IdentifierError { +    IncorrectSigil, +    MissingColon, +} + +impl fmt::Display for IdentifierError { +    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { +        write!(f, "{:?}", self) +    } +} + +/// A Matrix user_id. +#[derive(Clone, Debug, PartialEq)] +pub struct UserID(String); + +impl UserID { +    /// Returns the `localpart` of the user_id. +    pub fn localpart(&self) -> &str { +        &self[1..self.colon_pos()] +    } + +    /// Returns the `server_name` / `domain` of the user_id. +    pub fn server_name(&self) -> &str { +        &self[self.colon_pos() + 1..] +    } + +    /// Returns the position of the ':' inside of the user_id. +    /// Used when splitting the user_id into its respective parts.
+ fn colon_pos(&self) -> usize { + self.find(':').unwrap() + } +} + +impl TryFrom<&str> for UserID { + type Error = IdentifierError; + + /// Will try creating a `UserID` from the provided `&str`. + /// Can fail if the user_id is incorrectly formatted. + fn try_from(s: &str) -> Result { + if !s.starts_with('@') { + return Err(IdentifierError::IncorrectSigil); + } + + if s.find(':').is_none() { + return Err(IdentifierError::MissingColon); + } + + Ok(UserID(s.to_string())) + } +} + +impl Deref for UserID { + type Target = str; + + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +impl fmt::Display for UserID { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.0) + } +} diff --git a/rust/src/lib.rs b/rust/src/lib.rs index 06477880b944..5de923832695 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -6,6 +6,8 @@ pub mod acl; pub mod errors; pub mod events; pub mod http; +pub mod identifier; +pub mod matrix_const; pub mod push; pub mod rendezvous; diff --git a/rust/src/matrix_const.rs b/rust/src/matrix_const.rs new file mode 100644 index 000000000000..f75f3bd7c340 --- /dev/null +++ b/rust/src/matrix_const.rs @@ -0,0 +1,28 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2024 New Vector, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * . + */ + +//! # Matrix Constants +//! +//! This module contains definitions for constant values described by the matrix specification. + +pub const HISTORY_VISIBILITY_WORLD_READABLE: &str = "world_readable"; +pub const HISTORY_VISIBILITY_SHARED: &str = "shared"; +pub const HISTORY_VISIBILITY_INVITED: &str = "invited"; +pub const HISTORY_VISIBILITY_JOINED: &str = "joined"; + +pub const MEMBERSHIP_BAN: &str = "ban"; +pub const MEMBERSHIP_LEAVE: &str = "leave"; +pub const MEMBERSHIP_KNOCK: &str = "knock"; +pub const MEMBERSHIP_INVITE: &str = "invite"; +pub const MEMBERSHIP_JOIN: &str = "join"; diff --git a/rust/src/push/utils.rs b/rust/src/push/utils.rs index 28ebed62c88d..59536c995487 100644 --- a/rust/src/push/utils.rs +++ b/rust/src/push/utils.rs @@ -23,7 +23,6 @@ use anyhow::bail; use anyhow::Context; use anyhow::Error; use lazy_static::lazy_static; -use regex; use regex::Regex; use regex::RegexBuilder; diff --git a/synapse/synapse_rust/events.pyi b/synapse/synapse_rust/events.pyi index 1682d0d151a5..7d3422572ddb 100644 --- a/synapse/synapse_rust/events.pyi +++ b/synapse/synapse_rust/events.pyi @@ -10,7 +10,7 @@ # See the GNU Affero General Public License for more details: # . -from typing import Optional +from typing import List, Mapping, Optional, Tuple from synapse.types import JsonDict @@ -105,3 +105,29 @@ class EventInternalMetadata: def is_notifiable(self) -> bool: """Whether this event can trigger a push notification""" + +def event_visible_to_server( + sender: str, + target_server_name: str, + history_visibility: str, + erased_senders: Mapping[str, bool], + partial_state_invisible: bool, + memberships: List[Tuple[str, str]], +) -> bool: + """Determine whether the server is allowed to see the unredacted event. + + Args: + sender: The sender of the event. + target_server_name: The server we want to send the event to. 
+ history_visibility: The history_visibility value at the event. + erased_senders: A mapping of users and whether they have requested erasure. If a + user is not in the map, it is treated as though they haven't requested erasure. + partial_state_invisible: Whether the event should be treated as invisible due to + the partial state status of the room. + memberships: A list of membership state information at the event for users + matching the `target_server_name`. Each list item must contain a tuple of + (state_key, membership). + + Returns: + Whether the server is allowed to see the unredacted event. + """ diff --git a/synapse/visibility.py b/synapse/visibility.py index 3a2782bade77..dc7b6e4065e3 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -27,7 +27,6 @@ Final, FrozenSet, List, - Mapping, Optional, Sequence, Set, @@ -48,6 +47,7 @@ from synapse.logging.opentracing import trace from synapse.storage.controllers import StorageControllers from synapse.storage.databases.main import DataStore +from synapse.synapse_rust.events import event_visible_to_server from synapse.types import RetentionPolicy, StateMap, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util import Clock @@ -628,17 +628,6 @@ async def filter_events_for_server( """Filter a list of events based on whether the target server is allowed to see them. - For a fully stated room, the target server is allowed to see an event E if: - - the state at E has world readable or shared history vis, OR - - the state at E says that the target server is in the room. - - For a partially stated room, the target server is allowed to see E if: - - E was created by this homeserver, AND: - - the partial state at E has world readable or shared history vis, OR - - the partial state at E says that the target server is in the room. - - TODO: state before or state after? - Args: storage target_server_name @@ -655,35 +644,6 @@ async def filter_events_for_server( The filtered events. """ - def is_sender_erased(event: EventBase, erased_senders: Mapping[str, bool]) -> bool: - if erased_senders and erased_senders[event.sender]: - logger.info("Sender of %s has been erased, redacting", event.event_id) - return True - return False - - def check_event_is_visible( - visibility: str, memberships: StateMap[EventBase] - ) -> bool: - if visibility not in (HistoryVisibility.INVITED, HistoryVisibility.JOINED): - return True - - # We now loop through all membership events looking for - # membership states for the requesting server to determine - # if the server is either in the room or has been invited - # into the room. 
- for ev in memberships.values(): - assert get_domain_from_id(ev.state_key) == target_server_name - - memtype = ev.membership - if memtype == Membership.JOIN: - return True - elif memtype == Membership.INVITE: - if visibility == HistoryVisibility.INVITED: - return True - - # server has no users in the room: redact - return False - if filter_out_erased_senders: erased_senders = await storage.main.are_users_erased(e.sender for e in events) else: @@ -726,20 +686,16 @@ def check_event_is_visible( target_server_name, ) - def include_event_in_output(e: EventBase) -> bool: - erased = is_sender_erased(e, erased_senders) - visible = check_event_is_visible( - event_to_history_vis[e.event_id], event_to_memberships.get(e.event_id, {}) - ) - - if e.event_id in partial_state_invisible_event_ids: - visible = False - - return visible and not erased - to_return = [] for e in events: - if include_event_in_output(e): + if event_visible_to_server( + sender=e.sender, + target_server_name=target_server_name, + history_visibility=event_to_history_vis[e.event_id], + erased_senders=erased_senders, + partial_state_invisible=e.event_id in partial_state_invisible_event_ids, + memberships=list(event_to_memberships.get(e.event_id, {}).values()), + ): to_return.append(e) elif redact: to_return.append(prune_event(e)) @@ -796,7 +752,7 @@ async def _event_to_history_vis( async def _event_to_memberships( storage: StorageControllers, events: Collection[EventBase], server_name: str -) -> Dict[str, StateMap[EventBase]]: +) -> Dict[str, StateMap[Tuple[str, str]]]: """Get the remote membership list at each of the given events Returns a map from event id to state map, which will contain only membership events @@ -849,7 +805,7 @@ def include(state_key: str) -> bool: return { e_id: { - key: event_map[inner_e_id] + key: (event_map[inner_e_id].state_key, event_map[inner_e_id].membership) for key, inner_e_id in key_to_eid.items() if inner_e_id in event_map } From d72843056bf3990ab95677dddfb51de625e6bdb1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 18 Nov 2024 14:05:49 +0000 Subject: [PATCH 096/147] Add some documentation about backing up Synapse (#17931) Fixes: https://github.com/element-hq/element-meta/issues/2155 Fixes: https://github.com/element-hq/synapse/issues/2046 --- changelog.d/17931.doc | 1 + docs/SUMMARY.md | 1 + docs/postgres.md | 4 + docs/setup/installation.md | 4 + docs/usage/administration/backups.md | 125 ++++++++++++++++++ .../configuration/config_documentation.md | 15 ++- 6 files changed, 147 insertions(+), 3 deletions(-) create mode 100644 changelog.d/17931.doc create mode 100644 docs/usage/administration/backups.md diff --git a/changelog.d/17931.doc b/changelog.d/17931.doc new file mode 100644 index 000000000000..9207cb0a1c5f --- /dev/null +++ b/changelog.d/17931.doc @@ -0,0 +1 @@ +Add documentation about backing up Synapse. 
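As a reading aid for PATCH 095/147 above: the visibility rule that `rust/src/events/filter.rs` implements can be summarised by the Python sketch below. This is illustrative only; its shape mirrors the new `synapse_rust.events.event_visible_to_server` binding described in `events.pyi`, but the sketch itself is hypothetical and is not the binding.

```python
# Illustrative sketch of the rule in rust/src/events/filter.rs above;
# not the actual synapse_rust binding.
from typing import List, Mapping, Tuple


def event_visible_to_server_sketch(
    sender: str,
    target_server_name: str,
    history_visibility: str,
    erased_senders: Mapping[str, bool],
    partial_state_invisible: bool,
    memberships: List[Tuple[str, str]],  # (state_key, membership)
) -> bool:
    # Events from erased senders are never shown unredacted.
    if erased_senders.get(sender):
        return False
    # Events hidden by the partial-state check are never visible.
    if partial_state_invisible:
        return False
    # Anything other than "invited"/"joined" history visibility
    # ("shared", "world_readable") is visible to any server.
    if history_visibility not in ("invited", "joined"):
        return True
    # Otherwise the target server needs a joined user (or, for "invited"
    # visibility, an invited user) among the membership events at this point.
    # Each state_key is a user ID such as "@alice:example.com" belonging to
    # target_server_name; the Rust code errors out if it does not.
    for _state_key, membership in memberships:
        if membership == "join":
            return True
        if membership == "invite" and history_visibility == "invited":
            return True
    return False
```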
diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index c50121d5f784..fd91d9fa115d 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -54,6 +54,7 @@ - [Using `synctl` with Workers](synctl_workers.md) - [Systemd](systemd-with-workers/README.md) - [Administration](usage/administration/README.md) + - [Backups](usage/administration/backups.md) - [Admin API](usage/administration/admin_api/README.md) - [Account Validity](admin_api/account_validity.md) - [Background Updates](usage/administration/admin_api/background_updates.md) diff --git a/docs/postgres.md b/docs/postgres.md index d06f0cda1010..51670667e8a7 100644 --- a/docs/postgres.md +++ b/docs/postgres.md @@ -100,6 +100,10 @@ database: keepalives_count: 3 ``` +## Backups + +Don't forget to [back up](./usage/administration/backups.md#database) your database! + ## Tuning Postgres The default settings should be fine for most deployments. For larger diff --git a/docs/setup/installation.md b/docs/setup/installation.md index d717880aa538..bfeacab37553 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -656,6 +656,10 @@ This also requires the optional `lxml` python dependency to be installed. This in turn requires the `libxml2` library to be available - on Debian/Ubuntu this means `apt-get install libxml2-dev`, or equivalent for your OS. +### Backups + +Don't forget to take [backups](../usage/administration/backups.md) of your new server! + ### Troubleshooting Installation `pip` seems to leak *lots* of memory during installation. For instance, a Linux diff --git a/docs/usage/administration/backups.md b/docs/usage/administration/backups.md new file mode 100644 index 000000000000..24d250179b43 --- /dev/null +++ b/docs/usage/administration/backups.md @@ -0,0 +1,125 @@ +# How to back up a Synapse homeserver + +It is critical to maintain good backups of your server, to guard against +hardware failure as well as potential corruption due to bugs or administrator +error. + +This page documents the things you will need to consider backing up as part of +a Synapse installation. + +## Configuration files + +Keep a copy of your configuration file (`homeserver.yaml`), as well as any +auxiliary config files it refers to such as the +[`log_config`](../configuration/config_documentation.md#log_config) file, +[`app_service_config_files`](../configuration/config_documentation.md#app_service_config_files). +Often, all such config files will be kept in a single directory such as +`/etc/synapse`, which will make this easier. + +## Server signing key + +Your server has a [signing +key](../configuration/config_documentation.md#signing_key_path) which it uses +to sign events and outgoing federation requests. It is easiest to back it up +with your configuration files, but an alternative is to have Synapse create a +new signing key if you have to restore. + +If you do decide to replace the signing key, you should add the old *public* +key to +[`old_signing_keys`](../configuration/config_documentation.md#old_signing_keys). + +## Database + +Synapse's support for SQLite is only suitable for testing purposes, so for the +purposes of this document, we'll assume you are using +[PostgreSQL](../../postgres.md). + +A full discussion of backup strategies for PostgreSQL is out of scope for this +document; see the [PostgreSQL +documentation](https://www.postgresql.org/docs/current/backup.html) for +detailed information. + +### Synapse-specific details + + * Be very careful not to restore into a database that already has tables + present.
At best, this will error; at worst, it will lead to subtle database + inconsistencies. + + * The `e2e_one_time_keys_json` table should **not** be backed up, or if it is + backed up, should be + [`TRUNCATE`d](https://www.postgresql.org/docs/current/sql-truncate.html) + after restoring the database before Synapse is started. + + [Background: restoring the database to an older backup can cause + used one-time-keys to be re-issued, causing subsequent [message decryption + errors](https://github.com/element-hq/element-meta/issues/2155). Clearing + all one-time-keys from the database ensures that this cannot happen, and + will prompt clients to generate and upload new one-time-keys.] + +### Quick and easy database backup and restore + +Typically, the easiest solution is to use `pg_dump` to take a copy of the whole +database. We recommend `pg_dump`'s custom dump format, as it produces +significantly smaller backup files. + +```shell +sudo -u postgres pg_dump -Fc --exclude-table-data e2e_one_time_keys_json synapse > synapse.dump +``` + +There is no need to stop Postgres or Synapse while `pg_dump` is running: it +will take a consistent snapshot of the database. + +To restore, you will need to recreate the database as described in [Using +Postgres](../../postgres.md#set-up-database), +then load the dump into it with `pg_restore`: + +```shell +sudo -u postgres createdb --encoding=UTF8 --locale=C --template=template0 --owner=synapse_user synapse +sudo -u postgres pg_restore -d synapse < synapse.dump +``` + +(If you forgot to exclude `e2e_one_time_keys_json` during `pg_dump`, remember +to connect to the new database and `TRUNCATE e2e_one_time_keys_json;` before +starting Synapse.) + +To reiterate: do **not** restore a dump over an existing database. + +Again, if you plan to run your homeserver at any sort of production level, we +recommend studying the PostgreSQL documentation on backup options. + +## Media store + +Synapse keeps a copy of media uploaded by users, including avatars and message +attachments, in its [Media +store](../configuration/config_documentation.md#media-store). + +It is a directory on the local disk, containing the following directories: + + * `local_content`: this is content uploaded by your local users. As a general + rule, you should back this up: it may represent the only copy of those + media files anywhere in the federation, and if they are lost, users will + see errors when viewing user or room avatars, and messages with attachments. + + * `local_thumbnails`: "thumbnails" of images uploaded by your users. If + [`dynamic_thumbnails`](../configuration/config_documentation.md#dynamic_thumbnails) + is enabled, these will be regenerated if they are removed from the disk, and + there is therefore no need to back them up. + + If `dynamic_thumbnails` is *not* enabled (the default): although this can + theoretically be regenerated from `local_content`, there is no tooling to do + so. We recommend that these are backed up too. + + * `remote_content`: this is a cache of content that was uploaded by a user on + another server, and has since been requested by a user on your own server. + + Typically there is no need to back up this directory: if a file in this directory + is removed, Synapse will attempt to fetch it again from the remote + server. + + * `remote_thumbnails`: thumbnails of images uploaded by users on other + servers. As with `remote_content`, there is normally no need to back this + up.
+ + * `url_cache`, `url_cache_thumbnails`: temporary caches of files downloaded + by the [URL previews](../../setup/installation.md#url-previews) feature. + These do not need to be backed up. diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index deb04570bbae..005633e46bb2 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -3128,6 +3128,15 @@ it was last used. It is possible to build an entry from an old `signing.key` file using the `export_signing_key` script which is provided with synapse. +If you have lost the private key file, you can ask another server you trust to +tell you the public keys it has seen from your server. To fetch the keys from +`matrix.org`, try something like: + +``` +curl https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com | + jq '.server_keys | map(.verify_keys) | add' +``` + Example configuration: ```yaml old_signing_keys: @@ -4391,9 +4400,9 @@ It is possible to scale the processes that handle sending outbound federation re by running a [`generic_worker`](../../workers.md#synapseappgeneric_worker) and adding its [`worker_name`](#worker_name) to a `federation_sender_instances` map. Doing so will remove handling of this function from the main process. Multiple workers can be added to this map, in which case the work is -balanced across them. +balanced across them. The way that the load balancing works is any outbound federation request will be assigned to a federation sender worker based on the hash of the destination server name. This means that all requests being sent to the same destination will be processed by the same worker instance. Multiple `federation_sender_instances` are useful if there is a federation @@ -4750,7 +4759,7 @@ This setting has the following sub-options: * `only_for_direct_messages`: Whether invites should be automatically accepted for all room types, or only for direct messages. Defaults to false. * `only_from_local_users`: Whether to only automatically accept invites from users on this homeserver. Defaults to false. -* `worker_to_run_on`: Which worker to run this module on. This must match +* `worker_to_run_on`: Which worker to run this module on. This must match the "worker_name". If not set or `null`, invites will be accepted on the main process.
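For comparison with the `curl`/`jq` snippet added to the signing-key documentation in PATCH 096/147 above, here is a minimal Python sketch of the same lookup. It assumes the third-party `requests` library is available; the endpoint and the `server_keys`/`verify_keys` response shape are taken from the documentation above.

```python
# Sketch equivalent of the curl/jq snippet above: ask matrix.org which public
# keys it has seen for our server, and merge the per-response `verify_keys`
# maps into a single dictionary.
import requests  # assumed to be available; any HTTP client would do

resp = requests.get(
    "https://matrix-federation.matrix.org/_matrix/key/v2/query/myserver.example.com"
)
resp.raise_for_status()

old_keys: dict = {}
for server_key in resp.json().get("server_keys", []):
    # Each entry maps key IDs (e.g. "ed25519:abc123") to public key data,
    # which you can then adapt into `old_signing_keys` entries.
    old_keys.update(server_key.get("verify_keys", {}))

print(old_keys)
```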
From 9d837daa8a68d35553df58f869f7a27542bd83fd Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:24:44 +0000 Subject: [PATCH 097/147] Bump immutabledict from 4.2.0 to 4.2.1 (#17941) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock b/poetry.lock index cf0ddc0cc8b8..327225c38a52 100644 --- a/poetry.lock +++ b/poetry.lock @@ -724,13 +724,13 @@ files = [ [[package]] name = "immutabledict" -version = "4.2.0" +version = "4.2.1" description = "Immutable wrapper around dictionaries (a fork of frozendict)" optional = false -python-versions = ">=3.8,<4.0" +python-versions = ">=3.8" files = [ - {file = "immutabledict-4.2.0-py3-none-any.whl", hash = "sha256:d728b2c2410d698d95e6200237feb50a695584d20289ad3379a439aa3d90baba"}, - {file = "immutabledict-4.2.0.tar.gz", hash = "sha256:e003fd81aad2377a5a758bf7e1086cf3b70b63e9a5cc2f46bce8d0a2b4727c5f"}, + {file = "immutabledict-4.2.1-py3-none-any.whl", hash = "sha256:c56a26ced38c236f79e74af3ccce53772827cef5c3bce7cab33ff2060f756373"}, + {file = "immutabledict-4.2.1.tar.gz", hash = "sha256:d91017248981c72eb66c8ff9834e99c2f53562346f23e7f51e7a5ebcf66a3bcc"}, ] [[package]] From 0f32408c801fe23a9d827c527218b993c85b5113 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:47:54 +0000 Subject: [PATCH 098/147] Bump phonenumbers from 8.13.49 to 8.13.50 (#17942) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 327225c38a52..af49564b7a97 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1443,13 +1443,13 @@ dev = ["jinja2"] [[package]] name = "phonenumbers" -version = "8.13.49" +version = "8.13.50" description = "Python version of Google's common library for parsing, formatting, storing and validating international phone numbers." 
optional = false python-versions = "*" files = [ - {file = "phonenumbers-8.13.49-py2.py3-none-any.whl", hash = "sha256:e17140955ab3d8f9580727372ea64c5ada5327932d6021ef6fd203c3db8c8139"}, - {file = "phonenumbers-8.13.49.tar.gz", hash = "sha256:e608ccb61f0bd42e6db1d2c421f7c22186b88f494870bf40aa31d1a2718ab0ae"}, + {file = "phonenumbers-8.13.50-py2.py3-none-any.whl", hash = "sha256:bb95dbc0d9979c51f7ad94bcd780784938958861fbb4b75a2fe39ccd3d58954a"}, + {file = "phonenumbers-8.13.50.tar.gz", hash = "sha256:e05ac6fb7b98c6d719a87ea895b9fc153673b4a51f455ec9afaf557ef4629da6"}, ] [[package]] From 4efd1056ca7cf02c492efb75d61c0830862e5a93 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:48:05 +0000 Subject: [PATCH 099/147] Bump packaging from 24.1 to 24.2 (#17940) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index af49564b7a97..eece22109560 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1418,13 +1418,13 @@ tests = ["Sphinx", "doubles", "flake8", "flake8-quotes", "gevent", "mock", "pyte [[package]] name = "packaging" -version = "24.1" +version = "24.2" description = "Core utilities for Python packages" optional = false python-versions = ">=3.8" files = [ - {file = "packaging-24.1-py3-none-any.whl", hash = "sha256:5b8f2217dbdbd2f7f384c41c628544e6d52f2d0f53c6d0c3ea61aa5d1d7ff124"}, - {file = "packaging-24.1.tar.gz", hash = "sha256:026ed72c8ed3fcce5bf8950572258698927fd1dbda10a5e981cdf0ac37f4f002"}, + {file = "packaging-24.2-py3-none-any.whl", hash = "sha256:09abb1bccd265c01f4a3aa3f7a7db064b36514d2cba19a2f694fe6150451a759"}, + {file = "packaging-24.2.tar.gz", hash = "sha256:c228a6dc5e932d346bc5739379109d49e8853dd8223571c7c5b55260edc0b97f"}, ] [[package]] From e918f683d4ef0658c706e0fb81f37da98c86157d Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:48:26 +0000 Subject: [PATCH 100/147] Bump serde from 1.0.214 to 1.0.215 (#17938) --- Cargo.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e9bc05159e2d..46c930ebd750 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -485,18 +485,18 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "serde" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f55c3193aca71c12ad7890f1785d2b73e1b9f63a0bbc353c08ef26fe03fc56b5" +checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.214" +version = "1.0.215" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de523f781f095e28fa605cdce0f8307e451cc0fd14e2eb4cd2e98a355b147766" +checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", From c5e89f5fae80105a556d20c449044923d79f6915 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 19 Nov 2024 11:20:48 +0000 Subject: [PATCH 101/147] Create one-off scheduled task to delete old OTKs (#17934) To work around the fact that, pre-https://github.com/element-hq/synapse/pull/17903, our database may have old one-time-keys that the clients have long thrown away the private keys for, we want to delete OTKs that look like they came from libolm. 
To spread the load a bit, without holding up other background database updates, we use a scheduled task to do the work. --- changelog.d/17934.feature | 1 + synapse/handlers/e2e_keys.py | 46 ++++++++++++ .../storage/databases/main/end_to_end_keys.py | 48 +++++++++++++ .../delta/88/05_drop_old_otks.sql.postgres | 19 +++++ .../main/delta/88/05_drop_old_otks.sql.sqlite | 19 +++++ tests/handlers/test_e2e_keys.py | 70 +++++++++++++++++++ 6 files changed, 203 insertions(+) create mode 100644 changelog.d/17934.feature create mode 100644 synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres create mode 100644 synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite diff --git a/changelog.d/17934.feature b/changelog.d/17934.feature new file mode 100644 index 000000000000..f0e138a30ff3 --- /dev/null +++ b/changelog.d/17934.feature @@ -0,0 +1 @@ +Add a one-off task to delete old one-time-keys, to guard against us having old OTKs in the database that the client has long forgotten about. diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 315461fefb37..540995e0627a 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -39,6 +39,8 @@ from synapse.types import ( JsonDict, JsonMapping, + ScheduledTask, + TaskStatus, UserID, get_domain_from_id, get_verify_key_from_cross_signing_key, @@ -70,6 +72,7 @@ def __init__(self, hs: "HomeServer"): self.is_mine = hs.is_mine self.clock = hs.get_clock() self._worker_lock_handler = hs.get_worker_locks_handler() + self._task_scheduler = hs.get_task_scheduler() federation_registry = hs.get_federation_registry() @@ -116,6 +119,10 @@ def __init__(self, hs: "HomeServer"): hs.config.experimental.msc3984_appservice_key_query ) + self._task_scheduler.register_action( + self._delete_old_one_time_keys_task, "delete_old_otks" + ) + @trace @cancellable async def query_devices( @@ -1574,6 +1581,45 @@ async def has_different_keys(self, user_id: str, body: JsonDict) -> bool: return True return False + async def _delete_old_one_time_keys_task( + self, task: ScheduledTask + ) -> Tuple[TaskStatus, Optional[JsonMapping], Optional[str]]: + """Scheduler task to delete old one time keys. + + Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility + that it could still have old OTKs that the client has dropped. This task is scheduled exactly once + by a database schema delta file, and it clears out old one-time-keys that look like they came from libolm. + """ + last_user = task.result.get("from_user", "") if task.result else "" + while True: + # We process users in batches of 100 + users, rowcount = await self.store.delete_old_otks_for_next_user_batch( + last_user, 100 + ) + if len(users) == 0: + # We're done! + return TaskStatus.COMPLETE, None, None + + logger.debug( + "Deleted %i old one-time-keys for users '%s'..'%s'", + rowcount, + users[0], + users[-1], + ) + last_user = users[-1] + + # Store our progress + await self._task_scheduler.update_task( + task.id, result={"from_user": last_user} + ) + + # Sleep a little before doing the next user. + # + # matrix.org has about 15M users in the e2e_one_time_keys_json table + # (comprising 20M devices). We want this to take about a week, so we need + # to do about one batch of 100 users every 4 seconds. 
+ await self.clock.sleep(4) + def _check_cross_signing_key( key: JsonDict, user_id: str, key_type: str, signing_key: Optional[VerifyKey] = None diff --git a/synapse/storage/databases/main/end_to_end_keys.py b/synapse/storage/databases/main/end_to_end_keys.py index 1fbc49e7c5ac..3bb8fccb5e6b 100644 --- a/synapse/storage/databases/main/end_to_end_keys.py +++ b/synapse/storage/databases/main/end_to_end_keys.py @@ -1453,6 +1453,54 @@ def impl(txn: LoggingTransaction) -> Tuple[bool, Optional[int]]: impl, ) + async def delete_old_otks_for_next_user_batch( + self, after_user_id: str, number_of_users: int + ) -> Tuple[List[str], int]: + """Deletes old OTKs belonging to the next batch of users + + Returns: + `(users, rows)`, where: + * `users` is the user IDs of the updated users. An empty list if we are done. + * `rows` is the number of deleted rows + """ + + def impl(txn: LoggingTransaction) -> Tuple[List[str], int]: + # Find a batch of users + txn.execute( + """ + SELECT DISTINCT(user_id) FROM e2e_one_time_keys_json + WHERE user_id > ? + ORDER BY user_id + LIMIT ? + """, + (after_user_id, number_of_users), + ) + users = [row[0] for row in txn.fetchall()] + if len(users) == 0: + return users, 0 + + # Delete any old OTKs belonging to those users. + # + # We only actually consider OTKs whose key ID is 6 characters long. These + # keys were likely made by libolm rather than Vodozemac; libolm only kept + # 100 private OTKs, so was far more vulnerable than Vodozemac to throwing + # away keys prematurely. + clause, args = make_in_list_sql_clause( + txn.database_engine, "user_id", users + ) + sql = f""" + DELETE FROM e2e_one_time_keys_json + WHERE {clause} AND ts_added_ms < ? AND length(key_id) = 6 + """ + args.append(self._clock.time_msec() - (7 * 24 * 3600 * 1000)) + txn.execute(sql, args) + + return users, txn.rowcount + + return await self.db_pool.runInteraction( + "delete_old_otks_for_next_user_batch", impl + ) + class EndToEndKeyStore(EndToEndKeyWorkerStore, SQLBaseStore): def __init__( diff --git a/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres new file mode 100644 index 000000000000..93a68836ee55 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.postgres @@ -0,0 +1,19 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +-- Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility +-- that it could still have old OTKs that the client has dropped. +-- +-- We create a scheduled task which will drop old OTKs, to flush them out. 
+INSERT INTO scheduled_tasks(id, action, status, timestamp) + VALUES ('delete_old_otks_task', 'delete_old_otks', 'scheduled', extract(epoch from current_timestamp) * 1000); diff --git a/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite new file mode 100644 index 000000000000..cdc2b5d211c9 --- /dev/null +++ b/synapse/storage/schema/main/delta/88/05_drop_old_otks.sql.sqlite @@ -0,0 +1,19 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +-- Until Synapse 1.119, Synapse used to issue one-time-keys in a random order, leading to the possibility +-- that it could still have old OTKs that the client has dropped. +-- +-- We create a scheduled task which will drop old OTKs, to flush them out. +INSERT INTO scheduled_tasks(id, action, status, timestamp) + VALUES ('delete_old_otks_task', 'delete_old_otks', 'scheduled', strftime('%s', 'now') * 1000); diff --git a/tests/handlers/test_e2e_keys.py b/tests/handlers/test_e2e_keys.py index bca314db83c8..e67efcc17f4c 100644 --- a/tests/handlers/test_e2e_keys.py +++ b/tests/handlers/test_e2e_keys.py @@ -19,6 +19,7 @@ # [This file includes modifications made by New Vector Limited] # # +import time from typing import Dict, Iterable from unittest import mock @@ -1826,3 +1827,72 @@ def test_check_cross_signing_setup(self) -> None: ) self.assertIs(exists, True) self.assertIs(replaceable_without_uia, False) + + def test_delete_old_one_time_keys(self) -> None: + """Test the db migration that clears out old OTKs""" + + # We upload two sets of keys, one just over a week ago, and one just less than + # a week ago. Each batch contains some keys that match the deletion pattern + # (key IDs of 6 chars), and some that do not. + # + # Finally, set the scheduled task going, and check what gets deleted. + + user_id = "@user000:" + self.hs.hostname + device_id = "xyz" + + # The scheduled task should be for "now" in real, wallclock time, so + # set the test reactor to just over a week ago. + self.reactor.advance(time.time() - 7.5 * 24 * 3600) + + # Upload some keys + self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + { + "one_time_keys": { + # some keys to delete + "alg1:AAAAAA": "key1", + "alg2:AAAAAB": {"key": "key2", "signatures": {"k1": "sig1"}}, + # A key to *not* delete + "alg2:AAAAAAAAAA": {"key": "key3"}, + } + }, + ) + ) + + # A day passes + self.reactor.advance(24 * 3600) + + # Upload some more keys + self.get_success( + self.handler.upload_keys_for_user( + user_id, + device_id, + { + "one_time_keys": { + # some keys which match the pattern + "alg1:BAAAAA": "key1", + "alg2:BAAAAB": {"key": "key2", "signatures": {"k1": "sig1"}}, + # A key to *not* delete + "alg2:BAAAAAAAAA": {"key": "key3"}, + } + }, + ) + ) + + # The rest of the week passes, which should set the scheduled task going. 
+ self.reactor.advance(6.5 * 24 * 3600) + + # Check what we're left with in the database + remaining_key_ids = { + row[0] + for row in self.get_success( + self.handler.store.db_pool.simple_select_list( + "e2e_one_time_keys_json", None, ["key_id"] + ) + ) + } + self.assertEqual( + remaining_key_ids, {"AAAAAAAAAA", "BAAAAA", "BAAAAB", "BAAAAAAAAA"} + ) From 1092a35a2a3c2ffe3d7d712707bd06deaecfde52 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 19 Nov 2024 15:03:32 +0000 Subject: [PATCH 102/147] Speed up slow initial sliding syncs on large servers (#17946) This was due to a missing index, which meant that deleting previous connections associated with the device and `conn_id` took a long time. --- changelog.d/17946.misc | 1 + synapse/_scripts/synapse_port_db.py | 2 ++ .../storage/databases/main/sliding_sync.py | 22 ++++++++++++++++++- .../88/05_sliding_sync_room_config_index.sql | 20 +++++++++++++++++ 4 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17946.misc create mode 100644 synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql diff --git a/changelog.d/17946.misc b/changelog.d/17946.misc new file mode 100644 index 000000000000..3520a75f58d1 --- /dev/null +++ b/changelog.d/17946.misc @@ -0,0 +1 @@ +Speed up slow initial sliding syncs on large servers. diff --git a/synapse/_scripts/synapse_port_db.py b/synapse/_scripts/synapse_port_db.py index 31639d366eca..d8f6f8ebdc3c 100755 --- a/synapse/_scripts/synapse_port_db.py +++ b/synapse/_scripts/synapse_port_db.py @@ -88,6 +88,7 @@ from synapse.storage.databases.main.room import RoomBackgroundUpdateStore from synapse.storage.databases.main.roommember import RoomMemberBackgroundUpdateStore from synapse.storage.databases.main.search import SearchBackgroundUpdateStore +from synapse.storage.databases.main.sliding_sync import SlidingSyncStore from synapse.storage.databases.main.state import MainStateBackgroundUpdateStore from synapse.storage.databases.main.stats import StatsStore from synapse.storage.databases.main.user_directory import ( @@ -255,6 +256,7 @@ class Store( ReceiptsBackgroundUpdateStore, RelationsWorkerStore, EventFederationWorkerStore, + SlidingSyncStore, ): def execute(self, f: Callable[..., R], *args: Any, **kwargs: Any) -> Awaitable[R]: return self.db_pool.runInteraction(f.__name__, f, *args, **kwargs) diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index 7b357c1ffea4..874dfdcb77a4 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -21,7 +21,11 @@ from synapse.api.errors import SlidingSyncUnknownPosition from synapse.logging.opentracing import log_kv from synapse.storage._base import SQLBaseStore, db_to_json -from synapse.storage.database import LoggingTransaction +from synapse.storage.database import ( + DatabasePool, + LoggingDatabaseConnection, + LoggingTransaction, +) from synapse.types import MultiWriterStreamToken, RoomStreamToken from synapse.types.handlers.sliding_sync import ( HaveSentRoom, @@ -35,12 +39,28 @@ from synapse.util.caches.descriptors import cached if TYPE_CHECKING: + from synapse.server import HomeServer from synapse.storage.databases.main import DataStore logger = logging.getLogger(__name__) class SlidingSyncStore(SQLBaseStore): + def __init__( + self, + database: DatabasePool, + db_conn: LoggingDatabaseConnection, + hs: "HomeServer", + ): + super().__init__(database, db_conn, hs) + + 
self.db_pool.updates.register_background_index_update( + update_name="sliding_sync_connection_room_configs_required_state_id_idx", + index_name="sliding_sync_connection_room_configs_required_state_id_idx", + table="sliding_sync_connection_room_configs", + columns=("required_state_id",), + ) + async def get_latest_bump_stamp_for_room( self, room_id: str, diff --git a/synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql b/synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql new file mode 100644 index 000000000000..7b2e18a84bce --- /dev/null +++ b/synapse/storage/schema/main/delta/88/05_sliding_sync_room_config_index.sql @@ -0,0 +1,20 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2024 New Vector, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + + +-- Add an index on sliding_sync_connection_room_configs(required_state_id), so +-- that when we delete entries in `sliding_sync_connection_required_state` it's +-- efficient for Postgres to check they've been deleted from +-- `sliding_sync_connection_room_configs` too +INSERT INTO background_updates (ordering, update_name, progress_json) VALUES + (8805, 'sliding_sync_connection_room_configs_required_state_id_idx', '{}'); From 8291aa8fd7b4890832165c4053a92ef5c8589c50 Mon Sep 17 00:00:00 2001 From: Renaud Allard Date: Wed, 20 Nov 2024 12:48:04 +0100 Subject: [PATCH 103/147] Support both import names of PyPI package `python-multipart`. (#17932) --- changelog.d/17932.misc | 1 + synapse/http/client.py | 21 +++++++++++++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17932.misc diff --git a/changelog.d/17932.misc b/changelog.d/17932.misc new file mode 100644 index 000000000000..2401c4cf213c --- /dev/null +++ b/changelog.d/17932.misc @@ -0,0 +1 @@ +Support new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around name conflict with PyPI package `multipart`. diff --git a/synapse/http/client.py b/synapse/http/client.py index c3b2299c9549..85923d956bcc 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -36,7 +36,6 @@ ) import attr -import multipart import treq from canonicaljson import encode_canonical_json from netaddr import AddrFormatError, IPAddress, IPSet @@ -93,6 +92,20 @@ if TYPE_CHECKING: from synapse.server import HomeServer +# Support both import names for the `python-multipart` (PyPI) library, +# which renamed its package name from `multipart` to `python_multipart` +# in 0.0.13 (though supports the old import name for compatibility). +# Note that the `multipart` package name conflicts with `multipart` (PyPI) +# so we should prefer importing from `python_multipart` when possible. 
+try: + from python_multipart import MultipartParser + + if TYPE_CHECKING: + from python_multipart import multipart +except ImportError: + from multipart import MultipartParser # type: ignore[no-redef] + + logger = logging.getLogger(__name__) outgoing_requests_counter = Counter("synapse_http_client_requests", "", ["method"]) @@ -1039,7 +1052,7 @@ def __init__( self.deferred = deferred self.boundary = boundary self.max_length = max_length - self.parser: Optional[multipart.MultipartParser] = None + self.parser: Optional[MultipartParser] = None self.multipart_response = MultipartResponse() self.has_redirect = False self.in_json = False @@ -1097,12 +1110,12 @@ def on_part_data(data: bytes, start: int, end: int) -> None: self.deferred.errback() self.file_length += end - start - callbacks: "multipart.multipart.MultipartCallbacks" = { + callbacks: "multipart.MultipartCallbacks" = { "on_header_field": on_header_field, "on_header_value": on_header_value, "on_part_data": on_part_data, } - self.parser = multipart.MultipartParser(self.boundary, callbacks) + self.parser = MultipartParser(self.boundary, callbacks) self.total_length += len(incoming_data) if self.max_length is not None and self.total_length >= self.max_length: From d0a474d312443a0ef6ebdbd9c6d3b3fd24a3500c Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Wed, 20 Nov 2024 07:48:22 -0700 Subject: [PATCH 104/147] Enable authenticated media by default (#17889) Co-authored-by: Olivier 'reivilibre --- changelog.d/17889.feature | 1 + docs/upgrade.md | 23 ++++++ .../configuration/config_documentation.md | 7 +- synapse/config/repository.py | 4 +- tests/media/test_media_storage.py | 80 ++++++++++++++++++- tests/replication/test_multi_media_repo.py | 4 + tests/rest/admin/test_admin.py | 9 ++- tests/rest/admin/test_media.py | 6 ++ tests/rest/admin/test_user.py | 4 +- tests/rest/media/test_domain_blocking.py | 4 +- tests/rest/media/test_url_preview.py | 3 + 11 files changed, 129 insertions(+), 16 deletions(-) create mode 100644 changelog.d/17889.feature diff --git a/changelog.d/17889.feature b/changelog.d/17889.feature new file mode 100644 index 000000000000..221282553bbe --- /dev/null +++ b/changelog.d/17889.feature @@ -0,0 +1 @@ +Enforce authenticated media by default. Administrators can revert this by configuring `enable_authenticated_media` to `false`. In a future release of Synapse, this option will be removed and become always-on. diff --git a/docs/upgrade.md b/docs/upgrade.md index 9f12d7c34f32..45e63b0c5de8 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -128,6 +128,29 @@ removing the experimental support for it in this release. The `experimental_features.msc3886_endpoint` configuration option has been removed. +## Authenticated media is now enforced by default + +The [`enable_authenticated_media`] configuration option now defaults to true. + +This means that clients and remote (federated) homeservers now need to use +the authenticated media endpoints in order to download media from your +homeserver. + +As an exception, existing media that was stored on the server prior to +this option changing to `true` will still be accessible over the +unauthenticated endpoints. + +The matrix.org homeserver has already been running with this option enabled +since September 2024, so most common clients and homeservers should already +be compatible. + +With that said, administrators who wish to disable this feature for broader +compatibility can still do so by manually configuring +`enable_authenticated_media: False`. 
+ +[`enable_authenticated_media`]: usage/configuration/config_documentation.md#enable_authenticated_media + + # Upgrading to v1.119.0 ## Minimum supported Python version diff --git a/docs/usage/configuration/config_documentation.md b/docs/usage/configuration/config_documentation.md index 005633e46bb2..7a48d76bbb1a 100644 --- a/docs/usage/configuration/config_documentation.md +++ b/docs/usage/configuration/config_documentation.md @@ -1887,8 +1887,7 @@ Config options related to Synapse's media store. When set to true, all subsequent media uploads will be marked as authenticated, and will not be available over legacy unauthenticated media endpoints (`/_matrix/media/(r0|v3|v1)/download` and `/_matrix/media/(r0|v3|v1)/thumbnail`) - requests for authenticated media over these endpoints will result in a 404. All media, including authenticated media, will be available over the authenticated media endpoints `_matrix/client/v1/media/download` and `_matrix/client/v1/media/thumbnail`. Media uploaded prior to setting this option to true will still be available over the legacy endpoints. Note if the setting is switched to false -after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to false, but -this will change to true in a future Synapse release. +after enabling, media marked as authenticated will be available over legacy endpoints. Defaults to true (previously false). In a future release of Synapse, this option will be removed and become always-on. In all cases, authenticated requests to download media will succeed, but for unauthenticated requests, this case-by-case breakdown describes whether media downloads are permitted: @@ -1910,9 +1909,11 @@ will perpetually be available over the legacy, unauthenticated endpoint, even af This is for backwards compatibility with older clients and homeservers that do not yet support requesting authenticated media; those older clients or homeservers will not be cut off from media they can already see. +_Changed in Synapse 1.120:_ This option now defaults to `True` when not set, whereas before this version it defaulted to `False`. 
+ Example configuration: ```yaml -enable_authenticated_media: true +enable_authenticated_media: false ``` --- ### `enable_media_repo` diff --git a/synapse/config/repository.py b/synapse/config/repository.py index 97ce6de52811..27860154e1c9 100644 --- a/synapse/config/repository.py +++ b/synapse/config/repository.py @@ -272,9 +272,7 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: remote_media_lifetime ) - self.enable_authenticated_media = config.get( - "enable_authenticated_media", False - ) + self.enable_authenticated_media = config.get("enable_authenticated_media", True) def generate_config_section(self, data_dir_path: str, **kwargs: Any) -> str: assert data_dir_path is not None diff --git a/tests/media/test_media_storage.py b/tests/media/test_media_storage.py index 034d9ece0b51..f4fbc0544a29 100644 --- a/tests/media/test_media_storage.py +++ b/tests/media/test_media_storage.py @@ -419,6 +419,11 @@ def _req( return channel + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_handle_missing_content_type(self) -> None: channel = self._req( b"attachment; filename=out" + self.test_image.extension, @@ -430,6 +435,11 @@ def test_handle_missing_content_type(self) -> None: headers.getRawHeaders(b"Content-Type"), [b"application/octet-stream"] ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_disposition_filename_ascii(self) -> None: """ If the filename is filename= then Synapse will decode it as an @@ -450,6 +460,11 @@ def test_disposition_filename_ascii(self) -> None: ], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_disposition_filenamestar_utf8escaped(self) -> None: """ If the filename is filename=*utf8'' then Synapse will @@ -475,6 +490,11 @@ def test_disposition_filenamestar_utf8escaped(self) -> None: ], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_disposition_none(self) -> None: """ If there is no filename, Content-Disposition should only @@ -491,6 +511,11 @@ def test_disposition_none(self) -> None: [b"inline" if self.test_image.is_inline else b"attachment"], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_thumbnail_crop(self) -> None: """Test that a cropped remote thumbnail is available.""" self._test_thumbnail( @@ -500,6 +525,11 @@ def test_thumbnail_crop(self) -> None: unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_thumbnail_scale(self) -> None: """Test that a scaled remote thumbnail is available.""" self._test_thumbnail( @@ -509,6 +539,11 @@ def test_thumbnail_scale(self) -> None: unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_invalid_type(self) -> None: """An invalid thumbnail type is never available.""" self._test_thumbnail( @@ -519,7 +554,10 @@ def test_invalid_type(self) -> None: ) @unittest.override_config( - {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}]} + { + "thumbnail_sizes": [{"width": 32, "height": 32, "method": "scale"}], + "enable_authenticated_media": False, + }, ) def test_no_thumbnail_crop(self) -> None: """ @@ -533,7 +571,10 @@ def test_no_thumbnail_crop(self) -> None: ) @unittest.override_config( - {"thumbnail_sizes": [{"width": 32, "height": 32, "method": "crop"}]} + { + "thumbnail_sizes": [{"width": 32, "height": 32, 
"method": "crop"}], + "enable_authenticated_media": False, + } ) def test_no_thumbnail_scale(self) -> None: """ @@ -546,6 +587,11 @@ def test_no_thumbnail_scale(self) -> None: unable_to_thumbnail=self.test_image.unable_to_thumbnail, ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_thumbnail_repeated_thumbnail(self) -> None: """Test that fetching the same thumbnail works, and deleting the on disk thumbnail regenerates it. @@ -720,6 +766,11 @@ def test_same_quality(self, method: str, desired_size: int) -> None: ) ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_x_robots_tag_header(self) -> None: """ Tests that the `X-Robots-Tag` header is present, which informs web crawlers @@ -733,6 +784,11 @@ def test_x_robots_tag_header(self) -> None: [b"noindex, nofollow, noarchive, noimageindex"], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_cross_origin_resource_policy_header(self) -> None: """ Test that the Cross-Origin-Resource-Policy header is set to "cross-origin" @@ -747,6 +803,11 @@ def test_cross_origin_resource_policy_header(self) -> None: [b"cross-origin"], ) + @unittest.override_config( + { + "enable_authenticated_media": False, + } + ) def test_unknown_v3_endpoint(self) -> None: """ If the v3 endpoint fails, try the r0 one. @@ -985,6 +1046,11 @@ def read_body_with_max_size_50MiB(*args: Any, **kwargs: Any) -> Deferred: d.callback(52428800) return d + @override_config( + { + "enable_authenticated_media": False, + } + ) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, @@ -1060,6 +1126,7 @@ async def _send_request(*args: Any, **kwargs: Any) -> IResponse: { "remote_media_download_per_second": "50M", "remote_media_download_burst_count": "50M", + "enable_authenticated_media": False, } ) @patch( @@ -1119,7 +1186,12 @@ async def _send_request(*args: Any, **kwargs: Any) -> IResponse: ) assert channel.code == 200 - @override_config({"remote_media_download_burst_count": "87M"}) + @override_config( + { + "remote_media_download_burst_count": "87M", + "enable_authenticated_media": False, + } + ) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, @@ -1159,7 +1231,7 @@ async def _send_request(*args: Any, **kwargs: Any) -> IResponse: ) assert channel2.code == 429 - @override_config({"max_upload_size": "29M"}) + @override_config({"max_upload_size": "29M", "enable_authenticated_media": False}) @patch( "synapse.http.matrixfederationclient.read_body_with_max_size", read_body_with_max_size_30MiB, diff --git a/tests/replication/test_multi_media_repo.py b/tests/replication/test_multi_media_repo.py index 6fc4600c41b3..f36af877c4ed 100644 --- a/tests/replication/test_multi_media_repo.py +++ b/tests/replication/test_multi_media_repo.py @@ -40,6 +40,7 @@ from tests.replication._base import BaseMultiWorkerStreamTestCase from tests.server import FakeChannel, FakeTransport, make_request from tests.test_utils import SMALL_PNG +from tests.unittest import override_config logger = logging.getLogger(__name__) @@ -148,6 +149,7 @@ def _get_media_req( return channel, request + @override_config({"enable_authenticated_media": False}) def test_basic(self) -> None: """Test basic fetching of remote media from a single worker.""" hs1 = self.make_worker_hs("synapse.app.generic_worker") @@ -164,6 +166,7 @@ def test_basic(self) -> None: self.assertEqual(channel.code, 200) 
self.assertEqual(channel.result["body"], b"Hello!") + @override_config({"enable_authenticated_media": False}) def test_download_simple_file_race(self) -> None: """Test that fetching remote media from two different processes at the same time works. @@ -203,6 +206,7 @@ def test_download_simple_file_race(self) -> None: # We expect only one new file to have been persisted. self.assertEqual(start_count + 1, self._count_remote_media()) + @override_config({"enable_authenticated_media": False}) def test_download_image_race(self) -> None: """Test that fetching remote *images* from two different processes at the same time works. diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 6351326fff84..5483f8f37f62 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -30,7 +30,7 @@ import synapse.rest.admin from synapse.http.server import JsonResource from synapse.rest.admin import VersionServlet -from synapse.rest.client import login, room +from synapse.rest.client import login, media, room from synapse.server import HomeServer from synapse.util import Clock @@ -60,6 +60,7 @@ class QuarantineMediaTestCase(unittest.HomeserverTestCase): synapse.rest.admin.register_servlets, synapse.rest.admin.register_servlets_for_media_repo, login.register_servlets, + media.register_servlets, room.register_servlets, ] @@ -74,7 +75,7 @@ def _ensure_quarantined( """Ensure a piece of media is quarantined when trying to access it.""" channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_and_media_id}", + f"/_matrix/client/v1/media/download/{server_and_media_id}", shorthand=False, access_token=admin_user_tok, ) @@ -131,7 +132,7 @@ def test_quarantine_media_by_id(self) -> None: # Attempt to access the media channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_name_and_media_id}", + f"/_matrix/client/v1/media/download/{server_name_and_media_id}", shorthand=False, access_token=non_admin_user_tok, ) @@ -295,7 +296,7 @@ def test_cannot_quarantine_safe_media(self) -> None: # Attempt to access each piece of media channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_and_media_id_2}", + f"/_matrix/client/v1/media/download/{server_and_media_id_2}", shorthand=False, access_token=non_admin_user_tok, ) diff --git a/tests/rest/admin/test_media.py b/tests/rest/admin/test_media.py index f37816551321..19c244cfcfc9 100644 --- a/tests/rest/admin/test_media.py +++ b/tests/rest/admin/test_media.py @@ -36,6 +36,7 @@ from tests import unittest from tests.test_utils import SMALL_PNG +from tests.unittest import override_config VALID_TIMESTAMP = 1609459200000 # 2021-01-01 in milliseconds INVALID_TIMESTAMP_IN_S = 1893456000 # 2030-01-01 in seconds @@ -126,6 +127,7 @@ def test_media_is_not_local(self) -> None: self.assertEqual(400, channel.code, msg=channel.json_body) self.assertEqual("Can only delete local media", channel.json_body["error"]) + @override_config({"enable_authenticated_media": False}) def test_delete_media(self) -> None: """ Tests that delete a media is successfully @@ -371,6 +373,7 @@ def test_delete_media_never_accessed(self, use_legacy_url: bool) -> None: self._access_media(server_and_media_id, False) + @override_config({"enable_authenticated_media": False}) def test_keep_media_by_date(self) -> None: """ Tests that media is not deleted if it is newer than `before_ts` @@ -408,6 +411,7 @@ def test_keep_media_by_date(self) -> None: self._access_media(server_and_media_id, False) + 
@override_config({"enable_authenticated_media": False}) def test_keep_media_by_size(self) -> None: """ Tests that media is not deleted if its size is smaller than or equal @@ -443,6 +447,7 @@ def test_keep_media_by_size(self) -> None: self._access_media(server_and_media_id, False) + @override_config({"enable_authenticated_media": False}) def test_keep_media_by_user_avatar(self) -> None: """ Tests that we do not delete media if is used as a user avatar @@ -487,6 +492,7 @@ def test_keep_media_by_user_avatar(self) -> None: self._access_media(server_and_media_id, False) + @override_config({"enable_authenticated_media": False}) def test_keep_media_by_room_avatar(self) -> None: """ Tests that we do not delete media if it is used as a room avatar diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 668ccb89ff1d..6d050e778403 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -45,6 +45,7 @@ devices, login, logout, + media, profile, register, room, @@ -3517,6 +3518,7 @@ class UserMediaRestTestCase(unittest.HomeserverTestCase): servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, + media.register_servlets, ] def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: @@ -4023,7 +4025,7 @@ def _create_media_and_access( # Try to access a media and to create `last_access_ts` channel = self.make_request( "GET", - f"/_matrix/media/v3/download/{server_and_media_id}", + f"/_matrix/client/v1/media/download/{server_and_media_id}", shorthand=False, access_token=user_token, ) diff --git a/tests/rest/media/test_domain_blocking.py b/tests/rest/media/test_domain_blocking.py index 72205c6bb3b5..49d81f4b2864 100644 --- a/tests/rest/media/test_domain_blocking.py +++ b/tests/rest/media/test_domain_blocking.py @@ -91,7 +91,8 @@ def test_cannot_download_blocked_media(self) -> None: { # Disable downloads from a domain we won't be requesting downloads from. # This proves we haven't broken anything. - "prevent_media_downloads_from": ["not-listed.com"] + "prevent_media_downloads_from": ["not-listed.com"], + "enable_authenticated_media": False, } ) def test_remote_media_normally_unblocked(self) -> None: @@ -132,6 +133,7 @@ def test_cannot_download_blocked_media_thumbnail(self) -> None: # This proves we haven't broken anything. 
"prevent_media_downloads_from": ["not-listed.com"], "dynamic_thumbnails": True, + "enable_authenticated_media": False, } ) def test_remote_media_thumbnail_normally_unblocked(self) -> None: diff --git a/tests/rest/media/test_url_preview.py b/tests/rest/media/test_url_preview.py index a96f0e7fca21..103d7662d9d2 100644 --- a/tests/rest/media/test_url_preview.py +++ b/tests/rest/media/test_url_preview.py @@ -42,6 +42,7 @@ from tests import unittest from tests.server import FakeTransport from tests.test_utils import SMALL_PNG +from tests.unittest import override_config try: import lxml @@ -1259,6 +1260,7 @@ def _download_image(self) -> Tuple[str, str]: self.assertIsNone(_port) return host, media_id + @override_config({"enable_authenticated_media": False}) def test_storage_providers_exclude_files(self) -> None: """Test that files are not stored in or fetched from storage providers.""" host, media_id = self._download_image() @@ -1301,6 +1303,7 @@ def test_storage_providers_exclude_files(self) -> None: "URL cache file was unexpectedly retrieved from a storage provider", ) + @override_config({"enable_authenticated_media": False}) def test_storage_providers_exclude_thumbnails(self) -> None: """Test that thumbnails are not stored in or fetched from storage providers.""" host, media_id = self._download_image() From ddd1d79d03e6381cebe2f3f1fe35ba1823d291f7 Mon Sep 17 00:00:00 2001 From: Olivier 'reivilibre Date: Wed, 20 Nov 2024 15:01:56 +0000 Subject: [PATCH 105/147] Fix nix flake --- flake.lock | 48 +++++++----------------------------------------- flake.nix | 4 ++-- 2 files changed, 9 insertions(+), 43 deletions(-) diff --git a/flake.lock b/flake.lock index 6b25cef3fc96..a6a2aea3285a 100644 --- a/flake.lock +++ b/flake.lock @@ -56,24 +56,6 @@ "type": "github" } }, - "flake-utils_2": { - "inputs": { - "systems": "systems_2" - }, - "locked": { - "lastModified": 1681202837, - "narHash": "sha256-H+Rh19JDwRtpVPAWp64F+rlEtxUWBAQW28eAi3SRSzg=", - "owner": "numtide", - "repo": "flake-utils", - "rev": "cfacdce06f30d2b68473a46042957675eebb3401", - "type": "github" - }, - "original": { - "owner": "numtide", - "repo": "flake-utils", - "type": "github" - } - }, "gitignore": { "inputs": { "nixpkgs": [ @@ -202,11 +184,11 @@ }, "nixpkgs_3": { "locked": { - "lastModified": 1681358109, - "narHash": "sha256-eKyxW4OohHQx9Urxi7TQlFBTDWII+F+x2hklDOQPB50=", + "lastModified": 1728538411, + "narHash": "sha256-f0SBJz1eZ2yOuKUr5CA9BHULGXVSn6miBuUWdTyhUhU=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "96ba1c52e54e74c3197f4d43026b3f3d92e83ff9", + "rev": "b69de56fac8c2b6f8fd27f2eca01dcda8e0a4221", "type": "github" }, "original": { @@ -249,20 +231,19 @@ "devenv": "devenv", "nixpkgs": "nixpkgs_2", "rust-overlay": "rust-overlay", - "systems": "systems_3" + "systems": "systems_2" } }, "rust-overlay": { "inputs": { - "flake-utils": "flake-utils_2", "nixpkgs": "nixpkgs_3" }, "locked": { - "lastModified": 1693966243, - "narHash": "sha256-a2CA1aMIPE67JWSVIGoGtD3EGlFdK9+OlJQs0FOWCKY=", + "lastModified": 1731897198, + "narHash": "sha256-Ou7vLETSKwmE/HRQz4cImXXJBr/k9gp4J4z/PF8LzTE=", "owner": "oxalica", "repo": "rust-overlay", - "rev": "a8b4bb4cbb744baaabc3e69099f352f99164e2c1", + "rev": "0be641045af6d8666c11c2c40e45ffc9667839b5", "type": "github" }, "original": { @@ -300,21 +281,6 @@ "repo": "default", "type": "github" } - }, - "systems_3": { - "locked": { - "lastModified": 1681028828, - "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=", - "owner": "nix-systems", - "repo": "default", - "rev": 
"da67096a3b9bf56a91d16901293e51ba5b49a27e", - "type": "github" - }, - "original": { - "owner": "nix-systems", - "repo": "default", - "type": "github" - } } }, "root": "root", diff --git a/flake.nix b/flake.nix index bc360ae44a2b..749c10da1d04 100644 --- a/flake.nix +++ b/flake.nix @@ -82,7 +82,7 @@ # # NOTE: We currently need to set the Rust version unnecessarily high # in order to work around https://github.com/matrix-org/synapse/issues/15939 - (rust-bin.stable."1.71.1".default.override { + (rust-bin.stable."1.82.0".default.override { # Additionally install the "rust-src" extension to allow diving into the # Rust source code in an IDE (rust-analyzer will also make use of it). extensions = [ "rust-src" ]; @@ -205,7 +205,7 @@ # corresponding Nix packages on https://search.nixos.org/packages. # # This was done until `./install-deps.pl --dryrun` produced no output. - env.PERL5LIB = "${with pkgs.perl536Packages; makePerlPath [ + env.PERL5LIB = "${with pkgs.perl538Packages; makePerlPath [ DBI ClassMethodModifiers CryptEd25519 From ec4d1369651e854ce0b2f92fde1ea90bfde9a17f Mon Sep 17 00:00:00 2001 From: Olivier 'reivilibre Date: Wed, 20 Nov 2024 15:13:32 +0000 Subject: [PATCH 106/147] 1.120.0rc1 --- CHANGES.md | 47 +++++++++++++++++++++++++++++++++++++++ changelog.d/17638.removal | 1 - changelog.d/17865.misc | 1 - changelog.d/17889.feature | 1 - changelog.d/17913.doc | 1 - changelog.d/17923.misc | 1 - changelog.d/17924.misc | 1 - changelog.d/17928.misc | 1 - changelog.d/17931.doc | 1 - changelog.d/17932.misc | 1 - changelog.d/17934.feature | 1 - changelog.d/17946.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 14 files changed, 54 insertions(+), 12 deletions(-) delete mode 100644 changelog.d/17638.removal delete mode 100644 changelog.d/17865.misc delete mode 100644 changelog.d/17889.feature delete mode 100644 changelog.d/17913.doc delete mode 100644 changelog.d/17923.misc delete mode 100644 changelog.d/17924.misc delete mode 100644 changelog.d/17928.misc delete mode 100644 changelog.d/17931.doc delete mode 100644 changelog.d/17932.misc delete mode 100644 changelog.d/17934.feature delete mode 100644 changelog.d/17946.misc diff --git a/CHANGES.md b/CHANGES.md index be5c18c84baf..98fba52bae67 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,50 @@ +# Synapse 1.120.0rc1 (2024-11-20) + +This release enables the enforcement of authenticated media by default, with exemptions for media that is already present in the +homeserver's media store. + +Most homeservers operating in the public federation will not be impacted by this change, given that +the large homeserver `matrix.org` enabled this in September 2024 and therefore most clients and servers +will already have updated as a result. + +Some server administrators may still wish to disable this enforcement for the time being, in the interest of compatibility with older clients +and older federated homeservers. +See the [upgrade notes](https://element-hq.github.io/synapse/v1.120/upgrade.html#authenticated-media-is-now-enforced-by-default) for more information. + +### Features + +- Enforce authenticated media by default. Administrators can revert this by configuring `enable_authenticated_media` to `false`. In a future release of Synapse, this option will be removed and become always-on. ([\#17889](https://github.com/element-hq/synapse/issues/17889)) +- Add a one-off task to delete old One-Time Keys, to guard against us having old OTKs in the database that the client has long forgotten about. 
([\#17934](https://github.com/element-hq/synapse/issues/17934)) + +### Improved Documentation + +- Clarify the semantics of the `enable_authenticated_media` configuration option. ([\#17913](https://github.com/element-hq/synapse/issues/17913)) +- Add documentation about backing up Synapse. ([\#17931](https://github.com/element-hq/synapse/issues/17931)) + +### Deprecations and Removals + +- Remove support for [MSC3886: Simple client rendezvous capability](https://github.com/matrix-org/matrix-spec-proposals/pull/3886), which has been superseded by [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108) and therefore closed. ([\#17638](https://github.com/element-hq/synapse/issues/17638)) + +### Internal Changes + +- Addressed some typos in docs and returned error message for unknown MXC ID. ([\#17865](https://github.com/element-hq/synapse/issues/17865)) +- Unpin the upload release GHA action. ([\#17923](https://github.com/element-hq/synapse/issues/17923)) +- Bump macos version used to build wheels during release, as current version used is end-of-life. ([\#17924](https://github.com/element-hq/synapse/issues/17924)) +- Move server event filtering logic to rust. ([\#17928](https://github.com/element-hq/synapse/issues/17928)) +- Support new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around name conflict with PyPI package `multipart`. ([\#17932](https://github.com/element-hq/synapse/issues/17932)) +- Speed up slow initial sliding syncs on large servers. ([\#17946](https://github.com/element-hq/synapse/issues/17946)) + +### Updates to locked dependencies + +* Bump anyhow from 1.0.92 to 1.0.93. ([\#17920](https://github.com/element-hq/synapse/issues/17920)) +* Bump bleach from 6.1.0 to 6.2.0. ([\#17918](https://github.com/element-hq/synapse/issues/17918)) +* Bump immutabledict from 4.2.0 to 4.2.1. ([\#17941](https://github.com/element-hq/synapse/issues/17941)) +* Bump packaging from 24.1 to 24.2. ([\#17940](https://github.com/element-hq/synapse/issues/17940)) +* Bump phonenumbers from 8.13.49 to 8.13.50. ([\#17942](https://github.com/element-hq/synapse/issues/17942)) +* Bump pygithub from 2.4.0 to 2.5.0. ([\#17917](https://github.com/element-hq/synapse/issues/17917)) +* Bump ruff from 0.7.2 to 0.7.3. ([\#17919](https://github.com/element-hq/synapse/issues/17919)) +* Bump serde from 1.0.214 to 1.0.215. ([\#17938](https://github.com/element-hq/synapse/issues/17938)) + # Synapse 1.119.0 (2024-11-13) No significant changes since 1.119.0rc2. diff --git a/changelog.d/17638.removal b/changelog.d/17638.removal deleted file mode 100644 index 1bb09e976ec5..000000000000 --- a/changelog.d/17638.removal +++ /dev/null @@ -1 +0,0 @@ -Remove support for closed [MSC3886](https://github.com/matrix-org/matrix-spec-proposals/pull/3886). \ No newline at end of file diff --git a/changelog.d/17865.misc b/changelog.d/17865.misc deleted file mode 100644 index 2303a7e1b74e..000000000000 --- a/changelog.d/17865.misc +++ /dev/null @@ -1 +0,0 @@ -Addressed some typos in docs and returned error message for unknown MXC ID. diff --git a/changelog.d/17889.feature b/changelog.d/17889.feature deleted file mode 100644 index 221282553bbe..000000000000 --- a/changelog.d/17889.feature +++ /dev/null @@ -1 +0,0 @@ -Enforce authenticated media by default. Administrators can revert this by configuring `enable_authenticated_media` to `false`. In a future release of Synapse, this option will be removed and become always-on. 
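As the feature entry above notes, authenticated media is now the default, so clients have to fetch media via the authenticated `/_matrix/client/v1/media/download` endpoint rather than the legacy `/_matrix/media/v3/download` route. A minimal sketch of what that looks like from a client's side, using `requests`; the homeserver URL, access token, and media ID below are placeholders rather than values from these patches:

```python
# Sketch: fetching media over the authenticated endpoint that this release
# makes the default. All concrete values here are placeholders.
import requests

HOMESERVER = "https://example.com"            # placeholder
ACCESS_TOKEN = "syt_example_token"            # placeholder
SERVER_AND_MEDIA_ID = "example.com/abcdefgh"  # placeholder

resp = requests.get(
    f"{HOMESERVER}/_matrix/client/v1/media/download/{SERVER_AND_MEDIA_ID}",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    timeout=10,
)
resp.raise_for_status()  # raises on 4xx/5xx

with open("downloaded_media", "wb") as f:
    f.write(resp.content)
```

Unauthenticated requests to the legacy endpoints now return a 404 for media marked as authenticated, which is why the media tests in the earlier patch either switch to the authenticated endpoint or explicitly override `enable_authenticated_media: False`.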
diff --git a/changelog.d/17913.doc b/changelog.d/17913.doc deleted file mode 100644 index 39f59795625a..000000000000 --- a/changelog.d/17913.doc +++ /dev/null @@ -1 +0,0 @@ -Clarify the semantics of the `enable_authenticated_media` configuration option. diff --git a/changelog.d/17923.misc b/changelog.d/17923.misc deleted file mode 100644 index 4d74e7e1849c..000000000000 --- a/changelog.d/17923.misc +++ /dev/null @@ -1 +0,0 @@ -Unpin the upload release GHA action. diff --git a/changelog.d/17924.misc b/changelog.d/17924.misc deleted file mode 100644 index c7cc502360e0..000000000000 --- a/changelog.d/17924.misc +++ /dev/null @@ -1 +0,0 @@ -Bump macos version used to build wheels during release, as current version used is end-of-life. diff --git a/changelog.d/17928.misc b/changelog.d/17928.misc deleted file mode 100644 index b5aef4457a40..000000000000 --- a/changelog.d/17928.misc +++ /dev/null @@ -1 +0,0 @@ -Move server event filtering logic to rust. diff --git a/changelog.d/17931.doc b/changelog.d/17931.doc deleted file mode 100644 index 9207cb0a1c5f..000000000000 --- a/changelog.d/17931.doc +++ /dev/null @@ -1 +0,0 @@ -Add documentation about backing up Synapse. diff --git a/changelog.d/17932.misc b/changelog.d/17932.misc deleted file mode 100644 index 2401c4cf213c..000000000000 --- a/changelog.d/17932.misc +++ /dev/null @@ -1 +0,0 @@ -Support new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around name conflict with PyPI package `multipart`. diff --git a/changelog.d/17934.feature b/changelog.d/17934.feature deleted file mode 100644 index f0e138a30ff3..000000000000 --- a/changelog.d/17934.feature +++ /dev/null @@ -1 +0,0 @@ -Add a one-off task to delete old one-time-keys, to guard against us having old OTKs in the database that the client has long forgotten about. diff --git a/changelog.d/17946.misc b/changelog.d/17946.misc deleted file mode 100644 index 3520a75f58d1..000000000000 --- a/changelog.d/17946.misc +++ /dev/null @@ -1 +0,0 @@ -Speed up slow initial sliding syncs on large servers. diff --git a/debian/changelog b/debian/changelog index bacd453cb4f4..d7cec3fa8a8e 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium + + * New Synapse release 1.120.0rc1. + + -- Synapse Packaging team Wed, 20 Nov 2024 15:02:21 +0000 + matrix-synapse-py3 (1.119.0) stable; urgency=medium * New Synapse release 1.119.0. diff --git a/pyproject.toml b/pyproject.toml index 3688aececf37..f0a4b682de16 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.119.0" +version = "1.120.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From f73edbe4d216be2279caed3909736d1b8906e429 Mon Sep 17 00:00:00 2001 From: Will Hunt Date: Wed, 20 Nov 2024 16:35:43 +0000 Subject: [PATCH 107/147] Add encrypted appservice extensions to Complement test image. (#17945) --- changelog.d/17945.misc | 1 + docker/complement/conf/workers-shared-extra.yaml.j2 | 10 ++++++++++ 2 files changed, 11 insertions(+) create mode 100644 changelog.d/17945.misc diff --git a/changelog.d/17945.misc b/changelog.d/17945.misc new file mode 100644 index 000000000000..eeebb921699c --- /dev/null +++ b/changelog.d/17945.misc @@ -0,0 +1 @@ +Enable encrypted appservice related experimental features in the complement docker image. 
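The flags named in that changelog entry are enabled in the complement worker template shown in the next diff. As a quick illustration (not part of the patch), a sketch that parses a rendered config and asserts the appservice-related experimental flags are on; the file path is hypothetical and PyYAML is assumed:

```python
# Sketch (not part of the patch): check that a rendered complement config
# enables the appservice-related experimental flags added below. The path
# "workers-shared-extra.yaml" is hypothetical; PyYAML is assumed.
import yaml

EXPECTED = (
    "msc2409_to_device_messages_enabled",
    "msc3202_device_masquerading",
    "msc3202_transaction_extensions",
    "msc3983_appservice_otk_claims",
    "msc3984_appservice_key_query",
)

with open("workers-shared-extra.yaml") as f:
    config = yaml.safe_load(f)

experimental = config.get("experimental_features", {})
missing = [flag for flag in EXPECTED if experimental.get(flag) is not True]
assert not missing, f"flags not enabled: {missing}"
print("all appservice experimental flags are enabled")
```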
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index b9334cc53bb6..9a74c617bc4d 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -104,6 +104,16 @@ experimental_features: msc3967_enabled: true # Expose a room summary for public rooms msc3266_enabled: true + # Send to-device messages to application services + msc2409_to_device_messages_enabled: true + # Allow application services to masquerade devices + msc3202_device_masquerading: true + # Sending device list changes, one-time key counts and fallback key usage to application services + msc3202_transaction_extensions: true + # Proxy OTK claim requests to exclusive ASes + msc3983_appservice_otk_claims: true + # Proxy key queries to exclusive ASes + msc3984_appservice_key_query: true server_notices: system_mxid_localpart: _server From 0202e5f210a8356900dccf10fcbfa4688d8b2f99 Mon Sep 17 00:00:00 2001 From: Olivier 'reivilibre Date: Wed, 20 Nov 2024 16:45:54 +0000 Subject: [PATCH 108/147] Tweak changelog --- CHANGES.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 98fba52bae67..31d9914b39fa 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -29,8 +29,8 @@ See the [upgrade notes](https://element-hq.github.io/synapse/v1.120/upgrade.html - Addressed some typos in docs and returned error message for unknown MXC ID. ([\#17865](https://github.com/element-hq/synapse/issues/17865)) - Unpin the upload release GHA action. ([\#17923](https://github.com/element-hq/synapse/issues/17923)) -- Bump macos version used to build wheels during release, as current version used is end-of-life. ([\#17924](https://github.com/element-hq/synapse/issues/17924)) -- Move server event filtering logic to rust. ([\#17928](https://github.com/element-hq/synapse/issues/17928)) +- Bump macOS version used to build wheels during release, as current version used is end-of-life. ([\#17924](https://github.com/element-hq/synapse/issues/17924)) +- Move server event filtering logic to Rust. ([\#17928](https://github.com/element-hq/synapse/issues/17928)) - Support new package name of PyPI package `python-multipart` 0.0.13 so that distro packagers do not need to work around name conflict with PyPI package `multipart`. ([\#17932](https://github.com/element-hq/synapse/issues/17932)) - Speed up slow initial sliding syncs on large servers. ([\#17946](https://github.com/element-hq/synapse/issues/17946)) From 84ec15c47edc3489ee5f985af3f290b0f1ad021e Mon Sep 17 00:00:00 2001 From: V02460 Date: Wed, 20 Nov 2024 17:49:21 +0100 Subject: [PATCH 109/147] Raise setuptools_rust version cap to 1.10.2 (#17944) --- changelog.d/17944.misc | 1 + pyproject.toml | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17944.misc diff --git a/changelog.d/17944.misc b/changelog.d/17944.misc new file mode 100644 index 000000000000..a8a645103f49 --- /dev/null +++ b/changelog.d/17944.misc @@ -0,0 +1 @@ +Raise setuptools_rust version cap to 1.10.2. \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3688aececf37..4c51cf340d99 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -370,7 +370,7 @@ tomli = ">=1.2.3" # runtime errors caused by build system changes. # We are happy to raise these upper bounds upon request, # provided we check that it's safe to do so (i.e. that CI passes). 
-requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.8.1"] +requires = ["poetry-core>=1.1.0,<=1.9.1", "setuptools_rust>=1.3,<=1.10.2"] build-backend = "poetry.core.masonry.api" From 81b080f7a29dd351b4fa10037213b81a0faafaa2 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 20 Nov 2024 16:52:19 +0000 Subject: [PATCH 110/147] Bump serde_json from 1.0.132 to 1.0.133 (#17939) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 46c930ebd750..5c8f627fd7c2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -505,9 +505,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.132" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d726bfaff4b320266d395898905d0eba0345aae23b54aee3a737e260fd46db03" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", From 79c02cada0dc1b1ec0920789df4d633704531287 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 20 Nov 2024 17:12:17 +0000 Subject: [PATCH 111/147] Fix incorrect comment in new schema delta (#17936) Added in #17912, was a bad copy and paste. --- changelog.d/17936.misc | 1 + .../schema/main/delta/88/04_current_state_delta_index.sql | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17936.misc diff --git a/changelog.d/17936.misc b/changelog.d/17936.misc new file mode 100644 index 000000000000..91d976fbd9ca --- /dev/null +++ b/changelog.d/17936.misc @@ -0,0 +1 @@ +Fix incorrect comment in new schema delta. diff --git a/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql b/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql index ad54302a8f9f..0ee78df1a0a1 100644 --- a/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql +++ b/synapse/storage/schema/main/delta/88/04_current_state_delta_index.sql @@ -12,7 +12,7 @@ -- . --- Add an index on (user_id, device_id, algorithm, ts_added_ms) on e2e_one_time_keys_json, so that OTKs can --- efficiently be issued in the same order they were uploaded. +-- Add an index on `current_state_delta_stream(room_id, stream_id)` to allow +-- efficient per-room lookups. INSERT INTO background_updates (ordering, update_name, progress_json) VALUES (8804, 'current_state_delta_stream_room_index', '{}'); From 80e39fd834baeb30b2fa21b9df5da762dfbccb73 Mon Sep 17 00:00:00 2001 From: Valentin Iovene Date: Wed, 20 Nov 2024 23:06:08 +0100 Subject: [PATCH 112/147] Add Forgejo oidc provider config example (#17872) --- changelog.d/17872.doc | 1 + docs/openid.md | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+) create mode 100644 changelog.d/17872.doc diff --git a/changelog.d/17872.doc b/changelog.d/17872.doc new file mode 100644 index 000000000000..7f8b2d349535 --- /dev/null +++ b/changelog.d/17872.doc @@ -0,0 +1 @@ +Add OIDC example configuration for Forgejo (fork of Gitea). diff --git a/docs/openid.md b/docs/openid.md index 7a10b1615b82..5a3d7e9fba65 100644 --- a/docs/openid.md +++ b/docs/openid.md @@ -336,6 +336,36 @@ but it has a `response_types_supported` which excludes "code" (which we rely on, is even mentioned in their [documentation](https://developers.facebook.com/docs/facebook-login/manually-build-a-login-flow#login)), so we have to disable discovery and configure the URIs manually. +### Forgejo + +Forgejo is a fork of Gitea that can act as an OAuth2 provider. 
+ +The implementation of OAuth2 is improved compared to Gitea, as it provides a correctly defined `subject_claim` and `scopes`. + +Synapse config: + +```yaml +oidc_providers: + - idp_id: forgejo + idp_name: Forgejo + discover: false + issuer: "https://your-forgejo.com/" + client_id: "your-client-id" # TO BE FILLED + client_secret: "your-client-secret" # TO BE FILLED + client_auth_method: client_secret_post + scopes: ["openid", "profile", "email", "groups"] + authorization_endpoint: "https://your-forgejo.com/login/oauth/authorize" + token_endpoint: "https://your-forgejo.com/login/oauth/access_token" + userinfo_endpoint: "https://your-forgejo.com/api/v1/user" + user_mapping_provider: + config: + subject_claim: "sub" + picture_claim: "picture" + localpart_template: "{{ user.preferred_username }}" + display_name_template: "{{ user.name }}" + email_template: "{{ user.email }}" +``` + ### GitHub [GitHub][github-idp] is a bit special as it is not an OpenID Connect compliant provider, but From 4c67d20af73c11010738c43d56dffe4762400e50 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Fri, 22 Nov 2024 12:35:03 +0000 Subject: [PATCH 113/147] link to element-docker-demo from contrib/docker* (#17953) --- changelog.d/17953.doc | 1 + contrib/docker/README.md | 3 +++ contrib/docker_compose_workers/README.md | 3 +++ 3 files changed, 7 insertions(+) create mode 100644 changelog.d/17953.doc diff --git a/changelog.d/17953.doc b/changelog.d/17953.doc new file mode 100644 index 000000000000..10f5a27ba9fd --- /dev/null +++ b/changelog.d/17953.doc @@ -0,0 +1 @@ +Link to element-docker-demo from contrib/docker*. diff --git a/contrib/docker/README.md b/contrib/docker/README.md index 89c1518bd0b2..fdfa96795a17 100644 --- a/contrib/docker/README.md +++ b/contrib/docker/README.md @@ -30,3 +30,6 @@ docker-compose up -d ### More information For more information on required environment variables and mounts, see the main docker documentation at [/docker/README.md](../../docker/README.md) + +**For a more comprehensive Docker Compose example showcasing a full Matrix 2.0 stack, please see +https://github.com/element-hq/element-docker-demo** \ No newline at end of file diff --git a/contrib/docker_compose_workers/README.md b/contrib/docker_compose_workers/README.md index 81518f6ba129..16c8c26795da 100644 --- a/contrib/docker_compose_workers/README.md +++ b/contrib/docker_compose_workers/README.md @@ -8,6 +8,9 @@ All examples and snippets assume that your Synapse service is called `synapse` i An example Docker Compose file can be found [here](docker-compose.yaml). +**For a more comprehensive Docker Compose example, showcasing a full Matrix 2.0 stack (originally based on this +docker-compose.yaml), please see https://github.com/element-hq/element-docker-demo** + ## Worker Service Examples in Docker Compose In order to start the Synapse container as a worker, you must specify an `entrypoint` that loads both the `homeserver.yaml` and the configuration for the worker (`synapse-generic-worker-1.yaml` in the example below). You must also include the worker type in the environment variable `SYNAPSE_WORKER` or alternatively pass `-m synapse.app.generic_worker` as part of the `entrypoint` after `"/start.py", "run"`). 
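Stepping back to the Forgejo OIDC example a little earlier: it may help to see how the Jinja templates in its `user_mapping_provider` section actually resolve. A small illustrative sketch, rendering the same templates with `jinja2` against made-up userinfo claims; Synapse's real OIDC mapping provider does more than this (e.g. localpart validation), so this only shows the substitution step:

```python
# Illustrative only: render the user_mapping_provider templates from the
# Forgejo example against made-up userinfo claims. The claims below are
# invented for this sketch.
from jinja2 import Environment

userinfo = {
    "sub": "42",
    "preferred_username": "alice",
    "name": "Alice Example",
    "email": "alice@example.com",
}

env = Environment()
templates = {
    "localpart": "{{ user.preferred_username }}",
    "display_name": "{{ user.name }}",
    "email": "{{ user.email }}",
}

for field, template in templates.items():
    print(field, "=>", env.from_string(template).render(user=userinfo))
# localpart => alice
# display_name => Alice Example
# email => alice@example.com
```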
From 4587decd678300217969f1d2f69b226421a33ced Mon Sep 17 00:00:00 2001 From: Shay Date: Fri, 22 Nov 2024 04:37:19 -0800 Subject: [PATCH 114/147] Return suspended status when querying user account (#17952) --- changelog.d/17952.misc | 1 + docs/admin_api/user_admin_api.md | 3 ++- synapse/handlers/admin.py | 1 + tests/rest/admin/test_user.py | 1 + 4 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17952.misc diff --git a/changelog.d/17952.misc b/changelog.d/17952.misc new file mode 100644 index 000000000000..84fc8bfc2906 --- /dev/null +++ b/changelog.d/17952.misc @@ -0,0 +1 @@ +Return whether the user is suspended when querying the user account in the Admin API. \ No newline at end of file diff --git a/docs/admin_api/user_admin_api.md b/docs/admin_api/user_admin_api.md index 96a2994b7b4f..a6e2e0a1537d 100644 --- a/docs/admin_api/user_admin_api.md +++ b/docs/admin_api/user_admin_api.md @@ -55,7 +55,8 @@ It returns a JSON body like the following: } ], "user_type": null, - "locked": false + "locked": false, + "suspended": false } ``` diff --git a/synapse/handlers/admin.py b/synapse/handlers/admin.py index d1989e9d2c60..d1194545aeb6 100644 --- a/synapse/handlers/admin.py +++ b/synapse/handlers/admin.py @@ -124,6 +124,7 @@ async def get_user(self, user: UserID) -> Optional[JsonMapping]: "consent_ts": user_info.consent_ts, "user_type": user_info.user_type, "is_guest": user_info.is_guest, + "suspended": user_info.suspended, } if self._msc3866_enabled: diff --git a/tests/rest/admin/test_user.py b/tests/rest/admin/test_user.py index 6d050e778403..fdb8fafa0e84 100644 --- a/tests/rest/admin/test_user.py +++ b/tests/rest/admin/test_user.py @@ -3222,6 +3222,7 @@ def _check_fields(self, content: JsonDict) -> None: self.assertIn("consent_ts", content) self.assertIn("external_ids", content) self.assertIn("last_seen_ts", content) + self.assertIn("suspended", content) # This key was removed intentionally. Ensure it is not accidentally re-included. self.assertNotIn("password_hash", content) From 93cc9550519fd71a4929e23224ab9b4d2738705c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 14:23:32 +0000 Subject: [PATCH 115/147] Bump tornado from 6.4.1 to 6.4.2 (#17955) --- poetry.lock | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/poetry.lock b/poetry.lock index eece22109560..cbabacea57ff 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2526,22 +2526,22 @@ files = [ [[package]] name = "tornado" -version = "6.4.1" +version = "6.4.2" description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
-optional = true +optional = false python-versions = ">=3.8" files = [ - {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:163b0aafc8e23d8cdc3c9dfb24c5368af84a81e3364745ccb4427669bf84aec8"}, - {file = "tornado-6.4.1-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:6d5ce3437e18a2b66fbadb183c1d3364fb03f2be71299e7d10dbeeb69f4b2a14"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2e20b9113cd7293f164dc46fffb13535266e713cdb87bd2d15ddb336e96cfc4"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ae50a504a740365267b2a8d1a90c9fbc86b780a39170feca9bcc1787ff80842"}, - {file = "tornado-6.4.1-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:613bf4ddf5c7a95509218b149b555621497a6cc0d46ac341b30bd9ec19eac7f3"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:25486eb223babe3eed4b8aecbac33b37e3dd6d776bc730ca14e1bf93888b979f"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:454db8a7ecfcf2ff6042dde58404164d969b6f5d58b926da15e6b23817950fc4"}, - {file = "tornado-6.4.1-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a02a08cc7a9314b006f653ce40483b9b3c12cda222d6a46d4ac63bb6c9057698"}, - {file = "tornado-6.4.1-cp38-abi3-win32.whl", hash = "sha256:d9a566c40b89757c9aa8e6f032bcdb8ca8795d7c1a9762910c722b1635c9de4d"}, - {file = "tornado-6.4.1-cp38-abi3-win_amd64.whl", hash = "sha256:b24b8982ed444378d7f21d563f4180a2de31ced9d8d84443907a0a64da2072e7"}, - {file = "tornado-6.4.1.tar.gz", hash = "sha256:92d3ab53183d8c50f8204a51e6f91d18a15d5ef261e84d452800d4ff6fc504e9"}, + {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"}, + {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:072ce12ada169c5b00b7d92a99ba089447ccc993ea2143c9ede887e0937aa803"}, + {file = "tornado-6.4.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a017d239bd1bb0919f72af256a970624241f070496635784d9bf0db640d3fec"}, + {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c36e62ce8f63409301537222faffcef7dfc5284f27eec227389f2ad11b09d946"}, + {file = "tornado-6.4.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bca9eb02196e789c9cb5c3c7c0f04fb447dc2adffd95265b2c7223a8a615ccbf"}, + {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:304463bd0772442ff4d0f5149c6f1c2135a1fae045adf070821c6cdc76980634"}, + {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_i686.whl", hash = "sha256:c82c46813ba483a385ab2a99caeaedf92585a1f90defb5693351fa7e4ea0bf73"}, + {file = "tornado-6.4.2-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:932d195ca9015956fa502c6b56af9eb06106140d844a335590c1ec7f5277d10c"}, + {file = "tornado-6.4.2-cp38-abi3-win32.whl", hash = "sha256:2876cef82e6c5978fde1e0d5b1f919d756968d5b4282418f3146b79b58556482"}, + {file = "tornado-6.4.2-cp38-abi3-win_amd64.whl", hash = "sha256:908b71bf3ff37d81073356a5fadcc660eb10c1476ee6e2725588626ce7e5ca38"}, + {file = "tornado-6.4.2.tar.gz", hash = "sha256:92bad5b4746e9879fd7bf1eb21dce4e3fc5128d71601f80005afa39237ad620b"}, ] [[package]] From 3943d2fde746b3adacffa25794dbf4acea249870 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 25 Nov 2024 
18:12:33 +0000 Subject: [PATCH 116/147] Fix up logic for delaying sending read receipts over federation. (#17933) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit For context of why we delay read receipts, see https://github.com/matrix-org/synapse/issues/4730. Element Web often sends read receipts in quick succession, if it reloads the timeline it'll send one for the last message in the old timeline and again for the last message in the new timeline. This caused remote users to see a read receipt for older messages come through quickly, but then the second read receipt taking a while to arrive for the most recent message. There are two things going on in this PR: 1. There was a mismatch between seconds and milliseconds, and so we ended up delaying for far longer than intended. 2. Changing the logic to reuse the `DestinationWakeupQueue` (used for presence) The changes in logic are: - Treat the first receipt and subsequent receipts in a room in the same way - Whitelist certain classes of receipts to never delay being sent, i.e. receipts in small rooms, receipts for events that were sent within the last 60s, and sending receipts to the event sender's server. - The maximum delay a receipt can have before being sent to a server is 30s, and we'll send out receipts to remotes at least at 50Hz (by default) The upshot is that this should make receipts feel more snappy over federation. This new logic should send roughly between 10%–20% of transactions immediately on matrix.org. --- changelog.d/17933.bugfix | 1 + synapse/federation/sender/__init__.py | 165 ++++++++++-------- .../sender/per_destination_queue.py | 25 +-- synapse/storage/databases/main/cache.py | 2 + .../storage/databases/main/events_worker.py | 27 +++ tests/federation/test_federation_sender.py | 134 ++++++++++---- 6 files changed, 220 insertions(+), 134 deletions(-) create mode 100644 changelog.d/17933.bugfix diff --git a/changelog.d/17933.bugfix b/changelog.d/17933.bugfix new file mode 100644 index 000000000000..8d30ac587eba --- /dev/null +++ b/changelog.d/17933.bugfix @@ -0,0 +1 @@ +Fix long-standing bug where read receipts could get overly delayed being sent over federation. diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 188848088153..17cddf18a382 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -140,7 +140,6 @@ Iterable, List, Optional, - Set, Tuple, ) @@ -170,7 +169,13 @@ run_as_background_process, wrap_as_background_process, ) -from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection +from synapse.types import ( + JsonDict, + ReadReceipt, + RoomStreamToken, + StrCollection, + get_domain_from_id, +) from synapse.util import Clock from synapse.util.metrics import Measure from synapse.util.retryutils import filter_destinations_by_retry_limiter @@ -297,12 +302,10 @@ class _DestinationWakeupQueue: # being woken up. _MAX_TIME_IN_QUEUE = 30.0 - # The maximum duration in seconds between waking up consecutive destination - # queues. - _MAX_DELAY = 0.1 - sender: "FederationSender" = attr.ib() clock: Clock = attr.ib() + max_delay_s: int = attr.ib() + queue: "OrderedDict[str, Literal[None]]" = attr.ib(factory=OrderedDict) processing: bool = attr.ib(default=False) @@ -332,7 +335,7 @@ async def _handle(self) -> None: # We also add an upper bound to the delay, to gracefully handle the # case where the queue only has a few entries in it. 
current_sleep_seconds = min( - self._MAX_DELAY, self._MAX_TIME_IN_QUEUE / len(self.queue) + self.max_delay_s, self._MAX_TIME_IN_QUEUE / len(self.queue) ) while self.queue: @@ -416,19 +419,14 @@ def __init__(self, hs: "HomeServer"): self._is_processing = False self._last_poked_id = -1 - # map from room_id to a set of PerDestinationQueues which we believe are - # awaiting a call to flush_read_receipts_for_room. The presence of an entry - # here for a given room means that we are rate-limiting RR flushes to that room, - # and that there is a pending call to _flush_rrs_for_room in the system. - self._queues_awaiting_rr_flush_by_room: Dict[str, Set[PerDestinationQueue]] = {} + self._external_cache = hs.get_external_cache() - self._rr_txn_interval_per_room_ms = ( - 1000.0 - / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second + rr_txn_interval_per_room_s = ( + 1.0 / hs.config.ratelimiting.federation_rr_transactions_per_room_per_second + ) + self._destination_wakeup_queue = _DestinationWakeupQueue( + self, self.clock, max_delay_s=rr_txn_interval_per_room_s ) - - self._external_cache = hs.get_external_cache() - self._destination_wakeup_queue = _DestinationWakeupQueue(self, self.clock) # Regularly wake up destinations that have outstanding PDUs to be caught up self.clock.looping_call_now( @@ -745,37 +743,48 @@ async def send_read_receipt(self, receipt: ReadReceipt) -> None: # Some background on the rate-limiting going on here. # - # It turns out that if we attempt to send out RRs as soon as we get them from - # a client, then we end up trying to do several hundred Hz of federation - # transactions. (The number of transactions scales as O(N^2) on the size of a - # room, since in a large room we have both more RRs coming in, and more servers - # to send them to.) + # It turns out that if we attempt to send out RRs as soon as we get them + # from a client, then we end up trying to do several hundred Hz of + # federation transactions. (The number of transactions scales as O(N^2) + # on the size of a room, since in a large room we have both more RRs + # coming in, and more servers to send them to.) # - # This leads to a lot of CPU load, and we end up getting behind. The solution - # currently adopted is as follows: + # This leads to a lot of CPU load, and we end up getting behind. The + # solution currently adopted is to differentiate between receipts and + # destinations we should immediately send to, and those we can trickle + # the receipts to. # - # The first receipt in a given room is sent out immediately, at time T0. Any - # further receipts are, in theory, batched up for N seconds, where N is calculated - # based on the number of servers in the room to achieve a transaction frequency - # of around 50Hz. So, for example, if there were 100 servers in the room, then - # N would be 100 / 50Hz = 2 seconds. + # The current logic is to send receipts out immediately if: + # - the room is "small", i.e. there's only N servers to send receipts + # to, and so sending out the receipts immediately doesn't cause too + # much load; or + # - the receipt is for an event that happened recently, as users + # notice if receipts are delayed when they know other users are + # currently reading the room; or + # - the receipt is being sent to the server that sent the event, so + # that users see receipts for their own receipts quickly. # - # Then, after T+N, we flush out any receipts that have accumulated, and restart - # the timer to flush out more receipts at T+2N, etc. 
If no receipts accumulate, - # we stop the cycle and go back to the start. + # For destinations that we should delay sending the receipt to, we queue + # the receipts up to be sent in the next transaction, but don't trigger + # a new transaction to be sent. We then add the destination to the + # `DestinationWakeupQueue`, which will slowly iterate over each + # destination and trigger a new transaction to be sent. # - # However, in practice, it is often possible to flush out receipts earlier: in - # particular, if we are sending a transaction to a given server anyway (for - # example, because we have a PDU or a RR in another room to send), then we may - # as well send out all of the pending RRs for that server. So it may be that - # by the time we get to T+N, we don't actually have any RRs left to send out. - # Nevertheless we continue to buffer up RRs for the room in question until we - # reach the point that no RRs arrive between timer ticks. + # However, in practice, it is often possible to send out delayed + # receipts earlier: in particular, if we are sending a transaction to a + # given server anyway (for example, because we have a PDU or a RR in + # another room to send), then we may as well send out all of the pending + # RRs for that server. So it may be that by the time we get to waking up + # the destination, we don't actually have any RRs left to send out. # - # For even more background, see https://github.com/matrix-org/synapse/issues/4730. + # For even more background, see + # https://github.com/matrix-org/synapse/issues/4730. room_id = receipt.room_id + # Local read receipts always have 1 event ID. + event_id = receipt.event_ids[0] + # Work out which remote servers should be poked and poke them. domains_set = await self._storage_controllers.state.get_current_hosts_in_room_or_partial_state_approximation( room_id @@ -797,49 +806,51 @@ async def send_read_receipt(self, receipt: ReadReceipt) -> None: if not domains: return - queues_pending_flush = self._queues_awaiting_rr_flush_by_room.get(room_id) + # We now split which domains we want to wake up immediately vs which we + # want to delay waking up. + immediate_domains: StrCollection + delay_domains: StrCollection - # if there is no flush yet scheduled, we will send out these receipts with - # immediate flushes, and schedule the next flush for this room. 
- if queues_pending_flush is not None: - logger.debug("Queuing receipt for: %r", domains) + if len(domains) < 10: + # For "small" rooms send to all domains immediately + immediate_domains = domains + delay_domains = () else: - logger.debug("Sending receipt to: %r", domains) - self._schedule_rr_flush_for_room(room_id, len(domains)) + metadata = await self.store.get_metadata_for_event( + receipt.room_id, event_id + ) + assert metadata is not None - for domain in domains: - queue = self._get_per_destination_queue(domain) - queue.queue_read_receipt(receipt) + sender_domain = get_domain_from_id(metadata.sender) - # if there is already a RR flush pending for this room, then make sure this - # destination is registered for the flush - if queues_pending_flush is not None: - queues_pending_flush.add(queue) + if self.clock.time_msec() - metadata.received_ts < 60_000: + # We always send receipts for recent messages immediately + immediate_domains = domains + delay_domains = () else: - queue.flush_read_receipts_for_room(room_id) - - def _schedule_rr_flush_for_room(self, room_id: str, n_domains: int) -> None: - # that is going to cause approximately len(domains) transactions, so now back - # off for that multiplied by RR_TXN_INTERVAL_PER_ROOM - backoff_ms = self._rr_txn_interval_per_room_ms * n_domains - - logger.debug("Scheduling RR flush in %s in %d ms", room_id, backoff_ms) - self.clock.call_later(backoff_ms, self._flush_rrs_for_room, room_id) - self._queues_awaiting_rr_flush_by_room[room_id] = set() - - def _flush_rrs_for_room(self, room_id: str) -> None: - queues = self._queues_awaiting_rr_flush_by_room.pop(room_id) - logger.debug("Flushing RRs in %s to %s", room_id, queues) - - if not queues: - # no more RRs arrived for this room; we are done. - return + # Otherwise, we delay waking up all destinations except for the + # sender's domain. + immediate_domains = [] + delay_domains = [] + for domain in domains: + if domain == sender_domain: + immediate_domains.append(domain) + else: + delay_domains.append(domain) + + for domain in immediate_domains: + # Add to destination queue and wake the destination up + queue = self._get_per_destination_queue(domain) + queue.queue_read_receipt(receipt) + queue.attempt_new_transaction() - # schedule the next flush - self._schedule_rr_flush_for_room(room_id, len(queues)) + for domain in delay_domains: + # Add to destination queue... + queue = self._get_per_destination_queue(domain) + queue.queue_read_receipt(receipt) - for queue in queues: - queue.flush_read_receipts_for_room(room_id) + # ... and schedule the destination to be woken up. + self._destination_wakeup_queue.add_to_queue(domain) async def send_presence_to_destinations( self, states: Iterable[UserPresenceState], destinations: Iterable[str] diff --git a/synapse/federation/sender/per_destination_queue.py b/synapse/federation/sender/per_destination_queue.py index d097e65ea745..b3f65e8237e7 100644 --- a/synapse/federation/sender/per_destination_queue.py +++ b/synapse/federation/sender/per_destination_queue.py @@ -156,7 +156,6 @@ def __init__( # Each receipt can only have a single receipt per # (room ID, receipt type, user ID, thread ID) tuple. self._pending_receipt_edus: List[Dict[str, Dict[str, Dict[str, dict]]]] = [] - self._rrs_pending_flush = False # stream_id of last successfully sent to-device message. # NB: may be a long or an int. 
@@ -258,15 +257,7 @@ def queue_read_receipt(self, receipt: ReadReceipt) -> None: } ) - def flush_read_receipts_for_room(self, room_id: str) -> None: - # If there are any pending receipts for this room then force-flush them - # in a new transaction. - for edu in self._pending_receipt_edus: - if room_id in edu: - self._rrs_pending_flush = True - self.attempt_new_transaction() - # No use in checking remaining EDUs if the room was found. - break + self.mark_new_data() def send_keyed_edu(self, edu: Edu, key: Hashable) -> None: self._pending_edus_keyed[(edu.edu_type, key)] = edu @@ -603,12 +594,9 @@ async def _catch_up_transmission_loop(self) -> None: self._destination, last_successful_stream_ordering ) - def _get_receipt_edus(self, force_flush: bool, limit: int) -> Iterable[Edu]: + def _get_receipt_edus(self, limit: int) -> Iterable[Edu]: if not self._pending_receipt_edus: return - if not force_flush and not self._rrs_pending_flush: - # not yet time for this lot - return # Send at most limit EDUs for receipts. for content in self._pending_receipt_edus[:limit]: @@ -747,7 +735,7 @@ async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]: ) # Add read receipt EDUs. - pending_edus.extend(self.queue._get_receipt_edus(force_flush=False, limit=5)) + pending_edus.extend(self.queue._get_receipt_edus(limit=5)) edu_limit = MAX_EDUS_PER_TRANSACTION - len(pending_edus) # Next, prioritize to-device messages so that existing encryption channels @@ -795,13 +783,6 @@ async def __aenter__(self) -> Tuple[List[EventBase], List[Edu]]: if not self._pdus and not pending_edus: return [], [] - # if we've decided to send a transaction anyway, and we have room, we - # may as well send any pending RRs - if edu_limit: - pending_edus.extend( - self.queue._get_receipt_edus(force_flush=True, limit=edu_limit) - ) - if self._pdus: self._last_stream_ordering = self._pdus[ -1 diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index 32c3472e585d..707d18de78a3 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -322,6 +322,7 @@ def _invalidate_caches_for_event( self._attempt_to_invalidate_cache( "get_unread_event_push_actions_by_room_for_user", (room_id,) ) + self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id, event_id)) self._attempt_to_invalidate_cache("_get_max_event_pos", (room_id,)) @@ -446,6 +447,7 @@ def _invalidate_caches_for_room_events(self, room_id: str) -> None: self._attempt_to_invalidate_cache("_get_state_group_for_event", None) self._attempt_to_invalidate_cache("get_event_ordering", None) + self._attempt_to_invalidate_cache("get_metadata_for_event", (room_id,)) self._attempt_to_invalidate_cache("is_partial_state_event", None) self._attempt_to_invalidate_cache("_get_joined_profile_from_event_id", None) diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 403407068c5f..825fd00993a6 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -193,6 +193,14 @@ class _EventRow: outlier: bool +@attr.s(slots=True, frozen=True, auto_attribs=True) +class EventMetadata: + """Event metadata returned by `get_metadata_for_event(..)`""" + + sender: str + received_ts: int + + class EventRedactBehaviour(Enum): """ What to do when retrieving a redacted event from the database. 
@@ -2580,3 +2588,22 @@ async def have_finished_sliding_sync_background_jobs(self) -> bool: _BackgroundUpdates.SLIDING_SYNC_MEMBERSHIP_SNAPSHOTS_BG_UPDATE, ) ) + + @cached(tree=True) + async def get_metadata_for_event( + self, room_id: str, event_id: str + ) -> Optional[EventMetadata]: + row = await self.db_pool.simple_select_one( + table="events", + keyvalues={"room_id": room_id, "event_id": event_id}, + retcols=("sender", "received_ts"), + allow_none=True, + desc="get_metadata_for_event", + ) + if row is None: + return None + + return EventMetadata( + sender=row[0], + received_ts=row[1], + ) diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index 6a8887fe7443..cd906bbbc786 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -34,6 +34,7 @@ from synapse.rest import admin from synapse.rest.client import login from synapse.server import HomeServer +from synapse.storage.databases.main.events_worker import EventMetadata from synapse.types import JsonDict, ReadReceipt from synapse.util import Clock @@ -55,12 +56,15 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: federation_transport_client=self.federation_transport_client, ) - hs.get_storage_controllers().state.get_current_hosts_in_room = AsyncMock( # type: ignore[method-assign] + self.main_store = hs.get_datastores().main + self.state_controller = hs.get_storage_controllers().state + + self.state_controller.get_current_hosts_in_room = AsyncMock( # type: ignore[method-assign] return_value={"test", "host2"} ) - hs.get_storage_controllers().state.get_current_hosts_in_room_or_partial_state_approximation = ( # type: ignore[method-assign] - hs.get_storage_controllers().state.get_current_hosts_in_room + self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = ( # type: ignore[method-assign] + self.state_controller.get_current_hosts_in_room ) return hs @@ -185,12 +189,15 @@ def test_send_receipts_thread(self) -> None: ], ) - def test_send_receipts_with_backoff(self) -> None: - """Send two receipts in quick succession; the second should be flushed, but - only after 20ms""" + def test_send_receipts_with_backoff_small_room(self) -> None: + """Read receipt in small rooms should not be delayed""" mock_send_transaction = self.federation_transport_client.send_transaction mock_send_transaction.return_value = {} + self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock( # type: ignore[method-assign] + return_value={"test", "host2"} + ) + sender = self.hs.get_federation_sender() receipt = ReadReceipt( "room_id", @@ -206,47 +213,104 @@ def test_send_receipts_with_backoff(self) -> None: # expect a call to send_transaction mock_send_transaction.assert_called_once() - json_cb = mock_send_transaction.call_args[0][1] - data = json_cb() - self.assertEqual( - data["edus"], - [ - { - "edu_type": EduTypes.RECEIPT, - "content": { - "room_id": { - "m.read": { - "user_id": { - "event_ids": ["event_id"], - "data": {"ts": 1234}, - } - } - } - }, - } - ], + self._assert_edu_in_call(mock_send_transaction.call_args[0][1]) + + def test_send_receipts_with_backoff_recent_event(self) -> None: + """Read receipt for a recent message should not be delayed""" + mock_send_transaction = self.federation_transport_client.send_transaction + mock_send_transaction.return_value = {} + + # Pretend this is a big room + self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = 
AsyncMock( # type: ignore[method-assign] + return_value={"test"} | {f"host{i}" for i in range(20)} ) + + self.main_store.get_metadata_for_event = AsyncMock( + return_value=EventMetadata( + received_ts=self.clock.time_msec(), + sender="@test:test", + ) + ) + + sender = self.hs.get_federation_sender() + receipt = ReadReceipt( + "room_id", + "m.read", + "user_id", + ["event_id"], + thread_id=None, + data={"ts": 1234}, + ) + self.get_success(sender.send_read_receipt(receipt)) + + self.pump() + + # expect a call to send_transaction for each host + self.assertEqual(mock_send_transaction.call_count, 20) + self._assert_edu_in_call(mock_send_transaction.call_args.args[1]) + mock_send_transaction.reset_mock() - # send the second RR + def test_send_receipts_with_backoff_sender(self) -> None: + """Read receipt for a message should not be delayed to the sender, but + is delayed to everyone else""" + mock_send_transaction = self.federation_transport_client.send_transaction + mock_send_transaction.return_value = {} + + # Pretend this is a big room + self.state_controller.get_current_hosts_in_room_or_partial_state_approximation = AsyncMock( # type: ignore[method-assign] + return_value={"test"} | {f"host{i}" for i in range(20)} + ) + + self.main_store.get_metadata_for_event = AsyncMock( + return_value=EventMetadata( + received_ts=self.clock.time_msec() - 5 * 60_000, + sender="@test:host1", + ) + ) + + sender = self.hs.get_federation_sender() receipt = ReadReceipt( "room_id", "m.read", "user_id", - ["other_id"], + ["event_id"], thread_id=None, data={"ts": 1234}, ) - self.successResultOf(defer.ensureDeferred(sender.send_read_receipt(receipt))) + self.get_success(sender.send_read_receipt(receipt)) + self.pump() - mock_send_transaction.assert_not_called() - self.reactor.advance(19) - mock_send_transaction.assert_not_called() + # First, expect a call to send_transaction for the sending host + mock_send_transaction.assert_called() - self.reactor.advance(10) - mock_send_transaction.assert_called_once() - json_cb = mock_send_transaction.call_args[0][1] + transaction = mock_send_transaction.call_args_list[0].args[0] + self.assertEqual(transaction.destination, "host1") + self._assert_edu_in_call(mock_send_transaction.call_args_list[0].args[1]) + + # We also expect a call to one of the other hosts, as the first + # destination to wake up. + self.assertEqual(mock_send_transaction.call_count, 2) + self._assert_edu_in_call(mock_send_transaction.call_args.args[1]) + + mock_send_transaction.reset_mock() + + # We now expect to see 18 more transactions to the remaining hosts + # periodically. 
+ for _ in range(18): + self.reactor.advance( + 1.0 + / self.hs.config.ratelimiting.federation_rr_transactions_per_room_per_second + ) + + mock_send_transaction.assert_called_once() + self._assert_edu_in_call(mock_send_transaction.call_args.args[1]) + mock_send_transaction.reset_mock() + + def _assert_edu_in_call(self, json_cb: Callable[[], JsonDict]) -> None: + """Assert that the given `json_cb` from a `send_transaction` has a + receipt in it.""" data = json_cb() self.assertEqual( data["edus"], @@ -257,7 +321,7 @@ def test_send_receipts_with_backoff(self) -> None: "room_id": { "m.read": { "user_id": { - "event_ids": ["other_id"], + "event_ids": ["event_id"], "data": {"ts": 1234}, } } From 02aa7adf4c3f3bf71ddfcd4bd80d6adcf74a444c Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 26 Nov 2024 07:45:18 +0000 Subject: [PATCH 117/147] Fix `delete_old_otks` job on worker deployments (#17960) In a worker-mode deployment, the `E2eKeysHandler` is not necessarily loaded, which means the handler for the `delete_old_otks` task will not be registered. Make sure we load the handler. Introduced in https://github.com/element-hq/synapse/pull/17934 --- changelog.d/17960.bugfix | 1 + synapse/server.py | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog.d/17960.bugfix diff --git a/changelog.d/17960.bugfix b/changelog.d/17960.bugfix new file mode 100644 index 000000000000..767085320dc3 --- /dev/null +++ b/changelog.d/17960.bugfix @@ -0,0 +1 @@ +Fix a bug introduced in Synapse v1.120rc1 which would cause the newly-introduced `delete_old_otks` job to fail in worker-mode deployments. diff --git a/synapse/server.py b/synapse/server.py index c7b491881395..462e15cc2ff6 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -254,6 +254,7 @@ class HomeServer(metaclass=abc.ABCMeta): "auth", "deactivate_account", "delayed_events", + "e2e_keys", # for the `delete_old_otks` scheduled-task handler "message", "pagination", "profile", From cd7d90bd281e235d91411d84e4724d01d849b314 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 09:30:16 +0000 Subject: [PATCH 118/147] Bump tomli from 2.0.2 to 2.1.0 (#17959) --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index cbabacea57ff..14bb9ad1723f 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2515,13 +2515,13 @@ twisted = ["twisted"] [[package]] name = "tomli" -version = "2.0.2" +version = "2.1.0" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, - {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, + {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, + {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, ] [[package]] From 8c653e1dd6c8f18f2f9e2d78d37877a70dba1b2d Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 26 Nov 2024 14:11:12 +0100 Subject: [PATCH 119/147] 1.120.0 --- CHANGES.md | 9 +++++++++ changelog.d/17960.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/17960.bugfix diff --git a/CHANGES.md b/CHANGES.md index 
31d9914b39fa..0caac3f89e33 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+# Synapse 1.120.0 (2024-11-26)
+
+### Bugfixes
+
+- Fix a bug introduced in Synapse v1.120rc1 which would cause the newly-introduced `delete_old_otks` job to fail in worker-mode deployments. ([\#17960](https://github.com/element-hq/synapse/issues/17960))
+
+
+
+
 # Synapse 1.120.0rc1 (2024-11-20)

 This release enables the enforcement of authenticated media by default, with exemptions for media that is already present in the
diff --git a/changelog.d/17960.bugfix b/changelog.d/17960.bugfix
deleted file mode 100644
index 767085320dc3..000000000000
--- a/changelog.d/17960.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug introduced in Synapse v1.120rc1 which would cause the newly-introduced `delete_old_otks` job to fail in worker-mode deployments.
diff --git a/debian/changelog b/debian/changelog
index d7cec3fa8a8e..bd4466d7aaae 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.120.0) stable; urgency=medium
+
+  * New synapse release 1.120.0.
+
+ -- Synapse Packaging team <packages@matrix.org>  Tue, 26 Nov 2024 13:10:23 +0000
+
 matrix-synapse-py3 (1.120.0~rc1) stable; urgency=medium

   * New Synapse release 1.120.0rc1.
diff --git a/pyproject.toml b/pyproject.toml
index f0a4b682de16..5fd1d7c19837 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.120.0rc1"
+version = "1.120.0"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors <packages@matrix.org>"]
 license = "AGPL-3.0-or-later"

From cee9da0da53ba9d04fbb989e2b35108266b2e787 Mon Sep 17 00:00:00 2001
From: Quentin Gliech
Date: Tue, 26 Nov 2024 19:43:26 +0100
Subject: [PATCH 120/147] MSC4108: Add a Content-Type header on the PUT
 response (#17253)

This is a workaround for some proxy setups, where the ETag header gets
stripped from the response headers unless there is a Content-Type
header set.

In particular, we saw this bug when putting Cloudflare in front of
Synapse. I'm pretty sure this is a Cloudflare bug, as this behaviour
isn't documented anywhere, and doesn't make sense whatsoever.

---------

Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
---
 changelog.d/17253.misc     | 1 +
 rust/src/rendezvous/mod.rs | 7 +++++++
 2 files changed, 8 insertions(+)
 create mode 100644 changelog.d/17253.misc

diff --git a/changelog.d/17253.misc b/changelog.d/17253.misc
new file mode 100644
index 000000000000..868691624d01
--- /dev/null
+++ b/changelog.d/17253.misc
@@ -0,0 +1 @@
+[MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108): Add a `Content-Type` header on the `PUT` response to work around a faulty behavior in some caching reverse proxies.
diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs
index f69f45490f02..55df0203f3fa 100644
--- a/rust/src/rendezvous/mod.rs
+++ b/rust/src/rendezvous/mod.rs
@@ -288,6 +288,13 @@ impl RendezvousHandler {
         let mut response = Response::new(Bytes::new());
         *response.status_mut() = StatusCode::ACCEPTED;
         prepare_headers(response.headers_mut(), session);
+
+        // Even though this isn't mandated by the MSC, we set a Content-Type on the response. It
+        // doesn't do any harm as the body is empty, but this helps escape a bug in some reverse
+        // proxy/cache setup which strips the ETag header if there is no Content-Type set.
+        // Specifically, we noticed this behaviour when placing Synapse behind Cloudflare.
+ response.headers_mut().typed_insert(ContentType::text()); + http_response_to_twisted(twisted_request, response)?; Ok(()) From a58f09acc78d2497fc7c3c8930c42233bcc7428c Mon Sep 17 00:00:00 2001 From: V02460 Date: Wed, 27 Nov 2024 11:46:00 +0100 Subject: [PATCH 121/147] Bump pyo3 to v0.23.2 (#17966) Keep up-to-date with pyo3 releases. This bump enables Python 3.13 support and resolves deprecations. Links for quick reference: https://github.com/PyO3/pyo3/releases https://github.com/davidhewitt/pythonize/releases https://github.com/vorner/pyo3-log --- Cargo.lock | 158 +++------------------------ changelog.d/17966.misc | 1 + rust/Cargo.toml | 6 +- rust/src/acl/mod.rs | 4 +- rust/src/events/internal_metadata.rs | 82 +++++++++----- rust/src/events/mod.rs | 4 +- rust/src/http.rs | 2 +- rust/src/lib.rs | 15 +++ rust/src/push/evaluator.rs | 2 + rust/src/push/mod.rs | 44 +++++--- rust/src/rendezvous/mod.rs | 13 ++- 11 files changed, 136 insertions(+), 195 deletions(-) create mode 100644 changelog.d/17966.misc diff --git a/Cargo.lock b/Cargo.lock index 5c8f627fd7c2..b7084165ee21 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -35,12 +35,6 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" -[[package]] -name = "bitflags" -version = "2.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" - [[package]] name = "blake2" version = "0.10.6" @@ -162,9 +156,9 @@ dependencies = [ [[package]] name = "heck" -version = "0.4.1" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hex" @@ -222,16 +216,6 @@ version = "0.2.154" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ae743338b92ff9146ce83992f766a31066a91a8c84a45e0e9f21e7cf6de6d346" -[[package]] -name = "lock_api" -version = "0.4.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" -dependencies = [ - "autocfg", - "scopeguard", -] - [[package]] name = "log" version = "0.4.22" @@ -265,29 +249,6 @@ version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" -[[package]] -name = "parking_lot" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" -dependencies = [ - "lock_api", - "parking_lot_core", -] - -[[package]] -name = "parking_lot_core" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" -dependencies = [ - "cfg-if", - "libc", - "redox_syscall", - "smallvec", - "windows-targets", -] - [[package]] name = "portable-atomic" version = "1.6.0" @@ -311,16 +272,16 @@ dependencies = [ [[package]] name = "pyo3" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8" +checksum = "f54b3d09cbdd1f8c20650b28e7b09e338881482f4aa908a5f61a00c98fba2690" dependencies = [ "anyhow", "cfg-if", "indoc", 
"libc", "memoffset", - "parking_lot", + "once_cell", "portable-atomic", "pyo3-build-config", "pyo3-ffi", @@ -330,9 +291,9 @@ dependencies = [ [[package]] name = "pyo3-build-config" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50" +checksum = "3015cf985888fe66cfb63ce0e321c603706cd541b7aec7ddd35c281390af45d8" dependencies = [ "once_cell", "target-lexicon", @@ -340,9 +301,9 @@ dependencies = [ [[package]] name = "pyo3-ffi" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403" +checksum = "6fca7cd8fd809b5ac4eefb89c1f98f7a7651d3739dfb341ca6980090f554c270" dependencies = [ "libc", "pyo3-build-config", @@ -350,9 +311,9 @@ dependencies = [ [[package]] name = "pyo3-log" -version = "0.10.0" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af49834b8d2ecd555177e63b273b708dea75150abc6f5341d0a6e1a9623976c" +checksum = "3eb421dc86d38d08e04b927b02424db480be71b777fa3a56f32e2f2a3a1a3b08" dependencies = [ "arc-swap", "log", @@ -361,9 +322,9 @@ dependencies = [ [[package]] name = "pyo3-macros" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c" +checksum = "34e657fa5379a79151b6ff5328d9216a84f55dc93b17b08e7c3609a969b73aa0" dependencies = [ "proc-macro2", "pyo3-macros-backend", @@ -373,9 +334,9 @@ dependencies = [ [[package]] name = "pyo3-macros-backend" -version = "0.21.2" +version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c" +checksum = "295548d5ffd95fd1981d2d3cf4458831b21d60af046b729b6fd143b0ba7aee2f" dependencies = [ "heck", "proc-macro2", @@ -386,9 +347,9 @@ dependencies = [ [[package]] name = "pythonize" -version = "0.21.1" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d0664248812c38cc55a4ed07f88e4df516ce82604b93b1ffdc041aa77a6cb3c" +checksum = "91a6ee7a084f913f98d70cdc3ebec07e852b735ae3059a1500db2661265da9ff" dependencies = [ "pyo3", "serde", @@ -433,15 +394,6 @@ dependencies = [ "getrandom", ] -[[package]] -name = "redox_syscall" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" -dependencies = [ - "bitflags", -] - [[package]] name = "regex" version = "1.11.1" @@ -477,12 +429,6 @@ version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" -[[package]] -name = "scopeguard" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" - [[package]] name = "serde" version = "1.0.215" @@ -537,12 +483,6 @@ dependencies = [ "digest", ] -[[package]] -name = "smallvec" -version = "1.13.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" - [[package]] name = "subtle" version = "2.5.0" @@ -694,67 +634,3 @@ dependencies = [ "js-sys", "wasm-bindgen", ] - -[[package]] -name = "windows-targets" -version = 
"0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" -dependencies = [ - "windows_aarch64_gnullvm", - "windows_aarch64_msvc", - "windows_i686_gnu", - "windows_i686_gnullvm", - "windows_i686_msvc", - "windows_x86_64_gnu", - "windows_x86_64_gnullvm", - "windows_x86_64_msvc", -] - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" - -[[package]] -name = "windows_i686_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" - -[[package]] -name = "windows_x86_64_gnu" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" - -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" - -[[package]] -name = "windows_x86_64_msvc" -version = "0.52.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" diff --git a/changelog.d/17966.misc b/changelog.d/17966.misc new file mode 100644 index 000000000000..c6d6e55fbf92 --- /dev/null +++ b/changelog.d/17966.misc @@ -0,0 +1 @@ +Bump pyo3 and dependencies to v0.23.2. \ No newline at end of file diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 026487275c42..7eebeb3b5582 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -30,14 +30,14 @@ http = "1.1.0" lazy_static = "1.4.0" log = "0.4.17" mime = "0.3.17" -pyo3 = { version = "0.21.0", features = [ +pyo3 = { version = "0.23.2", features = [ "macros", "anyhow", "abi3", "abi3-py38", ] } -pyo3-log = "0.10.0" -pythonize = "0.21.0" +pyo3-log = "0.12.0" +pythonize = "0.23.0" regex = "1.6.0" sha2 = "0.10.8" serde = { version = "1.0.144", features = ["derive"] } diff --git a/rust/src/acl/mod.rs b/rust/src/acl/mod.rs index 982720ba909c..57b45475fddf 100644 --- a/rust/src/acl/mod.rs +++ b/rust/src/acl/mod.rs @@ -32,14 +32,14 @@ use crate::push::utils::{glob_to_regex, GlobMatchType}; /// Called when registering modules with python. pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "acl")?; + let child_module = PyModule::new(py, "acl")?; child_module.add_class::()?; m.add_submodule(&child_module)?; // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import acl` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? 
.set_item("synapse.synapse_rust.acl", child_module)?; diff --git a/rust/src/events/internal_metadata.rs b/rust/src/events/internal_metadata.rs index ad87825f163b..eeb6074c10c8 100644 --- a/rust/src/events/internal_metadata.rs +++ b/rust/src/events/internal_metadata.rs @@ -41,9 +41,11 @@ use pyo3::{ pybacked::PyBackedStr, pyclass, pymethods, types::{PyAnyMethods, PyDict, PyDictMethods, PyString}, - Bound, IntoPy, PyAny, PyObject, PyResult, Python, + Bound, IntoPyObject, PyAny, PyObject, PyResult, Python, }; +use crate::UnwrapInfallible; + /// Definitions of the various fields of the internal metadata. #[derive(Clone)] enum EventInternalMetadataData { @@ -60,31 +62,59 @@ enum EventInternalMetadataData { impl EventInternalMetadataData { /// Convert the field to its name and python object. - fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, PyObject) { + fn to_python_pair<'a>(&self, py: Python<'a>) -> (&'a Bound<'a, PyString>, Bound<'a, PyAny>) { match self { - EventInternalMetadataData::OutOfBandMembership(o) => { - (pyo3::intern!(py, "out_of_band_membership"), o.into_py(py)) - } - EventInternalMetadataData::SendOnBehalfOf(o) => { - (pyo3::intern!(py, "send_on_behalf_of"), o.into_py(py)) - } - EventInternalMetadataData::RecheckRedaction(o) => { - (pyo3::intern!(py, "recheck_redaction"), o.into_py(py)) - } - EventInternalMetadataData::SoftFailed(o) => { - (pyo3::intern!(py, "soft_failed"), o.into_py(py)) - } - EventInternalMetadataData::ProactivelySend(o) => { - (pyo3::intern!(py, "proactively_send"), o.into_py(py)) - } - EventInternalMetadataData::Redacted(o) => { - (pyo3::intern!(py, "redacted"), o.into_py(py)) - } - EventInternalMetadataData::TxnId(o) => (pyo3::intern!(py, "txn_id"), o.into_py(py)), - EventInternalMetadataData::TokenId(o) => (pyo3::intern!(py, "token_id"), o.into_py(py)), - EventInternalMetadataData::DeviceId(o) => { - (pyo3::intern!(py, "device_id"), o.into_py(py)) - } + EventInternalMetadataData::OutOfBandMembership(o) => ( + pyo3::intern!(py, "out_of_band_membership"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::SendOnBehalfOf(o) => ( + pyo3::intern!(py, "send_on_behalf_of"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), + EventInternalMetadataData::RecheckRedaction(o) => ( + pyo3::intern!(py, "recheck_redaction"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::SoftFailed(o) => ( + pyo3::intern!(py, "soft_failed"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::ProactivelySend(o) => ( + pyo3::intern!(py, "proactively_send"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::Redacted(o) => ( + pyo3::intern!(py, "redacted"), + o.into_pyobject(py) + .unwrap_infallible() + .to_owned() + .into_any(), + ), + EventInternalMetadataData::TxnId(o) => ( + pyo3::intern!(py, "txn_id"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), + EventInternalMetadataData::TokenId(o) => ( + pyo3::intern!(py, "token_id"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), + EventInternalMetadataData::DeviceId(o) => ( + pyo3::intern!(py, "device_id"), + o.into_pyobject(py).unwrap_infallible().into_any(), + ), } } @@ -247,7 +277,7 @@ impl EventInternalMetadata { /// /// Note that `outlier` and `stream_ordering` are stored in separate columns so are not returned here. 
fn get_dict(&self, py: Python<'_>) -> PyResult { - let dict = PyDict::new_bound(py); + let dict = PyDict::new(py); for entry in &self.data { let (key, value) = entry.to_python_pair(py); diff --git a/rust/src/events/mod.rs b/rust/src/events/mod.rs index 0bb6cdb181c5..209efb917be0 100644 --- a/rust/src/events/mod.rs +++ b/rust/src/events/mod.rs @@ -30,7 +30,7 @@ mod internal_metadata; /// Called when registering modules with python. pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "events")?; + let child_module = PyModule::new(py, "events")?; child_module.add_class::()?; child_module.add_function(wrap_pyfunction!(filter::event_visible_to_server_py, m)?)?; @@ -38,7 +38,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import events` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? .set_item("synapse.synapse_rust.events", child_module)?; diff --git a/rust/src/http.rs b/rust/src/http.rs index af052ab7214a..63ed05be5468 100644 --- a/rust/src/http.rs +++ b/rust/src/http.rs @@ -70,7 +70,7 @@ pub fn http_request_from_twisted(request: &Bound<'_, PyAny>) -> PyResult, m: &Bound<'_, PyModule>) -> PyResult<()> { Ok(()) } + +pub trait UnwrapInfallible { + fn unwrap_infallible(self) -> T; +} + +impl UnwrapInfallible for Result { + fn unwrap_infallible(self) -> T { + match self { + Ok(val) => val, + Err(never) => match never {}, + } + } +} diff --git a/rust/src/push/evaluator.rs b/rust/src/push/evaluator.rs index 0d436a1d7b54..db406acb8817 100644 --- a/rust/src/push/evaluator.rs +++ b/rust/src/push/evaluator.rs @@ -167,6 +167,7 @@ impl PushRuleEvaluator { /// /// Returns the set of actions, if any, that match (filtering out any /// `dont_notify` and `coalesce` actions). + #[pyo3(signature = (push_rules, user_id=None, display_name=None))] pub fn run( &self, push_rules: &FilteredPushRules, @@ -236,6 +237,7 @@ impl PushRuleEvaluator { } /// Check if the given condition matches. + #[pyo3(signature = (condition, user_id=None, display_name=None))] fn matches( &self, condition: Condition, diff --git a/rust/src/push/mod.rs b/rust/src/push/mod.rs index ef8ed150d421..bd0e853ac31f 100644 --- a/rust/src/push/mod.rs +++ b/rust/src/push/mod.rs @@ -65,8 +65,8 @@ use anyhow::{Context, Error}; use log::warn; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; -use pyo3::types::{PyBool, PyList, PyLong, PyString}; -use pythonize::{depythonize_bound, pythonize}; +use pyo3::types::{PyBool, PyInt, PyList, PyString}; +use pythonize::{depythonize, pythonize, PythonizeError}; use serde::de::Error as _; use serde::{Deserialize, Serialize}; use serde_json::Value; @@ -79,7 +79,7 @@ pub mod utils; /// Called when registering modules with python. pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "push")?; + let child_module = PyModule::new(py, "push")?; child_module.add_class::()?; child_module.add_class::()?; child_module.add_class::()?; @@ -90,7 +90,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import push` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? 
.set_item("synapse.synapse_rust.push", child_module)?; @@ -182,12 +182,16 @@ pub enum Action { Unknown(Value), } -impl IntoPy for Action { - fn into_py(self, py: Python<'_>) -> PyObject { +impl<'py> IntoPyObject<'py> for Action { + type Target = PyAny; + type Output = Bound<'py, Self::Target>; + type Error = PythonizeError; + + fn into_pyobject(self, py: Python<'py>) -> Result { // When we pass the `Action` struct to Python we want it to be converted // to a dict. We use `pythonize`, which converts the struct using the // `serde` serialization. - pythonize(py, &self).expect("valid action") + pythonize(py, &self) } } @@ -270,13 +274,13 @@ pub enum SimpleJsonValue { } impl<'source> FromPyObject<'source> for SimpleJsonValue { - fn extract(ob: &'source PyAny) -> PyResult { + fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult { if let Ok(s) = ob.downcast::() { Ok(SimpleJsonValue::Str(Cow::Owned(s.to_string()))) // A bool *is* an int, ensure we try bool first. } else if let Ok(b) = ob.downcast::() { Ok(SimpleJsonValue::Bool(b.extract()?)) - } else if let Ok(i) = ob.downcast::() { + } else if let Ok(i) = ob.downcast::() { Ok(SimpleJsonValue::Int(i.extract()?)) } else if ob.is_none() { Ok(SimpleJsonValue::Null) @@ -298,15 +302,19 @@ pub enum JsonValue { } impl<'source> FromPyObject<'source> for JsonValue { - fn extract(ob: &'source PyAny) -> PyResult { + fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult { if let Ok(l) = ob.downcast::() { - match l.iter().map(SimpleJsonValue::extract).collect() { + match l + .iter() + .map(|it| SimpleJsonValue::extract_bound(&it)) + .collect() + { Ok(a) => Ok(JsonValue::Array(a)), Err(e) => Err(PyTypeError::new_err(format!( "Can't convert to JsonValue::Array: {e}" ))), } - } else if let Ok(v) = SimpleJsonValue::extract(ob) { + } else if let Ok(v) = SimpleJsonValue::extract_bound(ob) { Ok(JsonValue::Value(v)) } else { Err(PyTypeError::new_err(format!( @@ -363,15 +371,19 @@ pub enum KnownCondition { }, } -impl IntoPy for Condition { - fn into_py(self, py: Python<'_>) -> PyObject { - pythonize(py, &self).expect("valid condition") +impl<'source> IntoPyObject<'source> for Condition { + type Target = PyAny; + type Output = Bound<'source, Self::Target>; + type Error = PythonizeError; + + fn into_pyobject(self, py: Python<'source>) -> Result { + pythonize(py, &self) } } impl<'source> FromPyObject<'source> for Condition { fn extract_bound(ob: &Bound<'source, PyAny>) -> PyResult { - Ok(depythonize_bound(ob.clone())?) + Ok(depythonize(ob)?) } } diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs index 55df0203f3fa..23de66810282 100644 --- a/rust/src/rendezvous/mod.rs +++ b/rust/src/rendezvous/mod.rs @@ -29,7 +29,7 @@ use pyo3::{ exceptions::PyValueError, pyclass, pymethods, types::{PyAnyMethods, PyModule, PyModuleMethods}, - Bound, Py, PyAny, PyObject, PyResult, Python, ToPyObject, + Bound, IntoPyObject, Py, PyAny, PyObject, PyResult, Python, }; use ulid::Ulid; @@ -37,6 +37,7 @@ use self::session::Session; use crate::{ errors::{NotFoundError, SynapseError}, http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt}, + UnwrapInfallible, }; mod session; @@ -125,7 +126,11 @@ impl RendezvousHandler { let base = Uri::try_from(format!("{base}_synapse/client/rendezvous")) .map_err(|_| PyValueError::new_err("Invalid base URI"))?; - let clock = homeserver.call_method0("get_clock")?.to_object(py); + let clock = homeserver + .call_method0("get_clock")? 
+ .into_pyobject(py) + .unwrap_infallible() + .unbind(); // Construct a Python object so that we can get a reference to the // evict method and schedule it to run. @@ -318,7 +323,7 @@ impl RendezvousHandler { } pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> { - let child_module = PyModule::new_bound(py, "rendezvous")?; + let child_module = PyModule::new(py, "rendezvous")?; child_module.add_class::()?; @@ -326,7 +331,7 @@ pub fn register_module(py: Python<'_>, m: &Bound<'_, PyModule>) -> PyResult<()> // We need to manually add the module to sys.modules to make `from // synapse.synapse_rust import rendezvous` work. - py.import_bound("sys")? + py.import("sys")? .getattr("modules")? .set_item("synapse.synapse_rust.rendezvous", child_module)?; From 59ad4b18fc81f373c57a7c7d09a2f340a129de76 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 27 Nov 2024 13:31:43 +0000 Subject: [PATCH 122/147] Update setuptools-rust and fix building abi3 wheels (#17969) Newer versions of `setuptools-rust` ignore the `py_limited_api` flag to `RustExtension`, and instead read it from `bdist_wheel` config. c.f. https://github.com/PyO3/setuptools-rust/blob/main/CHANGELOG.md#190-2024-02-24 --- build_rust.py | 19 +++++++++++++++++++ changelog.d/17969.misc | 1 + poetry.lock | 11 +++++------ 3 files changed, 25 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17969.misc diff --git a/build_rust.py b/build_rust.py index 662474dcb47c..d2726cee265f 100644 --- a/build_rust.py +++ b/build_rust.py @@ -1,8 +1,10 @@ # A build script for poetry that adds the rust extension. +import itertools import os from typing import Any, Dict +from packaging.specifiers import SpecifierSet from setuptools_rust import Binding, RustExtension @@ -14,6 +16,8 @@ def build(setup_kwargs: Dict[str, Any]) -> None: target="synapse.synapse_rust", path=cargo_toml_path, binding=Binding.PyO3, + # This flag is a no-op in the latest versions. Instead, we need to + # specify this in the `bdist_wheel` config below. py_limited_api=True, # We force always building in release mode, as we can't tell the # difference between using `poetry` in development vs production. @@ -21,3 +25,18 @@ def build(setup_kwargs: Dict[str, Any]) -> None: ) setup_kwargs.setdefault("rust_extensions", []).append(extension) setup_kwargs["zip_safe"] = False + + # We lookup the minimum supported python version by looking at + # `python_requires` (e.g. ">=3.9.0,<4.0.0") and finding the first python + # version that matches. We then convert that into the `py_limited_api` form, + # e.g. cp39 for python 3.9. + py_limited_api: str + python_bounds = SpecifierSet(setup_kwargs["python_requires"]) + for minor_version in itertools.count(start=8): + if f"3.{minor_version}.0" in python_bounds: + py_limited_api = f"cp3{minor_version}" + break + + setup_kwargs.setdefault("options", {}).setdefault("bdist_wheel", {})[ + "py_limited_api" + ] = py_limited_api diff --git a/changelog.d/17969.misc b/changelog.d/17969.misc new file mode 100644 index 000000000000..05506daaa06f --- /dev/null +++ b/changelog.d/17969.misc @@ -0,0 +1 @@ +Update setuptools-rust and fix building abi3 wheels in latest version. diff --git a/poetry.lock b/poetry.lock index 14bb9ad1723f..f43fe2489ae5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. 
 
 [[package]]
 name = "annotated-types"
@@ -2405,19 +2405,18 @@ test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "importlib-metadata

 [[package]]
 name = "setuptools-rust"
-version = "1.8.1"
+version = "1.10.2"
 description = "Setuptools Rust extension plugin"
 optional = false
 python-versions = ">=3.8"
 files = [
-    {file = "setuptools-rust-1.8.1.tar.gz", hash = "sha256:94b1dd5d5308b3138d5b933c3a2b55e6d6927d1a22632e509fcea9ddd0f7e486"},
-    {file = "setuptools_rust-1.8.1-py3-none-any.whl", hash = "sha256:b5324493949ccd6aa0c03890c5f6b5f02de4512e3ac1697d02e9a6c02b18aa8e"},
+    {file = "setuptools_rust-1.10.2-py3-none-any.whl", hash = "sha256:4b39c435ae9670315d522ed08fa0e8cb29f2a6048033966b6be2571a90ce4f1c"},
+    {file = "setuptools_rust-1.10.2.tar.gz", hash = "sha256:5d73e7eee5f87a6417285b617c97088a7c20d1a70fcea60e3bdc94ff567c29dc"},
 ]

 [package.dependencies]
 semantic-version = ">=2.8.2,<3"
 setuptools = ">=62.4"
-tomli = {version = ">=1.2.1", markers = "python_version < \"3.11\""}

 [[package]]
 name = "signedjson"
@@ -2528,7 +2527,7 @@ files = [
 name = "tornado"
 version = "6.4.2"
 description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed."
-optional = false
+optional = true
 python-versions = ">=3.8"
 files = [
     {file = "tornado-6.4.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e828cce1123e9e44ae2a50a9de3055497ab1d0aeb440c5ac23064d9e44880da1"},

From d80cd57c54427687afcb48740d99219c88a0fff1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Thu, 28 Nov 2024 18:06:19 +0000
Subject: [PATCH 123/147] Fix new scheduled tasks jumping the queue (#17962)

Currently, when a new scheduled task is added and its scheduled time
has already passed, we set it to ACTIVE. This is problematic, because
it means it will jump the queue ahead of all other SCHEDULED tasks;
furthermore, if the Synapse process gets restarted, it will jump ahead
of any ACTIVE tasks which have been started but are taking a while to
run.

Instead, we leave it set to SCHEDULED, but kick off a call to
`_launch_scheduled_tasks`, which will decide if we actually have
capacity to start a new task, and start the newly-added task if so.
---
 changelog.d/17962.misc              |  1 +
 synapse/replication/tcp/commands.py |  2 +-
 synapse/replication/tcp/handler.py  |  2 +-
 synapse/util/task_scheduler.py      | 65 ++++++++++++++---------------
 tests/util/test_task_scheduler.py   | 49 +++++++++++++---------
 5 files changed, 64 insertions(+), 55 deletions(-)
 create mode 100644 changelog.d/17962.misc

diff --git a/changelog.d/17962.misc b/changelog.d/17962.misc
new file mode 100644
index 000000000000..adf634870799
--- /dev/null
+++ b/changelog.d/17962.misc
@@ -0,0 +1 @@
+Fix new scheduled tasks jumping the queue.
diff --git a/synapse/replication/tcp/commands.py b/synapse/replication/tcp/commands.py
index 7d51441e9145..6ab53566600d 100644
--- a/synapse/replication/tcp/commands.py
+++ b/synapse/replication/tcp/commands.py
@@ -495,7 +495,7 @@ def to_line(self) -> str:


 class NewActiveTaskCommand(_SimpleCommand):
-    """Sent to inform instance handling background tasks that a new active task is available to run.
+    """Sent to inform instance handling background tasks that a new task is ready to run.
Format:: diff --git a/synapse/replication/tcp/handler.py b/synapse/replication/tcp/handler.py index 61012269380f..1fafbb48c3e7 100644 --- a/synapse/replication/tcp/handler.py +++ b/synapse/replication/tcp/handler.py @@ -727,7 +727,7 @@ def on_NEW_ACTIVE_TASK( ) -> None: """Called when get a new NEW_ACTIVE_TASK command.""" if self._task_scheduler: - self._task_scheduler.launch_task_by_id(cmd.data) + self._task_scheduler.on_new_task(cmd.data) def new_connection(self, connection: IReplicationConnection) -> None: """Called when we have a new connection.""" diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 448960b29784..3ed457bd3078 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -174,9 +174,10 @@ async def schedule_task( The id of the scheduled task """ status = TaskStatus.SCHEDULED + start_now = False if timestamp is None or timestamp < self._clock.time_msec(): timestamp = self._clock.time_msec() - status = TaskStatus.ACTIVE + start_now = True task = ScheduledTask( random_string(16), @@ -190,9 +191,11 @@ async def schedule_task( ) await self._store.insert_scheduled_task(task) - if status == TaskStatus.ACTIVE: + # If the task is ready to run immediately, run the scheduling algorithm now + # rather than waiting + if start_now: if self._run_background_tasks: - await self._launch_task(task) + self._launch_scheduled_tasks() else: self._hs.get_replication_command_handler().send_new_active_task(task.id) @@ -300,23 +303,13 @@ async def delete_task(self, id: str) -> None: raise Exception(f"Task {id} is currently ACTIVE and can't be deleted") await self._store.delete_scheduled_task(id) - def launch_task_by_id(self, id: str) -> None: - """Try launching the task with the given ID.""" - # Don't bother trying to launch new tasks if we're already at capacity. - if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: - return - - run_as_background_process("launch_task_by_id", self._launch_task_by_id, id) - - async def _launch_task_by_id(self, id: str) -> None: - """Helper async function for `launch_task_by_id`.""" - task = await self.get_task(id) - if task: - await self._launch_task(task) + def on_new_task(self, task_id: str) -> None: + """Handle a notification that a new ready-to-run task has been added to the queue""" + # Just run the scheduler + self._launch_scheduled_tasks() - @wrap_as_background_process("launch_scheduled_tasks") - async def _launch_scheduled_tasks(self) -> None: - """Retrieve and launch scheduled tasks that should be running at that time.""" + def _launch_scheduled_tasks(self) -> None: + """Retrieve and launch scheduled tasks that should be running at this time.""" # Don't bother trying to launch new tasks if we're already at capacity. 
if len(self._running_tasks) >= TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS: return @@ -326,20 +319,26 @@ async def _launch_scheduled_tasks(self) -> None: self._launching_new_tasks = True - try: - for task in await self.get_tasks( - statuses=[TaskStatus.ACTIVE], limit=self.MAX_CONCURRENT_RUNNING_TASKS - ): - await self._launch_task(task) - for task in await self.get_tasks( - statuses=[TaskStatus.SCHEDULED], - max_timestamp=self._clock.time_msec(), - limit=self.MAX_CONCURRENT_RUNNING_TASKS, - ): - await self._launch_task(task) - - finally: - self._launching_new_tasks = False + async def inner() -> None: + try: + for task in await self.get_tasks( + statuses=[TaskStatus.ACTIVE], + limit=self.MAX_CONCURRENT_RUNNING_TASKS, + ): + # _launch_task will ignore tasks that we're already running, and + # will also do nothing if we're already at the maximum capacity. + await self._launch_task(task) + for task in await self.get_tasks( + statuses=[TaskStatus.SCHEDULED], + max_timestamp=self._clock.time_msec(), + limit=self.MAX_CONCURRENT_RUNNING_TASKS, + ): + await self._launch_task(task) + + finally: + self._launching_new_tasks = False + + run_as_background_process("launch_scheduled_tasks", inner) @wrap_as_background_process("clean_scheduled_tasks") async def _clean_scheduled_tasks(self) -> None: diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py index 30f0510c9f27..9e403b948be0 100644 --- a/tests/util/test_task_scheduler.py +++ b/tests/util/test_task_scheduler.py @@ -18,8 +18,7 @@ # [This file includes modifications made by New Vector Limited] # # - -from typing import Optional, Tuple +from typing import List, Optional, Tuple from twisted.internet.task import deferLater from twisted.test.proto_helpers import MemoryReactor @@ -104,33 +103,43 @@ def test_schedule_lot_of_tasks(self) -> None: ) ) - # This is to give the time to the active tasks to finish + def get_tasks_of_status(status: TaskStatus) -> List[ScheduledTask]: + tasks = ( + self.get_success(self.task_scheduler.get_task(task_id)) + for task_id in task_ids + ) + return [t for t in tasks if t is not None and t.status == status] + + # At this point, there should be MAX_CONCURRENT_RUNNING_TASKS active tasks and + # one scheduled task. + self.assertEquals( + len(get_tasks_of_status(TaskStatus.ACTIVE)), + TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS, + ) + self.assertEquals( + len(get_tasks_of_status(TaskStatus.SCHEDULED)), + 1, + ) + + # Give the time to the active tasks to finish self.reactor.advance(1) - # Check that only MAX_CONCURRENT_RUNNING_TASKS tasks has run and that one + # Check that MAX_CONCURRENT_RUNNING_TASKS tasks have run and that one # is still scheduled. 
- tasks = [ - self.get_success(self.task_scheduler.get_task(task_id)) - for task_id in task_ids - ] - self.assertEquals( - len( - [t for t in tasks if t is not None and t.status == TaskStatus.COMPLETE] - ), + len(get_tasks_of_status(TaskStatus.COMPLETE)), TaskScheduler.MAX_CONCURRENT_RUNNING_TASKS, ) - - scheduled_tasks = [ - t for t in tasks if t is not None and t.status == TaskStatus.ACTIVE - ] + scheduled_tasks = get_tasks_of_status(TaskStatus.SCHEDULED) self.assertEquals(len(scheduled_tasks), 1) - # We need to wait for the next run of the scheduler loop - self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)) - self.reactor.advance(1) + # The scheduled task should start 0.1s after the first of the active tasks + # finishes + self.reactor.advance(0.1) + self.assertEquals(len(get_tasks_of_status(TaskStatus.ACTIVE)), 1) - # Check that the last task has been properly executed after the next scheduler loop run + # ... and should finally complete after another second + self.reactor.advance(1) prev_scheduled_task = self.get_success( self.task_scheduler.get_task(scheduled_tasks[0].id) ) From 6a909aade2eef99c2cd18a6f4c0e923b199600d6 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 29 Nov 2024 11:26:37 -0600 Subject: [PATCH 124/147] Consolidate SSO redirects through `/_matrix/client/v3/login/sso/redirect(/{idpId})` (#17972) Consolidate SSO redirects through `/_matrix/client/v3/login/sso/redirect(/{idpId})` Spawning from https://github.com/element-hq/sbg/pull/421#discussion_r1859497330 where we have a proxy that intercepts responses to `/_matrix/client/v3/login/sso/redirect(/{idpId})` in order to upgrade them to use OAuth 2.0 Pushed Authorization Requests (PAR). Instead of needing to intercept multiple endpoints that redirect to the authorization endpoint, it seems better to just have Synapse consolidate to a single flow. ### Testing strategy 1. Create a new OAuth application. I'll be using GitHub for example but there are [many options](https://github.com/matrix-org/synapse/blob/be65a8ec0195955c15fdb179c9158b187638e39a/docs/openid.md). Visit https://github.com/settings/developers -> **New OAuth App** - Application name: `Synapse local testing` - Homepage URL: `http://localhost:8008` - Authorization callback URL: `http://localhost:8008/_synapse/client/oidc/callback` 1. Update your Synapse `homeserver.yaml` ```yaml server_name: "my.synapse.server" public_baseurl: http://localhost:8008/ listeners: - port: 8008 bind_addresses: [ #'::1', '127.0.0.1' ] tls: false type: http x_forwarded: true resources: - names: [client, federation, metrics] compress: false # SSO login testing oidc_providers: - idp_id: github idp_name: Github idp_brand: "github" # optional: styling hint for clients discover: false issuer: "https://github.com/" client_id: "xxx" # TO BE FILLED client_secret: "xxx" # TO BE FILLED authorization_endpoint: "https://github.com/login/oauth/authorize" token_endpoint: "https://github.com/login/oauth/access_token" userinfo_endpoint: "https://api.github.com/user" scopes: ["read:user"] user_mapping_provider: config: subject_claim: "id" localpart_template: "{{ user.login }}" display_name_template: "{{ user.name }}" ``` 1. Start Synapse: `poetry run synapse_homeserver --config-path homeserver.yaml` 1. Visit `http://localhost:8008/_synapse/client/pick_idp?redirectUrl=http%3A%2F%2Fexample.com` 1. Choose GitHub 1. Notice that you're redirected to GitHub to sign in (`https://github.com/login/oauth/authorize?...`) Tested locally and works: 1. 
`http://localhost:8008/_synapse/client/pick_idp?idp=oidc-github&redirectUrl=http%3A//example.com` -> 1. `http://localhost:8008/_matrix/client/v3/login/sso/redirect/oidc-github?redirectUrl=http://example.com` -> 1. `https://github.com/login/oauth/authorize?response_type=code&client_id=xxx&redirect_uri=http%3A%2F%2Flocalhost%3A8008%2F_synapse%2Fclient%2Foidc%2Fcallback&scope=read%3Auser&state=xxx&nonce=xxx` --- changelog.d/17972.misc | 1 + synapse/api/urls.py | 42 ++++++- synapse/config/cas.py | 6 +- synapse/config/server.py | 6 + synapse/rest/synapse/client/pick_idp.py | 29 ++--- tests/api/test_urls.py | 55 +++++++++ tests/rest/client/test_login.py | 144 ++++++++++++++++++++++-- tests/rest/client/utils.py | 7 +- 8 files changed, 262 insertions(+), 28 deletions(-) create mode 100644 changelog.d/17972.misc create mode 100644 tests/api/test_urls.py diff --git a/changelog.d/17972.misc b/changelog.d/17972.misc new file mode 100644 index 000000000000..e7f009d20d4a --- /dev/null +++ b/changelog.d/17972.misc @@ -0,0 +1 @@ +Consolidate SSO redirects through `/_matrix/client/v3/login/sso/redirect(/{idpId})`. diff --git a/synapse/api/urls.py b/synapse/api/urls.py index 03a3e96f2891..655b5edd7a20 100644 --- a/synapse/api/urls.py +++ b/synapse/api/urls.py @@ -23,7 +23,8 @@ import hmac from hashlib import sha256 -from urllib.parse import urlencode +from typing import Optional +from urllib.parse import urlencode, urljoin from synapse.config import ConfigError from synapse.config.homeserver import HomeServerConfig @@ -66,3 +67,42 @@ def build_user_consent_uri(self, user_id: str) -> str: urlencode({"u": user_id, "h": mac}), ) return consent_uri + + +class LoginSSORedirectURIBuilder: + def __init__(self, hs_config: HomeServerConfig): + self._public_baseurl = hs_config.server.public_baseurl + + def build_login_sso_redirect_uri( + self, *, idp_id: Optional[str], client_redirect_url: str + ) -> str: + """Build a `/login/sso/redirect` URI for the given identity provider. + + Builds `/_matrix/client/v3/login/sso/redirect/{idpId}?redirectUrl=xxx` when `idp_id` is specified. + Otherwise, builds `/_matrix/client/v3/login/sso/redirect?redirectUrl=xxx` when `idp_id` is `None`. + + Args: + idp_id: Optional ID of the identity provider + client_redirect_url: URL to redirect the user to after login + + Returns + The URI to follow when choosing a specific identity provider. + """ + base_url = urljoin( + self._public_baseurl, + f"{CLIENT_API_PREFIX}/v3/login/sso/redirect", + ) + + serialized_query_parameters = urlencode({"redirectUrl": client_redirect_url}) + + if idp_id: + resultant_url = urljoin( + # We have to add a trailing slash to the base URL to ensure that the + # last path segment is not stripped away when joining with another path. + f"{base_url}/", + f"{idp_id}?{serialized_query_parameters}", + ) + else: + resultant_url = f"{base_url}?{serialized_query_parameters}" + + return resultant_url diff --git a/synapse/config/cas.py b/synapse/config/cas.py index fa59c350c15c..c32bf36951d8 100644 --- a/synapse/config/cas.py +++ b/synapse/config/cas.py @@ -20,7 +20,7 @@ # # -from typing import Any, List +from typing import Any, List, Optional from synapse.config.sso import SsoAttributeRequirement from synapse.types import JsonDict @@ -46,7 +46,9 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # TODO Update this to a _synapse URL. 
         public_baseurl = self.root.server.public_baseurl
-        self.cas_service_url = public_baseurl + "_matrix/client/r0/login/cas/ticket"
+        self.cas_service_url: Optional[str] = (
+            public_baseurl + "_matrix/client/r0/login/cas/ticket"
+        )
 
         self.cas_protocol_version = cas_config.get("protocol_version")
         if (
diff --git a/synapse/config/server.py b/synapse/config/server.py
index ad7331de4288..6b2998361768 100644
--- a/synapse/config/server.py
+++ b/synapse/config/server.py
@@ -332,8 +332,14 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None:
             logger.info("Using default public_baseurl %s", public_baseurl)
         else:
             self.serve_client_wellknown = True
+            # Ensure that public_baseurl ends with a trailing slash
             if public_baseurl[-1] != "/":
                 public_baseurl += "/"
+
+        # Scrutinize user-provided config
+        if not isinstance(public_baseurl, str):
+            raise ConfigError("Must be a string", ("public_baseurl",))
+
         self.public_baseurl = public_baseurl
 
         # check that public_baseurl is valid
diff --git a/synapse/rest/synapse/client/pick_idp.py b/synapse/rest/synapse/client/pick_idp.py
index f26929bd6082..5e599f85b0bc 100644
--- a/synapse/rest/synapse/client/pick_idp.py
+++ b/synapse/rest/synapse/client/pick_idp.py
@@ -21,6 +21,7 @@
 import logging
 from typing import TYPE_CHECKING
 
+from synapse.api.urls import LoginSSORedirectURIBuilder
 from synapse.http.server import (
     DirectServeHtmlResource,
     finish_request,
@@ -49,6 +50,8 @@ def __init__(self, hs: "HomeServer"):
             hs.config.sso.sso_login_idp_picker_template
         )
         self._server_name = hs.hostname
+        self._public_baseurl = hs.config.server.public_baseurl
+        self._login_sso_redirect_url_builder = LoginSSORedirectURIBuilder(hs.config)
 
     async def _async_render_GET(self, request: SynapseRequest) -> None:
         client_redirect_url = parse_string(
@@ -56,25 +59,23 @@ async def _async_render_GET(self, request: SynapseRequest) -> None:
         )
         idp = parse_string(request, "idp", required=False)
 
-        # if we need to pick an IdP, do so
+        # If we need to pick an IdP, do so
        if not idp:
             return await self._serve_id_picker(request, client_redirect_url)
 
-        # otherwise, redirect to the IdP's redirect URI
-        providers = self._sso_handler.get_identity_providers()
-        auth_provider = providers.get(idp)
-        if not auth_provider:
-            logger.info("Unknown idp %r", idp)
-            self._sso_handler.render_error(
-                request, "unknown_idp", "Unknown identity provider ID"
+        # Otherwise, redirect to the login SSO redirect endpoint for the given IdP
+        # (which will in turn take us to the IdP's redirect URI).
+        #
+        # We could go directly to the IdP's redirect URI, but this way we ensure that
+        # the user goes through the same logic as the normal flow. Additionally, if a
+        # proxy needs to intercept the request, it only needs to intercept the one
+        # endpoint.
+        sso_login_redirect_url = (
+            self._login_sso_redirect_url_builder.build_login_sso_redirect_uri(
+                idp_id=idp, client_redirect_url=client_redirect_url
             )
-            return
-
-        sso_url = await auth_provider.handle_redirect_request(
-            request, client_redirect_url.encode("utf8")
         )
-        logger.info("Redirecting to %s", sso_url)
-        request.redirect(sso_url)
+        logger.info("Redirecting to %s", sso_login_redirect_url)
+        request.redirect(sso_login_redirect_url)
         finish_request(request)
 
     async def _serve_id_picker(
diff --git a/tests/api/test_urls.py b/tests/api/test_urls.py
new file mode 100644
index 000000000000..ce156a05dc48
--- /dev/null
+++ b/tests/api/test_urls.py
@@ -0,0 +1,55 @@
+#
+# This file is licensed under the Affero General Public License (AGPL) version 3.
+#
+# Copyright (C) 2024 New Vector, Ltd
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# See the GNU Affero General Public License for more details:
+# <https://www.gnu.org/licenses/agpl-3.0.html>.
+#
+
+
+from twisted.test.proto_helpers import MemoryReactor
+
+from synapse.api.urls import LoginSSORedirectURIBuilder
+from synapse.server import HomeServer
+from synapse.util import Clock
+
+from tests.unittest import HomeserverTestCase
+
+# a (valid) url with some annoying characters in. %3D is =, %26 is &, %2B is +
+TRICKY_TEST_CLIENT_REDIRECT_URL = 'https://x?<ab c>&q"+%3D%2B"="fö%26=o"'
+
+
+class LoginSSORedirectURIBuilderTestCase(HomeserverTestCase):
+    def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None:
+        self.login_sso_redirect_url_builder = LoginSSORedirectURIBuilder(hs.config)
+
+    def test_no_idp_id(self) -> None:
+        self.assertEqual(
+            self.login_sso_redirect_url_builder.build_login_sso_redirect_uri(
+                idp_id=None, client_redirect_url="http://example.com/redirect"
+            ),
+            "https://test/_matrix/client/v3/login/sso/redirect?redirectUrl=http%3A%2F%2Fexample.com%2Fredirect",
+        )
+
+    def test_explicit_idp_id(self) -> None:
+        self.assertEqual(
+            self.login_sso_redirect_url_builder.build_login_sso_redirect_uri(
+                idp_id="oidc-github", client_redirect_url="http://example.com/redirect"
+            ),
+            "https://test/_matrix/client/v3/login/sso/redirect/oidc-github?redirectUrl=http%3A%2F%2Fexample.com%2Fredirect",
+        )
+
+    def test_tricky_redirect_uri(self) -> None:
+        self.assertEqual(
+            self.login_sso_redirect_url_builder.build_login_sso_redirect_uri(
+                idp_id="oidc-github",
+                client_redirect_url=TRICKY_TEST_CLIENT_REDIRECT_URL,
+            ),
+            "https://test/_matrix/client/v3/login/sso/redirect/oidc-github?redirectUrl=https%3A%2F%2Fx%3F%3Cab+c%3E%26q%22%2B%253D%252B%22%3D%22f%C3%B6%2526%3Do%22",
+        )
diff --git a/tests/rest/client/test_login.py b/tests/rest/client/test_login.py
index cbd6d8d4bf8c..1451fd7c29c8 100644
--- a/tests/rest/client/test_login.py
+++ b/tests/rest/client/test_login.py
@@ -43,6 +43,7 @@
 import synapse.rest.admin
 from synapse.api.constants import ApprovalNoticeMedium, LoginType
 from synapse.api.errors import Codes
+from synapse.api.urls import LoginSSORedirectURIBuilder
 from synapse.appservice import ApplicationService
 from synapse.http.client import RawHeaders
 from synapse.module_api import ModuleApi
@@ -69,6 +70,10 @@
 except ImportError:
     HAS_JWT = False
 
+import logging
+
+logger = logging.getLogger(__name__)
+
 # synapse server name: used to populate public_baseurl in some tests
 SYNAPSE_SERVER_PUBLIC_HOSTNAME = "synapse"
 
@@ -77,7 +82,7 @@
 # FakeChannel.isSecure() returns False, so synapse will see the requested uri as
 # http://..., so using http in the public_baseurl stops Synapse trying to redirect to
 # https://....
-BASE_URL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)
+PUBLIC_BASEURL = "http://%s/" % (SYNAPSE_SERVER_PUBLIC_HOSTNAME,)
 
 # CAS server used in some tests
 CAS_SERVER = "https://fake.test"
@@ -109,6 +114,23 @@
 ]
 
 
+def get_relative_uri_from_absolute_uri(absolute_uri: str) -> str:
+    """
+    Peels off the path and query string from an absolute URI. Useful when interacting
+    with `make_request(...)` util function which expects a relative path instead of a
+    full URI.
+ """ + parsed_uri = urllib.parse.urlparse(absolute_uri) + # Sanity check that we're working with an absolute URI + assert parsed_uri.scheme == "http" or parsed_uri.scheme == "https" + + relative_uri = parsed_uri.path + if parsed_uri.query: + relative_uri += "?" + parsed_uri.query + + return relative_uri + + class TestSpamChecker: def __init__(self, config: None, api: ModuleApi): api.register_spam_checker_callbacks( @@ -614,7 +636,7 @@ class MultiSSOTestCase(unittest.HomeserverTestCase): def default_config(self) -> Dict[str, Any]: config = super().default_config() - config["public_baseurl"] = BASE_URL + config["public_baseurl"] = PUBLIC_BASEURL config["cas_config"] = { "enabled": True, @@ -653,6 +675,9 @@ def default_config(self) -> Dict[str, Any]: ] return config + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.login_sso_redirect_url_builder = LoginSSORedirectURIBuilder(hs.config) + def create_resource_dict(self) -> Dict[str, Resource]: d = super().create_resource_dict() d.update(build_synapse_client_resource_tree(self.hs)) @@ -725,6 +750,32 @@ def test_multi_sso_redirect_to_cas(self) -> None: + "&idp=cas", shorthand=False, ) + self.assertEqual(channel.code, 302, channel.result) + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers + sso_login_redirect_uri = location_headers[0] + + # it should redirect us to the standard login SSO redirect flow + self.assertEqual( + sso_login_redirect_uri, + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="cas", client_redirect_url=TEST_CLIENT_REDIRECT_URL + ), + ) + + # follow the redirect + channel = self.make_request( + "GET", + # We have to make this relative to be compatible with `make_request(...)` + get_relative_uri_from_absolute_uri(sso_login_redirect_uri), + # We have to set the Host header to match the `public_baseurl` to avoid + # the extra redirect in the `SsoRedirectServlet` in order for the + # cookies to be visible. + custom_headers=[ + ("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME), + ], + ) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers @@ -750,6 +801,32 @@ def test_multi_sso_redirect_to_saml(self) -> None: + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) + "&idp=saml", ) + self.assertEqual(channel.code, 302, channel.result) + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers + sso_login_redirect_uri = location_headers[0] + + # it should redirect us to the standard login SSO redirect flow + self.assertEqual( + sso_login_redirect_uri, + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="saml", client_redirect_url=TEST_CLIENT_REDIRECT_URL + ), + ) + + # follow the redirect + channel = self.make_request( + "GET", + # We have to make this relative to be compatible with `make_request(...)` + get_relative_uri_from_absolute_uri(sso_login_redirect_uri), + # We have to set the Host header to match the `public_baseurl` to avoid + # the extra redirect in the `SsoRedirectServlet` in order for the + # cookies to be visible. 
+ custom_headers=[ + ("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME), + ], + ) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers @@ -773,10 +850,35 @@ def test_login_via_oidc(self) -> None: # pick the default OIDC provider channel = self.make_request( "GET", - "/_synapse/client/pick_idp?redirectUrl=" - + urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL) - + "&idp=oidc", + f"/_synapse/client/pick_idp?redirectUrl={urllib.parse.quote_plus(TEST_CLIENT_REDIRECT_URL)}&idp=oidc", + ) + self.assertEqual(channel.code, 302, channel.result) + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers + sso_login_redirect_uri = location_headers[0] + + # it should redirect us to the standard login SSO redirect flow + self.assertEqual( + sso_login_redirect_uri, + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="oidc", client_redirect_url=TEST_CLIENT_REDIRECT_URL + ), + ) + + with fake_oidc_server.patch_homeserver(hs=self.hs): + # follow the redirect + channel = self.make_request( + "GET", + # We have to make this relative to be compatible with `make_request(...)` + get_relative_uri_from_absolute_uri(sso_login_redirect_uri), + # We have to set the Host header to match the `public_baseurl` to avoid + # the extra redirect in the `SsoRedirectServlet` in order for the + # cookies to be visible. + custom_headers=[ + ("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME), + ], ) + self.assertEqual(channel.code, 302, channel.result) location_headers = channel.headers.getRawHeaders("Location") assert location_headers @@ -838,12 +940,38 @@ def test_login_via_oidc(self) -> None: self.assertEqual(chan.json_body["user_id"], "@user1:test") def test_multi_sso_redirect_to_unknown(self) -> None: - """An unknown IdP should cause a 400""" + """An unknown IdP should cause a 404""" channel = self.make_request( "GET", "/_synapse/client/pick_idp?redirectUrl=http://x&idp=xyz", ) - self.assertEqual(channel.code, 400, channel.result) + self.assertEqual(channel.code, 302, channel.result) + location_headers = channel.headers.getRawHeaders("Location") + assert location_headers + sso_login_redirect_uri = location_headers[0] + + # it should redirect us to the standard login SSO redirect flow + self.assertEqual( + sso_login_redirect_uri, + self.login_sso_redirect_url_builder.build_login_sso_redirect_uri( + idp_id="xyz", client_redirect_url="http://x" + ), + ) + + # follow the redirect + channel = self.make_request( + "GET", + # We have to make this relative to be compatible with `make_request(...)` + get_relative_uri_from_absolute_uri(sso_login_redirect_uri), + # We have to set the Host header to match the `public_baseurl` to avoid + # the extra redirect in the `SsoRedirectServlet` in order for the + # cookies to be visible. 
+ custom_headers=[ + ("Host", SYNAPSE_SERVER_PUBLIC_HOSTNAME), + ], + ) + + self.assertEqual(channel.code, 404, channel.result) def test_client_idp_redirect_to_unknown(self) -> None: """If the client tries to pick an unknown IdP, return a 404""" @@ -1473,7 +1601,7 @@ def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: def default_config(self) -> Dict[str, Any]: config = super().default_config() - config["public_baseurl"] = BASE_URL + config["public_baseurl"] = PUBLIC_BASEURL config["oidc_config"] = {} config["oidc_config"].update(TEST_OIDC_CONFIG) diff --git a/tests/rest/client/utils.py b/tests/rest/client/utils.py index a1c284726ad9..dbd6049f9fc3 100644 --- a/tests/rest/client/utils.py +++ b/tests/rest/client/utils.py @@ -889,7 +889,7 @@ def initiate_sso_login( "GET", uri, ) - assert channel.code == 302 + assert channel.code == 302, f"Expected 302 for {uri}, got {channel.code}" # hit the redirect url again with the right Host header, which should now issue # a cookie and redirect to the SSO provider. @@ -901,17 +901,18 @@ def get_location(channel: FakeChannel) -> str: location = get_location(channel) parts = urllib.parse.urlsplit(location) + next_uri = urllib.parse.urlunsplit(("", "") + parts[2:]) channel = make_request( self.reactor, self.site, "GET", - urllib.parse.urlunsplit(("", "") + parts[2:]), + next_uri, custom_headers=[ ("Host", parts[1]), ], ) - assert channel.code == 302 + assert channel.code == 302, f"Expected 302 for {next_uri}, got {channel.code}" channel.extract_cookies(cookies) return get_location(channel) From a82f5f206f7aeb18a48b7eb1aa6205d66d56e25b Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Dec 2024 10:54:14 +0000 Subject: [PATCH 125/147] Fix release process to not create duplicate releases (#17970) This is to work around https://github.com/softprops/action-gh-release/issues/445 --------- Co-authored-by: Quentin Gliech --- .github/workflows/release-artifacts.yml | 3 ++- changelog.d/17970.bugfix | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17970.bugfix diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index c0aff7914134..42a374fa1908 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -212,7 +212,8 @@ jobs: mv debs*/* debs/ tar -cvJf debs.tar.xz debs - name: Attach to release - uses: softprops/action-gh-release@v2 + # Pinned to work around https://github.com/softprops/action-gh-release/issues/445 + uses: softprops/action-gh-release@v2.0.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/changelog.d/17970.bugfix b/changelog.d/17970.bugfix new file mode 100644 index 000000000000..835079de3f3b --- /dev/null +++ b/changelog.d/17970.bugfix @@ -0,0 +1 @@ +Fix release process to not create duplicate releases. From a89b697209018cfb8792d22651b646cd154f1c71 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 15:28:08 +0000 Subject: [PATCH 126/147] Bump pysaml2 from 7.3.1 to 7.5.0 (#17978) --- poetry.lock | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/poetry.lock b/poetry.lock index f43fe2489ae5..1a735e2fd647 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 1.8.4 and should not be changed by hand. +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. 
[[package]] name = "annotated-types" @@ -1917,13 +1917,13 @@ test = ["pretend", "pytest (>=3.0.1)", "pytest-rerunfailures"] [[package]] name = "pysaml2" -version = "7.3.1" +version = "7.5.0" description = "Python implementation of SAML Version 2 Standard" optional = true -python-versions = ">=3.6.2,<4.0.0" +python-versions = ">=3.9,<4.0" files = [ - {file = "pysaml2-7.3.1-py3-none-any.whl", hash = "sha256:2cc66e7a371d3f5ff9601f0ed93b5276cca816fce82bb38447d5a0651f2f5193"}, - {file = "pysaml2-7.3.1.tar.gz", hash = "sha256:eab22d187c6dd7707c58b5bb1688f9b8e816427667fc99d77f54399e15cd0a0a"}, + {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, + {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, ] [package.dependencies] @@ -1933,7 +1933,7 @@ pyopenssl = "*" python-dateutil = "*" pytz = "*" requests = ">=2,<3" -xmlschema = ">=1.2.1" +xmlschema = ">=2,<3" [package.extras] s2repoze = ["paste", "repoze.who", "zope.interface"] From 9b2ae62d20a7862c58e1302fdfba03773caf2d83 Mon Sep 17 00:00:00 2001 From: Travis Ralston Date: Mon, 2 Dec 2024 08:28:47 -0700 Subject: [PATCH 127/147] Use stable error code for account locking (#17965) --- changelog.d/17965.feature | 1 + synapse/api/errors.py | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/17965.feature diff --git a/changelog.d/17965.feature b/changelog.d/17965.feature new file mode 100644 index 000000000000..e447a58986e0 --- /dev/null +++ b/changelog.d/17965.feature @@ -0,0 +1 @@ +Use stable `M_USER_LOCKED` error code for locked accounts, as per [Matrix 1.12](https://spec.matrix.org/v1.12/client-server-api/#account-locking). \ No newline at end of file diff --git a/synapse/api/errors.py b/synapse/api/errors.py index e6efa7a4249a..71e4bb49716d 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -87,8 +87,7 @@ class Codes(str, Enum): WEAK_PASSWORD = "M_WEAK_PASSWORD" INVALID_SIGNATURE = "M_INVALID_SIGNATURE" USER_DEACTIVATED = "M_USER_DEACTIVATED" - # USER_LOCKED = "M_USER_LOCKED" - USER_LOCKED = "ORG_MATRIX_MSC3939_USER_LOCKED" + USER_LOCKED = "M_USER_LOCKED" NOT_YET_UPLOADED = "M_NOT_YET_UPLOADED" CANNOT_OVERWRITE_MEDIA = "M_CANNOT_OVERWRITE_MEDIA" From e5d3bfba30351f4f9c2bcf89a2b002c6be9ee099 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 2 Dec 2024 10:17:55 -0600 Subject: [PATCH 128/147] Sliding Sync: Include invite, ban, kick, targets when `$LAZY`-loading room members (#17947) Part of https://github.com/element-hq/synapse/issues/17929 --- changelog.d/17947.feature | 1 + synapse/api/constants.py | 2 + synapse/handlers/sliding_sync/__init__.py | 12 +- synapse/types/handlers/sliding_sync.py | 11 +- .../sliding_sync/test_rooms_required_state.py | 166 +++++++++++++++++- 5 files changed, 181 insertions(+), 11 deletions(-) create mode 100644 changelog.d/17947.feature diff --git a/changelog.d/17947.feature b/changelog.d/17947.feature new file mode 100644 index 000000000000..2d1b99cec2db --- /dev/null +++ b/changelog.d/17947.feature @@ -0,0 +1 @@ +Update [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync to include invite, ban, kick, targets when `$LAZY`-loading room members. 
diff --git a/synapse/api/constants.py b/synapse/api/constants.py
index 8db302b3d8b2..1206d1e00f36 100644
--- a/synapse/api/constants.py
+++ b/synapse/api/constants.py
@@ -231,6 +231,8 @@ class EventContentFields:
     ROOM_NAME: Final = "name"
 
     MEMBERSHIP: Final = "membership"
+    MEMBERSHIP_DISPLAYNAME: Final = "displayname"
+    MEMBERSHIP_AVATAR_URL: Final = "avatar_url"
 
     # Used in m.room.guest_access events.
     GUEST_ACCESS: Final = "guest_access"
diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py
index 85cfbc6dbf57..4f4faef524a9 100644
--- a/synapse/handlers/sliding_sync/__init__.py
+++ b/synapse/handlers/sliding_sync/__init__.py
@@ -955,15 +955,21 @@ async def get_room_sync_data(
                         and state_key == StateValues.LAZY
                     ):
                         lazy_load_room_members = True
+
                         # Everyone in the timeline is relevant
-                        #
-                        # FIXME: We probably also care about invite, ban, kick, targets, etc
-                        # but the spec only mentions "senders".
                         timeline_membership: Set[str] = set()
                         if timeline_events is not None:
                             for timeline_event in timeline_events:
+                                # Anyone who sent a message is relevant
                                 timeline_membership.add(timeline_event.sender)
 
+                                # We also care about invite, ban, kick, targets,
+                                # etc.
+                                if timeline_event.type == EventTypes.Member:
+                                    timeline_membership.add(
+                                        timeline_event.state_key
+                                    )
+
                         # Update the required state filter so we pick up the new
                         # membership
                         for user_id in timeline_membership:
diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py
index aae60fddeabd..3ebd334a6d53 100644
--- a/synapse/types/handlers/sliding_sync.py
+++ b/synapse/types/handlers/sliding_sync.py
@@ -407,8 +407,8 @@ class StateValues:
     # Include all state events of the given type
     WILDCARD: Final = "*"
     # Lazy-load room membership events (include room membership events for any event
-    # `sender` in the timeline). We only give special meaning to this value when it's a
-    # `state_key`.
+    # `sender` or membership change target in the timeline). We only give special
+    # meaning to this value when it's a `state_key`.
     LAZY: Final = "$LAZY"
     # Substitute with the requester's user ID. Typically used by clients to get
     # the user's membership.
@@ -641,9 +641,10 @@ def must_await_full_state(
             if user_id == StateValues.ME:
                 continue
             # We're lazy-loading membership so we can just return the state we have.
-            # Lazy-loading means we include membership for any event `sender` in the
-            # timeline but since we had to auth those timeline events, we will have the
-            # membership state for them (including from remote senders).
+            # Lazy-loading means we include membership for any event `sender` or
+            # membership change target in the timeline but since we had to auth those
+            # timeline events, we will have the membership state for them (including
+            # from remote senders).
             elif user_id == StateValues.LAZY:
                 continue
             elif user_id == StateValues.WILDCARD:
diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py
index ecea5f2d5b32..b4869d5fa3c6 100644
--- a/tests/rest/client/sliding_sync/test_rooms_required_state.py
+++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py
@@ -11,6 +11,7 @@
 # See the GNU Affero General Public License for more details:
 # <https://www.gnu.org/licenses/agpl-3.0.html>.
# +import enum import logging from parameterized import parameterized, parameterized_class @@ -18,9 +19,9 @@ from twisted.test.proto_helpers import MemoryReactor import synapse.rest.admin -from synapse.api.constants import EventTypes, Membership +from synapse.api.constants import EventContentFields, EventTypes, JoinRules, Membership from synapse.handlers.sliding_sync import StateValues -from synapse.rest.client import login, room, sync +from synapse.rest.client import knock, login, room, sync from synapse.server import HomeServer from synapse.util import Clock @@ -30,6 +31,17 @@ logger = logging.getLogger(__name__) +# Inherit from `str` so that they show up in the test description when we +# `@parameterized.expand(...)` the first parameter +class MembershipAction(str, enum.Enum): + INVITE = "invite" + JOIN = "join" + KNOCK = "knock" + LEAVE = "leave" + BAN = "ban" + KICK = "kick" + + # FIXME: This can be removed once we bump `SCHEMA_COMPAT_VERSION` and run the # foreground update for # `sliding_sync_joined_rooms`/`sliding_sync_membership_snapshots` (tracked by @@ -52,6 +64,7 @@ class SlidingSyncRoomsRequiredStateTestCase(SlidingSyncBase): servlets = [ synapse.rest.admin.register_servlets, login.register_servlets, + knock.register_servlets, room.register_servlets, sync.register_servlets, ] @@ -496,6 +509,153 @@ def test_rooms_required_state_lazy_loading_room_members_incremental_sync( ) self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + @parameterized.expand( + [ + (MembershipAction.LEAVE,), + (MembershipAction.INVITE,), + (MembershipAction.KNOCK,), + (MembershipAction.JOIN,), + (MembershipAction.BAN,), + (MembershipAction.KICK,), + ] + ) + def test_rooms_required_state_changed_membership_in_timeline_lazy_loading_room_members_incremental_sync( + self, + room_membership_action: str, + ) -> None: + """ + On incremental sync, test `rooms.required_state` returns people relevant to the + timeline when lazy-loading room members, `["m.room.member","$LAZY"]` **including + changes to membership**. 
+ """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + user4_id = self.register_user("user4", "pass") + user4_tok = self.login(user4_id, "pass") + user5_id = self.register_user("user5", "pass") + user5_tok = self.login(user5_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok, is_public=True) + # If we're testing knocks, set the room to knock + if room_membership_action == MembershipAction.KNOCK: + self.helper.send_state( + room_id1, + EventTypes.JoinRules, + {"join_rule": JoinRules.KNOCK}, + tok=user2_tok, + ) + + # Join the test users to the room + self.helper.invite(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.invite(room_id1, src=user2_id, targ=user3_id, tok=user2_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + self.helper.invite(room_id1, src=user2_id, targ=user4_id, tok=user2_tok) + self.helper.join(room_id1, user4_id, tok=user4_tok) + if room_membership_action in ( + MembershipAction.LEAVE, + MembershipAction.BAN, + MembershipAction.JOIN, + ): + self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + self.helper.join(room_id1, user5_id, tok=user5_tok) + + # Send some messages to fill up the space + self.helper.send(room_id1, "1", tok=user2_tok) + self.helper.send(room_id1, "2", tok=user2_tok) + self.helper.send(room_id1, "3", tok=user2_tok) + + # Make the Sliding Sync request with lazy loading for the room members + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, StateValues.LAZY], + ], + "timeline_limit": 3, + } + } + } + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send more timeline events into the room + self.helper.send(room_id1, "4", tok=user2_tok) + self.helper.send(room_id1, "5", tok=user4_tok) + # The third event will be our membership event concerning user5 + if room_membership_action == MembershipAction.LEAVE: + # User 5 leaves + self.helper.leave(room_id1, user5_id, tok=user5_tok) + elif room_membership_action == MembershipAction.INVITE: + # User 5 is invited + self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + elif room_membership_action == MembershipAction.KNOCK: + # User 5 knocks + self.helper.knock(room_id1, user5_id, tok=user5_tok) + # The admin of the room accepts the knock + self.helper.invite(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + elif room_membership_action == MembershipAction.JOIN: + # Update the display name of user5 (causing a membership change) + self.helper.send_state( + room_id1, + event_type=EventTypes.Member, + state_key=user5_id, + body={ + EventContentFields.MEMBERSHIP: Membership.JOIN, + EventContentFields.MEMBERSHIP_DISPLAYNAME: "quick changer", + }, + tok=user5_tok, + ) + elif room_membership_action == MembershipAction.BAN: + self.helper.ban(room_id1, src=user2_id, targ=user5_id, tok=user2_tok) + elif room_membership_action == MembershipAction.KICK: + # Kick user5 from the room + self.helper.change_membership( + room=room_id1, + src=user2_id, + targ=user5_id, + tok=user2_tok, + membership=Membership.LEAVE, + extra_data={ + "reason": "Bad manners", + }, + ) + else: + raise AssertionError( + f"Unknown room_membership_action: 
{room_membership_action}" + ) + + # Make an incremental Sliding Sync request + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Only user2, user4, and user5 sent events in the last 3 events we see in the + # `timeline`. + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + # This appears because *some* membership in the room changed and the + # heroes are recalculated and is thrown in because we have it. But this + # is technically optional and not needed because we've already seen user2 + # in the last sync (and their membership hasn't changed). + state_map[(EventTypes.Member, user2_id)], + # Appears because there is a message in the timeline from this user + state_map[(EventTypes.Member, user4_id)], + # Appears because there is a membership event in the timeline from this user + state_map[(EventTypes.Member, user5_id)], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + def test_rooms_required_state_expand_lazy_loading_room_members_incremental_sync( self, ) -> None: @@ -1243,7 +1403,7 @@ def test_rooms_required_state_expand_retract_expand(self) -> None: # Update the room name self.helper.send_state( - room_id1, "m.room.name", {"name": "Bar"}, state_key="", tok=user1_tok + room_id1, EventTypes.Name, {"name": "Bar"}, state_key="", tok=user1_tok ) # Update the sliding sync requests to exclude the room name again From 190c400a8379b99b7e5889d3cb2fb102995f6ab4 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 16:55:40 +0000 Subject: [PATCH 129/147] Bump tomli from 2.1.0 to 2.2.1 (#17979) --- poetry.lock | 36 +++++++++++++++++++++++++++++++++--- 1 file changed, 33 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 1a735e2fd647..af5d9a44061b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2514,13 +2514,43 @@ twisted = ["twisted"] [[package]] name = "tomli" -version = "2.1.0" +version = "2.2.1" description = "A lil' TOML parser" optional = false python-versions = ">=3.8" files = [ - {file = "tomli-2.1.0-py3-none-any.whl", hash = "sha256:a5c57c3d1c56f5ccdf89f6523458f60ef716e210fc47c4cfb188c5ba473e0391"}, - {file = "tomli-2.1.0.tar.gz", hash = "sha256:3f646cae2aec94e17d04973e4249548320197cfabdf130015d023de4b74d8ab8"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:678e4fa69e4575eb77d103de3df8a895e1591b48e740211bd1067378c69e8249"}, + {file = "tomli-2.2.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:023aa114dd824ade0100497eb2318602af309e5a55595f76b626d6d9f3b7b0a6"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ece47d672db52ac607a3d9599a9d48dcb2f2f735c6c2d1f34130085bb12b112a"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6972ca9c9cc9f0acaa56a8ca1ff51e7af152a9f87fb64623e31d5c83700080ee"}, + {file = "tomli-2.2.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c954d2250168d28797dd4e3ac5cf812a406cd5a92674ee4c8f123c889786aa8e"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8dd28b3e155b80f4d54beb40a441d366adcfe740969820caf156c019fb5c7ec4"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_i686.whl", hash = 
"sha256:e59e304978767a54663af13c07b3d1af22ddee3bb2fb0618ca1593e4f593a106"}, + {file = "tomli-2.2.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:33580bccab0338d00994d7f16f4c4ec25b776af3ffaac1ed74e0b3fc95e885a8"}, + {file = "tomli-2.2.1-cp311-cp311-win32.whl", hash = "sha256:465af0e0875402f1d226519c9904f37254b3045fc5084697cefb9bdde1ff99ff"}, + {file = "tomli-2.2.1-cp311-cp311-win_amd64.whl", hash = "sha256:2d0f2fdd22b02c6d81637a3c95f8cd77f995846af7414c5c4b8d0545afa1bc4b"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:4a8f6e44de52d5e6c657c9fe83b562f5f4256d8ebbfe4ff922c495620a7f6cea"}, + {file = "tomli-2.2.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8d57ca8095a641b8237d5b079147646153d22552f1c637fd3ba7f4b0b29167a8"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e340144ad7ae1533cb897d406382b4b6fede8890a03738ff1683af800d54192"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:db2b95f9de79181805df90bedc5a5ab4c165e6ec3fe99f970d0e302f384ad222"}, + {file = "tomli-2.2.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40741994320b232529c802f8bc86da4e1aa9f413db394617b9a256ae0f9a7f77"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:400e720fe168c0f8521520190686ef8ef033fb19fc493da09779e592861b78c6"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:02abe224de6ae62c19f090f68da4e27b10af2b93213d36cf44e6e1c5abd19fdd"}, + {file = "tomli-2.2.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:b82ebccc8c8a36f2094e969560a1b836758481f3dc360ce9a3277c65f374285e"}, + {file = "tomli-2.2.1-cp312-cp312-win32.whl", hash = "sha256:889f80ef92701b9dbb224e49ec87c645ce5df3fa2cc548664eb8a25e03127a98"}, + {file = "tomli-2.2.1-cp312-cp312-win_amd64.whl", hash = "sha256:7fc04e92e1d624a4a63c76474610238576942d6b8950a2d7f908a340494e67e4"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:f4039b9cbc3048b2416cc57ab3bda989a6fcf9b36cf8937f01a6e731b64f80d7"}, + {file = "tomli-2.2.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:286f0ca2ffeeb5b9bd4fcc8d6c330534323ec51b2f52da063b11c502da16f30c"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a92ef1a44547e894e2a17d24e7557a5e85a9e1d0048b0b5e7541f76c5032cb13"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9316dc65bed1684c9a98ee68759ceaed29d229e985297003e494aa825ebb0281"}, + {file = "tomli-2.2.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e85e99945e688e32d5a35c1ff38ed0b3f41f43fad8df0bdf79f72b2ba7bc5272"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ac065718db92ca818f8d6141b5f66369833d4a80a9d74435a268c52bdfa73140"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:d920f33822747519673ee656a4b6ac33e382eca9d331c87770faa3eef562aeb2"}, + {file = "tomli-2.2.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:a198f10c4d1b1375d7687bc25294306e551bf1abfa4eace6650070a5c1ae2744"}, + {file = "tomli-2.2.1-cp313-cp313-win32.whl", hash = "sha256:d3f5614314d758649ab2ab3a62d4f2004c825922f9e370b29416484086b264ec"}, + {file = "tomli-2.2.1-cp313-cp313-win_amd64.whl", hash = "sha256:a38aa0308e754b0e3c67e344754dff64999ff9b513e691d0e786265c93583c69"}, + {file = 
"tomli-2.2.1-py3-none-any.whl", hash = "sha256:cb55c73c5f4408779d0cf3eef9f762b9c9f147a77de7b258bef0a5628adc85cc"}, + {file = "tomli-2.2.1.tar.gz", hash = "sha256:cd45e1dc79c835ce60f7404ec8119f2eb06d38b1deba146f07ced3bbc44505ff"}, ] [[package]] From d648c8ce3f4cbf61191b9f5302e405f7b0288677 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 2 Dec 2024 16:55:53 +0000 Subject: [PATCH 130/147] Bump bytes from 1.8.0 to 1.9.0 (#17982) --- Cargo.lock | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index b7084165ee21..e74e7226f267 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -61,9 +61,9 @@ checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "bytes" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ac0150caa2ae65ca5bd83f25c7de183dea78d4d366469f148435e2acfbad0da" +checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" [[package]] name = "cfg-if" From f3fd6852ac3dd8196511b97772285e06c301da31 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 2 Dec 2024 10:54:14 +0000 Subject: [PATCH 131/147] Fix release process to not create duplicate releases (#17970) This is to work around https://github.com/softprops/action-gh-release/issues/445 --------- Co-authored-by: Quentin Gliech --- .github/workflows/release-artifacts.yml | 3 ++- changelog.d/17970.bugfix | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17970.bugfix diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index c0aff7914134..42a374fa1908 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -212,7 +212,8 @@ jobs: mv debs*/* debs/ tar -cvJf debs.tar.xz debs - name: Attach to release - uses: softprops/action-gh-release@v2 + # Pinned to work around https://github.com/softprops/action-gh-release/issues/445 + uses: softprops/action-gh-release@v2.0.5 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/changelog.d/17970.bugfix b/changelog.d/17970.bugfix new file mode 100644 index 000000000000..835079de3f3b --- /dev/null +++ b/changelog.d/17970.bugfix @@ -0,0 +1 @@ +Fix release process to not create duplicate releases. 
From 4daa533e82f345ce87b9495d31781af570ba3ead Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Thu, 24 Oct 2024 14:54:09 -0500 Subject: [PATCH 132/147] Sliding Sync: Fix state leaking on incremental sync --- synapse/handlers/sliding_sync/__init__.py | 63 ++++++- .../storage/databases/main/state_deltas.py | 7 + .../sliding_sync/test_rooms_required_state.py | 160 +++++++++++++++++- 3 files changed, 226 insertions(+), 4 deletions(-) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 85cfbc6dbf57..089301712690 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -39,6 +39,7 @@ trace, ) from synapse.storage.databases.main.roommember import extract_heroes_from_room_summary +from synapse.storage.databases.main.state_deltas import StateDelta from synapse.storage.databases.main.stream import PaginateFunction from synapse.storage.roommember import ( MemberSummary, @@ -48,6 +49,7 @@ MutableStateMap, PersistedEventPosition, Requester, + RoomStreamToken, SlidingSyncStreamToken, StateMap, StrCollection, @@ -470,6 +472,64 @@ async def get_current_state_at( return state_map + @trace + async def get_current_state_deltas_for_room( + self, + room_id: str, + room_membership_for_user_at_to_token: RoomsForUserType, + from_token: RoomStreamToken, + to_token: RoomStreamToken, + ) -> List[StateDelta]: + """ + Get the state deltas between two tokens taking into account the user's + membership. If the user is LEAVE/BAN, we will only get the state deltas up to + their LEAVE/BAN event (inclusive). + + (> `from_token` and <= `to_token`) + """ + membership = room_membership_for_user_at_to_token.membership + # We don't know how to handle `membership` values other than these. The + # code below would need to be updated. + assert membership in ( + Membership.JOIN, + Membership.INVITE, + Membership.KNOCK, + Membership.LEAVE, + Membership.BAN, + ) + + # People shouldn't see past their leave/ban event + if membership in ( + Membership.LEAVE, + Membership.BAN, + ): + to_bound = ( + room_membership_for_user_at_to_token.event_pos.to_room_stream_token() + ) + # If we are participating in the room, we can get the latest current state in + # the room + elif membership == Membership.JOIN: + to_bound = to_token + # We can only rely on the stripped state included in the invite/knock event + # itself so there will never be any state deltas to send down. + elif membership in (Membership.INVITE, Membership.KNOCK): + return [] + else: + # We don't know how to handle this type of membership yet + # + # FIXME: We should use `assert_never` here but for some reason + # the exhaustive matching doesn't recognize the `Never` here. + # assert_never(membership) + raise AssertionError( + f"Unexpected membership {membership} that we don't know how to handle yet" + ) + + return await self.store.get_current_state_deltas_for_room( + room_id=room_id, + from_token=from_token, + to_token=to_bound, + ) + @trace async def get_room_sync_data( self, @@ -790,8 +850,9 @@ async def get_room_sync_data( # TODO: Limit the number of state events we're about to send down # the room, if its too many we should change this to an # `initial=True`? 
- deltas = await self.store.get_current_state_deltas_for_room( + deltas = await self.get_current_state_deltas_for_room( room_id=room_id, + room_membership_for_user_at_to_token=room_membership_for_user_at_to_token, from_token=from_bound, to_token=to_token.room_key, ) diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index 117ee89d0a00..b90f667da868 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -243,6 +243,13 @@ async def get_current_state_deltas_for_room( (> `from_token` and <= `to_token`) """ + # We can bail early if the `from_token` is after the `to_token` + if ( + to_token is not None + and from_token is not None + and to_token.is_before_or_eq(from_token) + ): + return [] if ( from_token is not None diff --git a/tests/rest/client/sliding_sync/test_rooms_required_state.py b/tests/rest/client/sliding_sync/test_rooms_required_state.py index ecea5f2d5b32..be13f945381b 100644 --- a/tests/rest/client/sliding_sync/test_rooms_required_state.py +++ b/tests/rest/client/sliding_sync/test_rooms_required_state.py @@ -751,9 +751,10 @@ def test_rooms_required_state_me(self) -> None: self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) - def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: + def test_rooms_required_state_leave_ban_initial(self, stop_membership: str) -> None: """ - Test `rooms.required_state` should not return state past a leave/ban event. + Test `rooms.required_state` should not return state past a leave/ban event when + it's the first "initial" time the room is being sent down the connection. """ user1_id = self.register_user("user1", "pass") user1_tok = self.login(user1_id, "pass") @@ -788,6 +789,13 @@ def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: body={"foo": "bar"}, tok=user2_tok, ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "bar"}, + tok=user2_tok, + ) if stop_membership == Membership.LEAVE: # User 1 leaves @@ -796,6 +804,8 @@ def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: # User 1 is banned self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + # Get the state_map before we change the state as this is the final state we + # expect User1 to be able to see state_map = self.get_success( self.storage_controllers.state.get_current_state(room_id1) ) @@ -808,12 +818,36 @@ def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: body={"foo": "qux"}, tok=user2_tok, ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "qux"}, + tok=user2_tok, + ) self.helper.leave(room_id1, user3_id, tok=user3_tok) # Make an incremental Sliding Sync request + # + # Also expand the required state to include the `org.matrix.bar_state` event. + # This is just an extra complication of the test. 
+ sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ["org.matrix.bar_state", ""], + ], + "timeline_limit": 3, + } + } + } response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) - # Only user2 and user3 sent events in the 3 events we see in the `timeline` + # We should only see the state up to the leave/ban event self._assertRequiredStateIncludes( response_body["rooms"][room_id1]["required_state"], { @@ -822,6 +856,126 @@ def test_rooms_required_state_leave_ban(self, stop_membership: str) -> None: state_map[(EventTypes.Member, user2_id)], state_map[(EventTypes.Member, user3_id)], state_map[("org.matrix.foo_state", "")], + state_map[("org.matrix.bar_state", "")], + }, + exact=True, + ) + self.assertIsNone(response_body["rooms"][room_id1].get("invite_state")) + + @parameterized.expand([(Membership.LEAVE,), (Membership.BAN,)]) + def test_rooms_required_state_leave_ban_incremental( + self, stop_membership: str + ) -> None: + """ + Test `rooms.required_state` should not return state past a leave/ban event on + incremental sync. + """ + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + user3_id = self.register_user("user3", "pass") + user3_tok = self.login(user3_id, "pass") + + room_id1 = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id1, user1_id, tok=user1_tok) + self.helper.join(room_id1, user3_id, tok=user3_tok) + + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "bar"}, + tok=user2_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "bar"}, + tok=user2_tok, + ) + + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ], + "timeline_limit": 3, + } + } + } + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + if stop_membership == Membership.LEAVE: + # User 1 leaves + self.helper.leave(room_id1, user1_id, tok=user1_tok) + elif stop_membership == Membership.BAN: + # User 1 is banned + self.helper.ban(room_id1, src=user2_id, targ=user1_id, tok=user2_tok) + + # Get the state_map before we change the state as this is the final state we + # expect User1 to be able to see + state_map = self.get_success( + self.storage_controllers.state.get_current_state(room_id1) + ) + + # Change the state after user 1 leaves + self.helper.send_state( + room_id1, + event_type="org.matrix.foo_state", + state_key="", + body={"foo": "qux"}, + tok=user2_tok, + ) + self.helper.send_state( + room_id1, + event_type="org.matrix.bar_state", + state_key="", + body={"bar": "qux"}, + tok=user2_tok, + ) + self.helper.leave(room_id1, user3_id, tok=user3_tok) + + # Make an incremental Sliding Sync request + # + # Also expand the required state to include the `org.matrix.bar_state` event. + # This is just an extra complication of the test. 
+ sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1]], + "required_state": [ + [EventTypes.Create, ""], + [EventTypes.Member, "*"], + ["org.matrix.foo_state", ""], + ["org.matrix.bar_state", ""], + ], + "timeline_limit": 3, + } + } + } + response_body, _ = self.do_sync(sync_body, since=from_token, tok=user1_tok) + + # User1 should only see the state up to the leave/ban event + self._assertRequiredStateIncludes( + response_body["rooms"][room_id1]["required_state"], + { + # User1 should see their leave/ban membership + state_map[(EventTypes.Member, user1_id)], + state_map[("org.matrix.bar_state", "")], + # The commented out state events were already returned in the initial + # sync so we shouldn't see them again on the incremental sync. And we + # shouldn't see the state events that changed after the leave/ban event. + # + # state_map[(EventTypes.Create, "")], + # state_map[(EventTypes.Member, user2_id)], + # state_map[(EventTypes.Member, user3_id)], + # state_map[("org.matrix.foo_state", "")], }, exact=True, ) From d82e1ed357b7ee21dff83d06cba7a67840cfd464 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 15 Nov 2024 15:53:33 +0000 Subject: [PATCH 133/147] Handle null invite and knock room state --- .../federation/transport/server/federation.py | 3 +++ synapse/handlers/federation.py | 3 +++ synapse/handlers/sliding_sync/__init__.py | 16 +++++++++++----- synapse/push/push_tools.py | 8 ++++++-- synapse/rest/client/sync.py | 12 ++++++++++-- 5 files changed, 33 insertions(+), 9 deletions(-) diff --git a/synapse/federation/transport/server/federation.py b/synapse/federation/transport/server/federation.py index a05e5d5319c7..093ba30d3157 100644 --- a/synapse/federation/transport/server/federation.py +++ b/synapse/federation/transport/server/federation.py @@ -509,6 +509,9 @@ async def on_PUT( event = content["event"] invite_room_state = content.get("invite_room_state", []) + if not isinstance(invite_room_state, list): + invite_room_state = [] + # Synapse expects invite_room_state to be in unsigned, as it is in v1 # API diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 2b7aad5b5816..17dd4af13ed8 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -880,6 +880,9 @@ async def do_knock( if stripped_room_state is None: raise KeyError("Missing 'knock_room_state' field in send_knock response") + if not isinstance(stripped_room_state, list): + raise TypeError("'knock_room_state' has wrong type") + event.unsigned["knock_room_state"] = stripped_room_state context = EventContext.for_outlier(self._storage_controllers) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 089301712690..0175da1a1398 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -815,13 +815,19 @@ async def get_room_sync_data( stripped_state = [] if invite_or_knock_event.membership == Membership.INVITE: - stripped_state.extend( - invite_or_knock_event.unsigned.get("invite_room_state", []) + invite_state = invite_or_knock_event.unsigned.get( + "invite_room_state", [] ) + if not isinstance(invite_state, list): + invite_state = [] + + stripped_state.extend(invite_state) elif invite_or_knock_event.membership == Membership.KNOCK: - stripped_state.extend( - invite_or_knock_event.unsigned.get("knock_room_state", []) - ) + knock_state = invite_or_knock_event.unsigned.get("knock_room_state", []) + if not isinstance(knock_state, list): + knock_state = [] + + 
stripped_state.extend(knock_state)
 
             stripped_state.append(strip_event(invite_or_knock_event))
 
diff --git a/synapse/push/push_tools.py b/synapse/push/push_tools.py
index 1ef881f70247..3f3e4a92343d 100644
--- a/synapse/push/push_tools.py
+++ b/synapse/push/push_tools.py
@@ -74,9 +74,13 @@ async def get_context_for_event(
 
     room_state = []
     if ev.content.get("membership") == Membership.INVITE:
-        room_state = ev.unsigned.get("invite_room_state", [])
+        invite_room_state = ev.unsigned.get("invite_room_state", [])
+        if isinstance(invite_room_state, list):
+            room_state = invite_room_state
     elif ev.content.get("membership") == Membership.KNOCK:
-        room_state = ev.unsigned.get("knock_room_state", [])
+        knock_room_state = ev.unsigned.get("knock_room_state", [])
+        if isinstance(knock_room_state, list):
+            room_state = knock_room_state
 
     # Ideally we'd reuse the logic in `calculate_room_name`, but that gets
     # complicated to handle partial events vs pulling events from the DB.
diff --git a/synapse/rest/client/sync.py b/synapse/rest/client/sync.py
index 5c62a74f41c4..f4ef84a038dd 100644
--- a/synapse/rest/client/sync.py
+++ b/synapse/rest/client/sync.py
@@ -436,7 +436,12 @@ async def encode_invited(
             )
             unsigned = dict(invite.get("unsigned", {}))
             invite["unsigned"] = unsigned
-            invited_state = list(unsigned.pop("invite_room_state", []))
+
+            invited_state = unsigned.pop("invite_room_state", [])
+            if not isinstance(invited_state, list):
+                invited_state = []
+
+            invited_state = list(invited_state)
             invited_state.append(invite)
             invited[room.room_id] = {"invite_state": {"events": invited_state}}
 
@@ -476,7 +481,10 @@ async def encode_knocked(
         # Extract the stripped room state from the unsigned dict
         # This is for clients to get a little bit of information about
         # the room they've knocked on, without revealing any sensitive information
-        knocked_state = list(unsigned.pop("knock_room_state", []))
+        knocked_state = unsigned.pop("knock_room_state", [])
+        if not isinstance(knocked_state, list):
+            knocked_state = []
+        knocked_state = list(knocked_state)
 
         # Append the actual knock membership event itself as well. This provides
         # the client with:

From 4b7154c58501b4bf5e1c2d6c11ebef96529f2fdf Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 5 Nov 2024 15:05:22 -0700
Subject: [PATCH 134/147] Don't allow unsupported content-type

Co-authored-by: Eric Eastwood
---
 synapse/http/site.py    | 36 ++++++++++++++++++++++++++++
 tests/http/test_site.py | 53 +++++++++++++++++++++++++++++++++++++++++
 2 files changed, 89 insertions(+)

diff --git a/synapse/http/site.py b/synapse/http/site.py
index 1cd90cb9b72b..e83a4447b2fc 100644
--- a/synapse/http/site.py
+++ b/synapse/http/site.py
@@ -21,6 +21,7 @@
 import contextlib
 import logging
 import time
+from http import HTTPStatus
 from typing import TYPE_CHECKING, Any, Generator, Optional, Tuple, Union
 
 import attr
@@ -139,6 +140,41 @@ def __repr__(self) -> str:
             self.synapse_site.site_tag,
         )
 
+    # Twisted machinery: this method is called by the Channel once the full request has
+    # been received, to dispatch the request to a resource.
+    #
+    # We're patching Twisted to bail/abort early when we see someone trying to upload
+    # `multipart/form-data` so we can avoid Twisted parsing the entire request body into
+    # memory (a problem specific to this `Content-Type`). This protects us from an
+    # attacker uploading something bigger than the available RAM and crashing the
+    # server with a `MemoryError`, or carefully blocking just enough resources to
+    # cause all other requests to fail.
+    #
+    # FIXME: This can be removed once Twisted releases a fix and we update to a
+    # version that is patched
+    def requestReceived(self, command: bytes, path: bytes, version: bytes) -> None:
+        if command == b"POST":
+            ctype = self.requestHeaders.getRawHeaders(b"content-type")
+            if ctype and b"multipart/form-data" in ctype[0]:
+                self.method, self.uri = command, path
+                self.clientproto = version
+                self.code = HTTPStatus.UNSUPPORTED_MEDIA_TYPE.value
+                self.code_message = bytes(
+                    HTTPStatus.UNSUPPORTED_MEDIA_TYPE.phrase, "ascii"
+                )
+                self.responseHeaders.setRawHeaders(b"content-length", [b"0"])
+
+                logger.warning(
+                    "Aborting connection from %s because `content-type: multipart/form-data` is unsupported: %s %s",
+                    self.client,
+                    command,
+                    path,
+                )
+                self.write(b"")
+                self.loseConnection()
+                return
+        return super().requestReceived(command, path, version)
+
     def handleContentChunk(self, data: bytes) -> None:
         # we should have a `content` by now.
         assert self.content, "handleContentChunk() called before gotLength()"
diff --git a/tests/http/test_site.py b/tests/http/test_site.py
index bfa26a329c5e..fc620c705a5a 100644
--- a/tests/http/test_site.py
+++ b/tests/http/test_site.py
@@ -90,3 +90,56 @@ def test_large_request(self) -> None:
         # default max upload size is 50M, so it should drop on the next buffer after
         # that.
         self.assertEqual(sent, 50 * 1024 * 1024 + 1024)
+
+    def test_content_type_multipart(self) -> None:
+        """HTTP POST requests with `content-type: multipart/form-data` should be rejected"""
+        self.hs.start_listening()
+
+        # find the HTTP server which is configured to listen on port 0
+        (port, factory, _backlog, interface) = self.reactor.tcpServers[0]
+        self.assertEqual(interface, "::")
+        self.assertEqual(port, 0)
+
+        # as a control case, first send a regular request.
+
+        # complete the connection and wire it up to a fake transport
+        client_address = IPv6Address("TCP", "::1", 2345)
+        protocol = factory.buildProtocol(client_address)
+        transport = StringTransport()
+        protocol.makeConnection(transport)
+
+        protocol.dataReceived(
+            b"POST / HTTP/1.1\r\n"
+            b"Connection: close\r\n"
+            b"Transfer-Encoding: chunked\r\n"
+            b"\r\n"
+            b"0\r\n"
+            b"\r\n"
+        )
+
+        while not transport.disconnecting:
+            self.reactor.advance(1)
+
+        # we should get a 404
+        self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 404 ")
+
+        # now send request with content-type header
+        protocol = factory.buildProtocol(client_address)
+        transport = StringTransport()
+        protocol.makeConnection(transport)
+
+        protocol.dataReceived(
+            b"POST / HTTP/1.1\r\n"
+            b"Connection: close\r\n"
+            b"Transfer-Encoding: chunked\r\n"
+            b"Content-Type: multipart/form-data\r\n"
+            b"\r\n"
+            b"0\r\n"
+            b"\r\n"
+        )
+
+        while not transport.disconnecting:
+            self.reactor.advance(1)
+
+        # we should get a 415
+        self.assertRegex(transport.value().decode(), r"^HTTP/1\.1 415 ")

From b64a4e5fbbbf119b6c65aedf0d999b4237d55503 Mon Sep 17 00:00:00 2001
From: Olivier 'reivilibre
Date: Fri, 22 Nov 2024 15:30:29 +0000
Subject: [PATCH 135/147] Restrict which image formats we will decode in order
 to generate thumbnails

---
 synapse/media/thumbnailer.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/synapse/media/thumbnailer.py b/synapse/media/thumbnailer.py
index 3845067835a6..d6b8ce4a09fa 100644
--- a/synapse/media/thumbnailer.py
+++ b/synapse/media/thumbnailer.py
@@ -67,6 +67,11 @@ class ThumbnailError(Exception):
 
 class Thumbnailer:
     FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"}
 
+    # Which image formats we allow Pillow to open.
+ # This should intentionally be kept restrictive, because the decoder of any + # format in this list becomes part of our trusted computing base. + PILLOW_FORMATS = ("jpeg", "png", "webp", "gif") + @staticmethod def set_limits(max_image_pixels: int) -> None: Image.MAX_IMAGE_PIXELS = max_image_pixels @@ -76,7 +81,7 @@ def __init__(self, input_path: str): self._closed = False try: - self.image = Image.open(input_path) + self.image = Image.open(input_path, formats=self.PILLOW_FORMATS) except OSError as e: # If an error occurs opening the image, a thumbnail won't be able to # be generated. From fe3d88b833f76742874642c119b14b788341c905 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 3 Dec 2024 11:15:00 +0100 Subject: [PATCH 136/147] 1.120.1 --- CHANGES.md | 42 ++++++++++++++++++++++++++++++++++++++++ changelog.d/17970.bugfix | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 49 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/17970.bugfix diff --git a/CHANGES.md b/CHANGES.md index 0caac3f89e33..bac2b4c21041 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,45 @@ +# Synapse 1.120.1 (2024-12-03) + +This patch release fixes multiple security vulnerabilities, some affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild. + +Administrators who are unable to update Synapse may use the workarounds described in the linked GitHub Security Advisory below. + +### Security advisory + +The following issues are fixed in 1.120.1. + +- [GHSA-rfq8-j7rh-8hf2](https://github.com/element-hq/synapse/security/advisories/GHSA-rfq8-j7rh-8hf2) / [CVE-2024-52805](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-52805): **Unsupported content types can lead to memory exhaustion** + + Synapse instances which have a high `max_upload_size` and which don't have a reverse proxy in front of them that would otherwise limit upload size are affected. + + Fixed by [4b7154c58501b4bf5e1c2d6c11ebef96529f2fdf](https://github.com/element-hq/synapse/commit/4b7154c58501b4bf5e1c2d6c11ebef96529f2fdf). + +- [GHSA-f3r3-h2mq-hx2h](https://github.com/element-hq/synapse/security/advisories/GHSA-f3r3-h2mq-hx2h) / [CVE-2024-52815](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-52815): **Malicious invites via federation can break a user's sync** + + Fixed by [d82e1ed357b7ee21dff83d06cba7a67840cfd464](https://github.com/element-hq/synapse/commit/d82e1ed357b7ee21dff83d06cba7a67840cfd464). + +- [GHSA-vp6v-whfm-rv3g](https://github.com/element-hq/synapse/security/advisories/GHSA-vp6v-whfm-rv3g) / [CVE-2024-53863](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-53863): **Synapse can be forced to thumbnail unexpected file formats, invoking potentially untrustworthy decoders** + + Synapse instances can disable dynamic thumbnailing by setting `dynamic_thumbnails` to `false` in the configuration file. + + Fixed by [b64a4e5fbbbf119b6c65aedf0d999b4237d55503](https://github.com/element-hq/synapse/commit/b64a4e5fbbbf119b6c65aedf0d999b4237d55503). + +- [GHSA-56w4-5538-8v8h](https://github.com/element-hq/synapse/security/advisories/GHSA-56w4-5538-8v8h) / [CVE-2024-53867](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2024-53867): **The Sliding Sync feature on Synapse versions between 1.113.0rc1 and 1.120.0 can leak partial room state changes to users no longer in a room** + + Non-state events, like messages, are unaffected. 
+
+  Synapse instances can disable the Sliding Sync feature by setting `experimental_features.msc3575_enabled` to `false` in the configuration file.
+
+  Fixed by [4daa533e82f345ce87b9495d31781af570ba3ead](https://github.com/element-hq/synapse/commit/4daa533e82f345ce87b9495d31781af570ba3ead).
+
+See the advisories for more details. If you have any questions, email [security at element.io](mailto:security@element.io).
+
+### Bugfixes
+
+- Fix release process to not create duplicate releases. ([\#17970](https://github.com/element-hq/synapse/issues/17970))
+
+
+
 # Synapse 1.120.0 (2024-11-26)

 ### Bugfixes
diff --git a/changelog.d/17970.bugfix b/changelog.d/17970.bugfix
deleted file mode 100644
index 835079de3f3b..000000000000
--- a/changelog.d/17970.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix release process to not create duplicate releases.
diff --git a/debian/changelog b/debian/changelog
index bd4466d7aaae..936447b8b643 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.120.1) stable; urgency=medium
+
+  * New synapse release 1.120.1.
+
+ -- Synapse Packaging team  Tue, 03 Dec 2024 09:07:57 +0000
+
 matrix-synapse-py3 (1.120.0) stable; urgency=medium

   * New synapse release 1.120.0.
diff --git a/pyproject.toml b/pyproject.toml
index 5fd1d7c19837..f838d4d7bed6 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust"

 [tool.poetry]
 name = "matrix-synapse"
-version = "1.120.0"
+version = "1.120.1"
 description = "Homeserver for the Matrix decentralised comms protocol"
 authors = ["Matrix.org Team and Contributors "]
 license = "AGPL-3.0-or-later"

From b257c7ab199f8c800254764d2ac5d4a9708ceaa2 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Tue, 3 Dec 2024 06:54:25 -0600
Subject: [PATCH 137/147] Be able to test `/login/sso/redirect` in Complement
 (#17986)

Be able to test `/login/sso/redirect` in Complement

Spawning from https://github.com/element-hq/sbg/pull/421#discussion_r1854926218 where we have a proxy that intercepts responses to `/_matrix/client/v3/login/sso/redirect(/{idpId})` in order to upgrade them to use OAuth 2.0 Pushed Authorization Requests (PAR).

We have some Complement tests in that codebase that go over this flow and these changes are required [in order for the URLs to line up](https://github.com/element-hq/synapse/blob/d648c8ce3f4cbf61191b9f5302e405f7b0288677/synapse/rest/client/login.py#L652-L673).
---
 changelog.d/17986.misc                              | 1 +
 docker/complement/conf/workers-shared-extra.yaml.j2 | 1 +
 docker/conf-workers/nginx.conf.j2                   | 2 +-
 scripts-dev/complement.sh                           | 4 ++++
 synapse/config/logger.py                            | 1 +
 5 files changed, 8 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/17986.misc

diff --git a/changelog.d/17986.misc b/changelog.d/17986.misc
new file mode 100644
index 000000000000..c062f3ecdf98
--- /dev/null
+++ b/changelog.d/17986.misc
@@ -0,0 +1 @@
+Fix Docker and Complement config to be able to use `public_baseurl`.
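For illustration, the end-to-end effect of this change can be checked by hand: with `public_baseurl` set (as in the Complement config below), the `Location` header issued by `/_matrix/client/v3/login/sso/redirect` should be derived from that base URL. The following is a minimal sketch, not part of the patch itself; the base URL mirrors the config below and the `redirectUrl` is a placeholder.

    # Sketch only (not from this patch): fetch the SSO redirect without
    # following it, so the Location header can be inspected directly.
    import urllib.error
    import urllib.request
    from urllib.parse import quote

    PUBLIC_BASEURL = "http://127.0.0.1:8008/"  # placeholder, mirrors the config below

    class NoRedirect(urllib.request.HTTPRedirectHandler):
        def redirect_request(self, req, fp, code, msg, headers, newurl):
            return None  # returning None makes urlopen raise HTTPError on the 302

    opener = urllib.request.build_opener(NoRedirect)
    url = (
        PUBLIC_BASEURL
        + "_matrix/client/v3/login/sso/redirect?redirectUrl="
        + quote("http://client.example/done", safe="")
    )
    try:
        opener.open(url)
    except urllib.error.HTTPError as e:
        # The redirect target the homeserver builds is anchored on public_baseurl.
        print(e.code, e.headers.get("Location"))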
diff --git a/docker/complement/conf/workers-shared-extra.yaml.j2 b/docker/complement/conf/workers-shared-extra.yaml.j2 index 9a74c617bc4d..c5228af72d06 100644 --- a/docker/complement/conf/workers-shared-extra.yaml.j2 +++ b/docker/complement/conf/workers-shared-extra.yaml.j2 @@ -7,6 +7,7 @@ #} ## Server ## +public_baseurl: http://127.0.0.1:8008/ report_stats: False trusted_key_servers: [] enable_registration: true diff --git a/docker/conf-workers/nginx.conf.j2 b/docker/conf-workers/nginx.conf.j2 index d1e02af72328..c3f9b584d299 100644 --- a/docker/conf-workers/nginx.conf.j2 +++ b/docker/conf-workers/nginx.conf.j2 @@ -42,6 +42,6 @@ server { {% endif %} proxy_set_header X-Forwarded-For $remote_addr; proxy_set_header X-Forwarded-Proto $scheme; - proxy_set_header Host $host; + proxy_set_header Host $host:$server_port; } } diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index b6dcb96e2c90..6be9177f1105 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -195,6 +195,10 @@ if [ -z "$skip_docker_build" ]; then # Build the unified Complement image (from the worker Synapse image we just built). echo_if_github "::group::Build Docker image: complement/Dockerfile" $CONTAINER_RUNTIME build -t complement-synapse \ + `# This is the tag we end up pushing to the registry (see` \ + `# .github/workflows/push_complement_image.yml) so let's just label it now` \ + `# so people can reference it by the same name locally.` \ + -t ghcr.io/element-hq/synapse/complement-synapse \ -f "docker/complement/Dockerfile" "docker/complement" echo_if_github "::endgroup::" diff --git a/synapse/config/logger.py b/synapse/config/logger.py index cfc1a57107f6..e5aca36b75e0 100644 --- a/synapse/config/logger.py +++ b/synapse/config/logger.py @@ -360,5 +360,6 @@ def setup_logging( "Licensed under the AGPL 3.0 license. Website: https://github.com/element-hq/synapse" ) logging.info("Server hostname: %s", config.server.server_name) + logging.info("Public Base URL: %s", config.server.public_baseurl) logging.info("Instance name: %s", hs.get_instance_name()) logging.info("Twisted reactor: %s", type(reactor).__name__) From 650492ed4d95ca8bf9a41eb0456c7e82b4616a22 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 3 Dec 2024 16:39:41 +0100 Subject: [PATCH 138/147] Stop building wheels for macOS --- .github/workflows/release-artifacts.yml | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 42a374fa1908..0c0e023c7531 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -5,7 +5,7 @@ name: Build release artifacts on: # we build on PRs and develop to (hopefully) get early warning # of things breaking (but only build one set of debs). PRs skip - # building wheels on macOS & ARM. + # building wheels on ARM. pull_request: push: branches: ["develop", "release-*"] @@ -111,7 +111,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-22.04, macos-13] + os: [ubuntu-22.04] arch: [x86_64, aarch64] # is_pr is a flag used to exclude certain jobs from the matrix on PRs. # It is not read by the rest of the workflow. @@ -119,12 +119,6 @@ jobs: - ${{ startsWith(github.ref, 'refs/pull/') }} exclude: - # Don't build macos wheels on PR CI. - - is_pr: true - os: "macos-13" - # Don't build aarch64 wheels on mac. - - os: "macos-13" - arch: aarch64 # Don't build aarch64 wheels on PR CI. 
- is_pr: true arch: aarch64 From 6f689d452c5632df558e76bc5a24111e555a3c8a Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Tue, 3 Dec 2024 16:58:40 +0100 Subject: [PATCH 139/147] 1.120.2 --- CHANGES.md | 8 ++++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 15 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index bac2b4c21041..535c41f6a66b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,11 @@ +# Synapse 1.120.2 (2024-12-03) + +This version has building of wheels for macOS disabled. +It is functionally identical to 1.120.1, which contains multiple security fixes. +If you are already using 1.120.1, there is no need to upgrade to this version. + + + # Synapse 1.120.1 (2024-12-03) This patch release fixes multiple security vulnerabilities, some affecting all prior versions of Synapse. Server administrators are encouraged to update Synapse as soon as possible. We are not aware of these vulnerabilities being exploited in the wild. diff --git a/debian/changelog b/debian/changelog index 936447b8b643..3aa74d1d24e7 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.120.2) stable; urgency=medium + + * New synapse release 1.120.2. + + -- Synapse Packaging team Tue, 03 Dec 2024 15:43:37 +0000 + matrix-synapse-py3 (1.120.1) stable; urgency=medium * New synapse release 1.120.1. diff --git a/pyproject.toml b/pyproject.toml index f838d4d7bed6..a403952d8930 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.120.1" +version = "1.120.2" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From abf44ad3244aa92273dcf79c53babd52363123f6 Mon Sep 17 00:00:00 2001 From: manuroe Date: Tue, 3 Dec 2024 23:58:43 +0100 Subject: [PATCH 140/147] MSC4076: Add disable_badge_count to pusher configuration (#17975) This PR implements [MSC4076: Let E2EE clients calculate app badge counts themselves (disable_badge_count)](https://github.com/matrix-org/matrix-spec-proposals/pull/4076). --- changelog.d/17975.feature | 1 + synapse/config/experimental.py | 3 ++ synapse/push/httppusher.py | 16 +++++-- tests/push/test_http.py | 84 +++++++++++++++++++++++++++++++++- 4 files changed, 98 insertions(+), 6 deletions(-) create mode 100644 changelog.d/17975.feature diff --git a/changelog.d/17975.feature b/changelog.d/17975.feature new file mode 100644 index 000000000000..48f41bddad03 --- /dev/null +++ b/changelog.d/17975.feature @@ -0,0 +1 @@ +[MSC4076](https://github.com/matrix-org/matrix-spec-proposals/pull/4076): Add `disable_badge_count` to pusher configuration. 
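For illustration, a client opts in to this behaviour per pusher by including the MSC4076 flag in the pusher `data` when registering it. The following is a minimal sketch, not taken from this patch: the homeserver URL, access token and gateway URL are placeholders, and the server must have `experimental_features.msc4076_enabled: true` configured.

    # Sketch only (not from this patch): register an HTTP pusher that asks the
    # homeserver to omit badge counts from its push notifications.
    import json
    import urllib.request

    HOMESERVER = "https://matrix.example.com"  # placeholder
    ACCESS_TOKEN = "syt_placeholder_token"  # placeholder

    pusher = {
        "app_id": "m.http",
        "kind": "http",
        "app_display_name": "Example App",
        "device_display_name": "Example Device",
        "pushkey": "a@example.com",
        "lang": "en",
        "data": {
            "url": "http://example.com/_matrix/push/v1/notify",
            # The MSC4076 opt-out: notifications will carry no `counts` field.
            "org.matrix.msc4076.disable_badge_count": True,
        },
    }

    req = urllib.request.Request(
        HOMESERVER + "/_matrix/client/v3/pushers/set",
        data=json.dumps(pusher).encode("utf-8"),
        headers={
            "Authorization": "Bearer " + ACCESS_TOKEN,
            "Content-Type": "application/json",
        },
        method="POST",
    )
    urllib.request.urlopen(req)

With the flag set, the push gateway still receives `event_id`, `room_id` and `prio`, but no `counts.unread`, leaving an E2EE client free to compute its own badge count; that is what the `disable_badge_count` branches in `httppusher.py` below implement.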
diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 3411179a2a31..57ac27697f4b 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -448,3 +448,6 @@ def read_config(self, config: JsonDict, **kwargs: Any) -> None: # MSC4222: Adding `state_after` to sync v2 self.msc4222_enabled: bool = experimental.get("msc4222_enabled", False) + + # MSC4076: Add `disable_badge_count`` to pusher configuration + self.msc4076_enabled: bool = experimental.get("msc4076_enabled", False) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index dd9b64d6effb..69790ecab545 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -127,6 +127,11 @@ def __init__(self, hs: "HomeServer", pusher_config: PusherConfig): if self.data is None: raise PusherConfigException("'data' key can not be null for HTTP pusher") + # Check if badge counts should be disabled for this push gateway + self.disable_badge_count = self.hs.config.experimental.msc4076_enabled and bool( + self.data.get("org.matrix.msc4076.disable_badge_count", False) + ) + self.name = "%s/%s/%s" % ( pusher_config.user_name, pusher_config.app_id, @@ -461,9 +466,10 @@ async def dispatch_push_event( content: JsonDict = { "event_id": event.event_id, "room_id": event.room_id, - "counts": {"unread": badge}, "prio": priority, } + if not self.disable_badge_count: + content["counts"] = {"unread": badge} # event_id_only doesn't include the tweaks, so override them. tweaks = {} else: @@ -478,11 +484,11 @@ async def dispatch_push_event( "type": event.type, "sender": event.user_id, "prio": priority, - "counts": { - "unread": badge, - # 'missed_calls': 2 - }, } + if not self.disable_badge_count: + content["counts"] = { + "unread": badge, + } if event.type == "m.room.member" and event.is_state(): content["membership"] = event.content["membership"] content["user_is_target"] = event.state_key == self.user_id diff --git a/tests/push/test_http.py b/tests/push/test_http.py index bcca472617e3..5c235bbe5363 100644 --- a/tests/push/test_http.py +++ b/tests/push/test_http.py @@ -17,9 +17,11 @@ # [This file includes modifications made by New Vector Limited] # # -from typing import Any, List, Tuple +from typing import Any, Dict, List, Tuple from unittest.mock import Mock +from parameterized import parameterized + from twisted.internet.defer import Deferred from twisted.test.proto_helpers import MemoryReactor @@ -1085,3 +1087,83 @@ def test_jitter(self) -> None: self.pump() self.assertEqual(len(self.push_attempts), 11) + + @parameterized.expand( + [ + # Badge count disabled + (True, True), + (True, False), + # Badge count enabled + (False, True), + (False, False), + ] + ) + @override_config({"experimental_features": {"msc4076_enabled": True}}) + def test_msc4076_badge_count( + self, disable_badge_count: bool, event_id_only: bool + ) -> None: + # Register the user who gets notified + user_id = self.register_user("user", "pass") + access_token = self.login("user", "pass") + + # Register the user who sends the message + other_user_id = self.register_user("otheruser", "pass") + other_access_token = self.login("otheruser", "pass") + + # Register the pusher with disable_badge_count set to True + user_tuple = self.get_success( + self.hs.get_datastores().main.get_user_by_access_token(access_token) + ) + assert user_tuple is not None + device_id = user_tuple.device_id + + # Set the push data dict based on test input parameters + push_data: Dict[str, Any] = { + "url": 
"http://example.com/_matrix/push/v1/notify", + } + if disable_badge_count: + push_data["org.matrix.msc4076.disable_badge_count"] = True + if event_id_only: + push_data["format"] = "event_id_only" + + self.get_success( + self.hs.get_pusherpool().add_or_update_pusher( + user_id=user_id, + device_id=device_id, + kind="http", + app_id="m.http", + app_display_name="HTTP Push Notifications", + device_display_name="pushy push", + pushkey="a@example.com", + lang=None, + data=push_data, + ) + ) + + # Create a room + room = self.helper.create_room_as(user_id, tok=access_token) + + # The other user joins + self.helper.join(room=room, user=other_user_id, tok=other_access_token) + + # The other user sends a message + self.helper.send(room, body="Hi!", tok=other_access_token) + + # Advance time a bit, so the pusher will register something has happened + self.pump() + + # One push was attempted to be sent + self.assertEqual(len(self.push_attempts), 1) + self.assertEqual( + self.push_attempts[0][1], "http://example.com/_matrix/push/v1/notify" + ) + + if disable_badge_count: + # Verify that the notification DOESN'T contain a counts field + self.assertNotIn("counts", self.push_attempts[0][2]["notification"]) + else: + # Ensure that the notification DOES contain a counts field + self.assertIn("counts", self.push_attempts[0][2]["notification"]) + self.assertEqual( + self.push_attempts[0][2]["notification"]["counts"]["unread"], 1 + ) From 23b626f2e68e985a3218abd0fc7d03b53bbcaf89 Mon Sep 17 00:00:00 2001 From: Quentin Gliech Date: Wed, 4 Dec 2024 12:04:49 +0100 Subject: [PATCH 141/147] Support for MSC4190: device management for application services (#17705) This is an implementation of MSC4190, which allows appservices to manage their user's devices without /login & /logout. --------- Co-authored-by: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> --- changelog.d/17705.feature | 1 + synapse/appservice/__init__.py | 2 + synapse/config/appservice.py | 13 ++ synapse/handlers/device.py | 34 +++++ synapse/handlers/register.py | 6 +- synapse/rest/client/devices.py | 62 +++++--- synapse/rest/client/register.py | 7 +- tests/handlers/test_appservice.py | 15 +- tests/handlers/test_oauth_delegation.py | 31 +++- tests/rest/client/test_devices.py | 181 ++++++++++++++++++++++++ tests/rest/client/test_register.py | 28 ++++ tests/unittest.py | 4 +- 12 files changed, 351 insertions(+), 33 deletions(-) create mode 100644 changelog.d/17705.feature diff --git a/changelog.d/17705.feature b/changelog.d/17705.feature new file mode 100644 index 000000000000..e2cd7bca4f41 --- /dev/null +++ b/changelog.d/17705.feature @@ -0,0 +1 @@ +Support for [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190): device management for Application Services. 
diff --git a/synapse/appservice/__init__.py b/synapse/appservice/__init__.py index a96cdbf1e7e9..6ee5240c4ee0 100644 --- a/synapse/appservice/__init__.py +++ b/synapse/appservice/__init__.py @@ -87,6 +87,7 @@ def __init__( ip_range_whitelist: Optional[IPSet] = None, supports_ephemeral: bool = False, msc3202_transaction_extensions: bool = False, + msc4190_device_management: bool = False, ): self.token = token self.url = ( @@ -100,6 +101,7 @@ def __init__( self.ip_range_whitelist = ip_range_whitelist self.supports_ephemeral = supports_ephemeral self.msc3202_transaction_extensions = msc3202_transaction_extensions + self.msc4190_device_management = msc4190_device_management if "|" in self.id: raise Exception("application service ID cannot contain '|' character") diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index 6ff00e1ff8b3..dda6bcd1b790 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -183,6 +183,18 @@ def _load_appservice( "The `org.matrix.msc3202` option should be true or false if specified." ) + # Opt-in flag for the MSC4190 behaviours. + # When enabled, the following C-S API endpoints change for appservices: + # - POST /register does not return an access token + # - PUT /devices/{device_id} creates a new device if one does not exist + # - DELETE /devices/{device_id} no longer requires UIA + # - POST /delete_devices/{device_id} no longer requires UIA + msc4190_enabled = as_info.get("io.element.msc4190", False) + if not isinstance(msc4190_enabled, bool): + raise ValueError( + "The `io.element.msc4190` option should be true or false if specified." + ) + return ApplicationService( token=as_info["as_token"], url=as_info["url"], @@ -195,4 +207,5 @@ def _load_appservice( ip_range_whitelist=ip_range_whitelist, supports_ephemeral=supports_ephemeral, msc3202_transaction_extensions=msc3202_transaction_extensions, + msc4190_device_management=msc4190_enabled, ) diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index d88660e273bd..d9622080b4f4 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -729,6 +729,40 @@ async def delete_devices(self, user_id: str, device_ids: List[str]) -> None: await self.notify_device_update(user_id, device_ids) + async def upsert_device( + self, user_id: str, device_id: str, display_name: Optional[str] = None + ) -> bool: + """Create or update a device + + Args: + user_id: The user to update devices of. + device_id: The device to update. + display_name: The new display name for this device. + + Returns: + True if the device was created, False if it was updated. + + """ + + # Reject a new displayname which is too long. 
+ self._check_device_name_length(display_name) + + created = await self.store.store_device( + user_id, + device_id, + initial_device_display_name=display_name, + ) + + if not created: + await self.store.update_device( + user_id, + device_id, + new_display_name=display_name, + ) + + await self.notify_device_update(user_id, [device_id]) + return created + async def update_device(self, user_id: str, device_id: str, content: dict) -> None: """Update the given device diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index c200e2956956..c49db83ce7a7 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -630,7 +630,9 @@ async def post_consent_actions(self, user_id: str) -> None: """ await self._auto_join_rooms(user_id) - async def appservice_register(self, user_localpart: str, as_token: str) -> str: + async def appservice_register( + self, user_localpart: str, as_token: str + ) -> Tuple[str, ApplicationService]: user = UserID(user_localpart, self.hs.hostname) user_id = user.to_string() service = self.store.get_app_service_by_token(as_token) @@ -653,7 +655,7 @@ async def appservice_register(self, user_localpart: str, as_token: str) -> str: appservice_id=service_id, create_profile_with_displayname=user.localpart, ) - return user_id + return (user_id, service) def check_user_id_not_appservice_exclusive( self, user_id: str, allowed_appservice: Optional[ApplicationService] = None diff --git a/synapse/rest/client/devices.py b/synapse/rest/client/devices.py index 6a45a5d13079..4607b2349459 100644 --- a/synapse/rest/client/devices.py +++ b/synapse/rest/client/devices.py @@ -114,15 +114,19 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: else: raise e - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body.dict(exclude_unset=True), - "remove device(s) from your account", - # Users might call this multiple times in a row while cleaning up - # devices, allow a single UI auth session to be re-used. - can_skip_ui_auth=True, - ) + if requester.app_service and requester.app_service.msc4190_device_management: + # MSC4190 can skip UIA for this endpoint + pass + else: + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body.dict(exclude_unset=True), + "remove device(s) from your account", + # Users might call this multiple times in a row while cleaning up + # devices, allow a single UI auth session to be re-used. + can_skip_ui_auth=True, + ) await self.device_handler.delete_devices( requester.user.to_string(), body.devices @@ -175,9 +179,6 @@ class DeleteBody(RequestBodyModel): async def on_DELETE( self, request: SynapseRequest, device_id: str ) -> Tuple[int, JsonDict]: - if self._msc3861_oauth_delegation_enabled: - raise UnrecognizedRequestError(code=404) - requester = await self.auth.get_user_by_req(request) try: @@ -192,15 +193,24 @@ async def on_DELETE( else: raise - await self.auth_handler.validate_user_via_ui_auth( - requester, - request, - body.dict(exclude_unset=True), - "remove a device from your account", - # Users might call this multiple times in a row while cleaning up - # devices, allow a single UI auth session to be re-used. 
- can_skip_ui_auth=True, - ) + if requester.app_service and requester.app_service.msc4190_device_management: + # MSC4190 allows appservices to delete devices through this endpoint without UIA + # It's also allowed with MSC3861 enabled + pass + + else: + if self._msc3861_oauth_delegation_enabled: + raise UnrecognizedRequestError(code=404) + + await self.auth_handler.validate_user_via_ui_auth( + requester, + request, + body.dict(exclude_unset=True), + "remove a device from your account", + # Users might call this multiple times in a row while cleaning up + # devices, allow a single UI auth session to be re-used. + can_skip_ui_auth=True, + ) await self.device_handler.delete_devices( requester.user.to_string(), [device_id] @@ -216,6 +226,16 @@ async def on_PUT( requester = await self.auth.get_user_by_req(request, allow_guest=True) body = parse_and_validate_json_object_from_request(request, self.PutBody) + + # MSC4190 allows appservices to create devices through this endpoint + if requester.app_service and requester.app_service.msc4190_device_management: + created = await self.device_handler.upsert_device( + user_id=requester.user.to_string(), + device_id=device_id, + display_name=body.display_name, + ) + return 201 if created else 200, {} + await self.device_handler.update_device( requester.user.to_string(), device_id, body.dict() ) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 61e143684185..ad76f188ab4d 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -771,9 +771,12 @@ async def _do_appservice_registration( body: JsonDict, should_issue_refresh_token: bool = False, ) -> JsonDict: - user_id = await self.registration_handler.appservice_register( + user_id, appservice = await self.registration_handler.appservice_register( username, as_token ) + if appservice.msc4190_device_management: + body["inhibit_login"] = True + return await self._create_registration_details( user_id, body, @@ -937,7 +940,7 @@ async def on_POST(self, request: SynapseRequest) -> Tuple[int, JsonDict]: as_token = self.auth.get_access_token_from_request(request) - user_id = await self.registration_handler.appservice_register( + user_id, _ = await self.registration_handler.appservice_register( desired_username, as_token ) return 200, {"user_id": user_id} diff --git a/tests/handlers/test_appservice.py b/tests/handlers/test_appservice.py index 1eec0d43b7e5..1db630e9e47b 100644 --- a/tests/handlers/test_appservice.py +++ b/tests/handlers/test_appservice.py @@ -1165,12 +1165,23 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.hs.get_datastores().main.services_cache = [self._service] # Register some appservice users - self._sender_user, self._sender_device = self.register_appservice_user( + user_id, device_id = self.register_appservice_user( "as.sender", self._service_token ) - self._namespaced_user, self._namespaced_device = self.register_appservice_user( + # With MSC4190 enabled, there will not be a device created + # during AS registration. However MSC4190 is not enabled + # in this test. It may become the default behaviour in the + # future, in which case this test will need to be updated. + assert device_id is not None + self._sender_user = user_id + self._sender_device = device_id + + user_id, device_id = self.register_appservice_user( "_as_user1", self._service_token ) + assert device_id is not None + self._namespaced_user = user_id + self._namespaced_device = device_id # Register a real user as well. 
self._real_user = self.register_user("real.user", "meow") diff --git a/tests/handlers/test_oauth_delegation.py b/tests/handlers/test_oauth_delegation.py index 5b5dc713d17b..5f73469daa4c 100644 --- a/tests/handlers/test_oauth_delegation.py +++ b/tests/handlers/test_oauth_delegation.py @@ -560,9 +560,15 @@ def expect_unauthorized( self.assertEqual(channel.code, 401, channel.json_body) def expect_unrecognized( - self, method: str, path: str, content: Union[bytes, str, JsonDict] = "" + self, + method: str, + path: str, + content: Union[bytes, str, JsonDict] = "", + auth: bool = False, ) -> None: - channel = self.make_request(method, path, content) + channel = self.make_request( + method, path, content, access_token="token" if auth else None + ) self.assertEqual(channel.code, 404, channel.json_body) self.assertEqual( @@ -648,8 +654,25 @@ def test_session_management_endpoints_removed(self) -> None: def test_device_management_endpoints_removed(self) -> None: """Test that device management endpoints that were removed in MSC2964 are no longer available.""" - self.expect_unrecognized("POST", "/_matrix/client/v3/delete_devices") - self.expect_unrecognized("DELETE", "/_matrix/client/v3/devices/{DEVICE}") + + # Because we still support those endpoints with ASes, it checks the + # access token before returning 404 + self.http_client.request = AsyncMock( + return_value=FakeResponse.json( + code=200, + payload={ + "active": True, + "sub": SUBJECT, + "scope": " ".join([MATRIX_USER_SCOPE, MATRIX_DEVICE_SCOPE]), + "username": USERNAME, + }, + ) + ) + + self.expect_unrecognized("POST", "/_matrix/client/v3/delete_devices", auth=True) + self.expect_unrecognized( + "DELETE", "/_matrix/client/v3/devices/{DEVICE}", auth=True + ) def test_openid_endpoints_removed(self) -> None: """Test that OpenID id_token endpoints that were removed in MSC2964 are no longer available.""" diff --git a/tests/rest/client/test_devices.py b/tests/rest/client/test_devices.py index a3ed12a38fee..dd3abdebac0d 100644 --- a/tests/rest/client/test_devices.py +++ b/tests/rest/client/test_devices.py @@ -24,6 +24,7 @@ from twisted.test.proto_helpers import MemoryReactor from synapse.api.errors import NotFoundError +from synapse.appservice import ApplicationService from synapse.rest import admin, devices, sync from synapse.rest.client import keys, login, register from synapse.server import HomeServer @@ -455,3 +456,183 @@ def test_msc3814_dehydrated_device_delete_works(self) -> None: token, ) self.assertEqual(channel.json_body["device_keys"], {"@mikey:test": {}}) + + +class MSC4190AppserviceDevicesTestCase(unittest.HomeserverTestCase): + servlets = [ + register.register_servlets, + devices.register_servlets, + ] + + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: + self.hs = self.setup_test_homeserver() + + # This application service uses the new MSC4190 behaviours + self.msc4190_service = ApplicationService( + id="msc4190", + token="some_token", + hs_token="some_token", + sender="@as:example.com", + namespaces={ + ApplicationService.NS_USERS: [{"regex": "@.*", "exclusive": False}] + }, + msc4190_device_management=True, + ) + # This application service doesn't use the new MSC4190 behaviours + self.pre_msc_service = ApplicationService( + id="regular", + token="other_token", + hs_token="other_token", + sender="@as2:example.com", + namespaces={ + ApplicationService.NS_USERS: [{"regex": "@.*", "exclusive": False}] + }, + msc4190_device_management=False, + ) + 
self.hs.get_datastores().main.services_cache.append(self.msc4190_service) + self.hs.get_datastores().main.services_cache.append(self.pre_msc_service) + return self.hs + + def test_PUT_device(self) -> None: + self.register_appservice_user("alice", self.msc4190_service.token) + self.register_appservice_user("bob", self.pre_msc_service.token) + + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={"display_name": "Alice's device"}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 201, channel.json_body) + + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(len(channel.json_body["devices"]), 1) + self.assertEqual(channel.json_body["devices"][0]["device_id"], "AABBCCDD") + + # Doing a second time should return a 200 instead of a 201 + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={"display_name": "Alice's device"}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # On the regular service, that API should not allow for the + # creation of new devices. + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@bob:test", + content={"display_name": "Bob's device"}, + access_token=self.pre_msc_service.token, + ) + self.assertEqual(channel.code, 404, channel.json_body) + + def test_DELETE_device(self) -> None: + self.register_appservice_user("alice", self.msc4190_service.token) + + # There should be no device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + # Create a device + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 201, channel.json_body) + + # There should be one device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(len(channel.json_body["devices"]), 1) + + # Delete the device. UIA should not be required. 
+ channel = self.make_request( + "DELETE", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # There should be no device again + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + def test_POST_delete_devices(self) -> None: + self.register_appservice_user("alice", self.msc4190_service.token) + + # There should be no device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) + + # Create a device + channel = self.make_request( + "PUT", + "/_matrix/client/v3/devices/AABBCCDD?user_id=@alice:test", + content={}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 201, channel.json_body) + + # There should be one device + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(len(channel.json_body["devices"]), 1) + + # Delete the device with delete_devices + # UIA should not be required. + channel = self.make_request( + "POST", + "/_matrix/client/v3/delete_devices?user_id=@alice:test", + content={"devices": ["AABBCCDD"]}, + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + + # There should be no device again + channel = self.make_request( + "GET", + "/_matrix/client/v3/devices?user_id=@alice:test", + access_token=self.msc4190_service.token, + ) + self.assertEqual(channel.code, 200, channel.json_body) + self.assertEqual(channel.json_body, {"devices": []}) diff --git a/tests/rest/client/test_register.py b/tests/rest/client/test_register.py index c091f403cc06..b697bf6f6754 100644 --- a/tests/rest/client/test_register.py +++ b/tests/rest/client/test_register.py @@ -120,6 +120,34 @@ def test_POST_appservice_registration_invalid(self) -> None: self.assertEqual(channel.code, 401, msg=channel.result) + def test_POST_appservice_msc4190_enabled(self) -> None: + # With MSC4190 enabled, the registration should *not* return an access token + user_id = "@as_user_kermit:test" + as_token = "i_am_an_app_service" + + appservice = ApplicationService( + as_token, + id="1234", + namespaces={"users": [{"regex": r"@as_user.*", "exclusive": True}]}, + sender="@as:test", + msc4190_device_management=True, + ) + + self.hs.get_datastores().main.services_cache.append(appservice) + request_data = { + "username": "as_user_kermit", + "type": APP_SERVICE_REGISTRATION_TYPE, + } + + channel = self.make_request( + b"POST", self.url + b"?access_token=i_am_an_app_service", request_data + ) + + self.assertEqual(channel.code, 200, msg=channel.result) + det_data = {"user_id": user_id, "home_server": self.hs.hostname} + self.assertLessEqual(det_data.items(), channel.json_body.items()) + self.assertNotIn("access_token", channel.json_body) + def test_POST_bad_password(self) -> None: request_data = {"username": "kermit", "password": 666} channel = self.make_request(b"POST", self.url, request_data) diff --git a/tests/unittest.py b/tests/unittest.py index 614e805abd50..6a32861a3e22 100644 --- 
a/tests/unittest.py +++ b/tests/unittest.py @@ -781,7 +781,7 @@ def register_appservice_user( self, username: str, appservice_token: str, - ) -> Tuple[str, str]: + ) -> Tuple[str, Optional[str]]: """Register an appservice user as an application service. Requires the client-facing registration API be registered. @@ -805,7 +805,7 @@ def register_appservice_user( access_token=appservice_token, ) self.assertEqual(channel.code, 200, channel.json_body) - return channel.json_body["user_id"], channel.json_body["device_id"] + return channel.json_body["user_id"], channel.json_body.get("device_id") def login( self, From 05d58b86ac7062def93dd43e3eb794029c10e9e5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 4 Dec 2024 12:53:51 +0000 Subject: [PATCH 142/147] Pin softprops/action-gh-release to v0.1.15 (#17995) We are still seeing duplicate releases on v2.0.5, so roll back further. [Other](https://github.com/Poko-Apps/curl-openssl-android/commit/f8a5a60b7c4b196c703d322bb3d11e9495807426#diff-88ab30345d9874c4336fe50b54b083ba5bdd925be961c34060e6a192b56b0433R72) [repositories](https://github.com/Glistix/glistix/commit/55fca4fec74aa114faf553b563ae5883b5d76be0#diff-e426ed45842837026e10e66af23d9c7077e89eacbe6958ce7cb991130ad05adaR105) seem to have settled on this version. Addresses https://github.com/element-hq/synapse/issues/17991 We're just going to test this during 1.121.0rc1. --- .github/workflows/release-artifacts.yml | 2 +- changelog.d/17995.misc | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/17995.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 0c0e023c7531..deb5ec33e353 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -207,7 +207,7 @@ jobs: tar -cvJf debs.tar.xz debs - name: Attach to release # Pinned to work around https://github.com/softprops/action-gh-release/issues/445 - uses: softprops/action-gh-release@v2.0.5 + uses: softprops/action-gh-release@v0.1.15 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: diff --git a/changelog.d/17995.misc b/changelog.d/17995.misc new file mode 100644 index 000000000000..d8e608c1ae6a --- /dev/null +++ b/changelog.d/17995.misc @@ -0,0 +1 @@ +Pin `softprops/action-gh-release` to v0.1.15 to work around https://github.com/softprops/action-gh-release/issues/445. \ No newline at end of file From 45ca6392f42c1cf609186c93131e18d22b04dab7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 4 Dec 2024 12:58:26 +0000 Subject: [PATCH 143/147] Pin Rust to 1.82.0 when building Python wheels (#17993) Addresses step 1 of #17988. --- .github/workflows/release-artifacts.yml | 10 ++++++++-- changelog.d/17993.misc | 1 + pyproject.toml | 7 +++++-- 3 files changed, 14 insertions(+), 4 deletions(-) create mode 100644 changelog.d/17993.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index deb5ec33e353..5b5bfc1896b0 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -5,7 +5,7 @@ name: Build release artifacts on: # we build on PRs and develop to (hopefully) get early warning # of things breaking (but only build one set of debs). PRs skip - # building wheels on ARM. + # building wheels on macOS & ARM. 
pull_request: push: branches: ["develop", "release-*"] @@ -111,7 +111,7 @@ jobs: runs-on: ${{ matrix.os }} strategy: matrix: - os: [ubuntu-22.04] + os: [ubuntu-22.04, macos-13] arch: [x86_64, aarch64] # is_pr is a flag used to exclude certain jobs from the matrix on PRs. # It is not read by the rest of the workflow. @@ -119,6 +119,12 @@ jobs: - ${{ startsWith(github.ref, 'refs/pull/') }} exclude: + # Don't build macos wheels on PR CI. + - is_pr: true + os: "macos-13" + # Don't build aarch64 wheels on mac. + - os: "macos-13" + arch: aarch64 # Don't build aarch64 wheels on PR CI. - is_pr: true arch: aarch64 diff --git a/changelog.d/17993.misc b/changelog.d/17993.misc new file mode 100644 index 000000000000..149cb358c3c7 --- /dev/null +++ b/changelog.d/17993.misc @@ -0,0 +1 @@ +Fix building wheels for MacOS which was temporarily disabled in Synapse 1.120.2. \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 6fd6a9521976..60c62015b7e2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -386,8 +386,11 @@ build-backend = "poetry.core.masonry.api" # c.f. https://github.com/matrix-org/synapse/pull/14259 skip = "cp36* cp37* cp38* pp37* pp38* *-musllinux_i686 pp*aarch64 *-musllinux_aarch64" -# We need a rust compiler -before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain stable -y --profile minimal" +# We need a rust compiler. +# +# We temporarily pin Rust to 1.82.0 to work around +# https://github.com/element-hq/synapse/issues/17988 +before-all = "curl https://sh.rustup.rs -sSf | sh -s -- --default-toolchain 1.82.0 -y --profile minimal" environment= { PATH = "$PATH:$HOME/.cargo/bin" } # For some reason if we don't manually clean the build directory we From a00d0b3d0e72cd56733c30b1b52b5402c92f81cc Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 4 Dec 2024 14:49:28 +0000 Subject: [PATCH 144/147] 1.121.0rc1 --- CHANGES.md | 47 +++++++++++++++++++++++++++++++++++++++ changelog.d/17253.misc | 1 - changelog.d/17705.feature | 1 - changelog.d/17872.doc | 1 - changelog.d/17933.bugfix | 1 - changelog.d/17936.misc | 1 - changelog.d/17944.misc | 1 - changelog.d/17945.misc | 1 - changelog.d/17947.feature | 1 - changelog.d/17952.misc | 1 - changelog.d/17953.doc | 1 - changelog.d/17962.misc | 1 - changelog.d/17965.feature | 1 - changelog.d/17966.misc | 1 - changelog.d/17969.misc | 1 - changelog.d/17970.bugfix | 1 - changelog.d/17972.misc | 1 - changelog.d/17975.feature | 1 - changelog.d/17986.misc | 1 - changelog.d/17993.misc | 1 - changelog.d/17995.misc | 1 - debian/changelog | 6 +++++ pyproject.toml | 2 +- 23 files changed, 54 insertions(+), 21 deletions(-) delete mode 100644 changelog.d/17253.misc delete mode 100644 changelog.d/17705.feature delete mode 100644 changelog.d/17872.doc delete mode 100644 changelog.d/17933.bugfix delete mode 100644 changelog.d/17936.misc delete mode 100644 changelog.d/17944.misc delete mode 100644 changelog.d/17945.misc delete mode 100644 changelog.d/17947.feature delete mode 100644 changelog.d/17952.misc delete mode 100644 changelog.d/17953.doc delete mode 100644 changelog.d/17962.misc delete mode 100644 changelog.d/17965.feature delete mode 100644 changelog.d/17966.misc delete mode 100644 changelog.d/17969.misc delete mode 100644 changelog.d/17970.bugfix delete mode 100644 changelog.d/17972.misc delete mode 100644 changelog.d/17975.feature delete mode 100644 changelog.d/17986.misc delete mode 100644 changelog.d/17993.misc delete mode 100644 changelog.d/17995.misc diff --git a/CHANGES.md b/CHANGES.md index 
535c41f6a66b..20177bf00aed 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,50 @@ +# Synapse 1.121.0rc1 (2024-12-04) + +This release candidate contains the security fixes from [v1.120.2](https://github.com/element-hq/synapse/releases/tag/v1.120.2). + +New changes listed below. + +### Features + +- Support for [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190): device management for Application Services. ([\#17705](https://github.com/element-hq/synapse/issues/17705)) +- Update [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync to include invite, ban, kick, targets when `$LAZY`-loading room members. ([\#17947](https://github.com/element-hq/synapse/issues/17947)) +- Use stable `M_USER_LOCKED` error code for locked accounts, as per [Matrix 1.12](https://spec.matrix.org/v1.12/client-server-api/#account-locking). ([\#17965](https://github.com/element-hq/synapse/issues/17965)) +- [MSC4076](https://github.com/matrix-org/matrix-spec-proposals/pull/4076): Add `disable_badge_count` to pusher configuration. ([\#17975](https://github.com/element-hq/synapse/issues/17975)) + +### Bugfixes + +- Fix long-standing bug where read receipts could get overly delayed being sent over federation. ([\#17933](https://github.com/element-hq/synapse/issues/17933)) + +### Improved Documentation + +- Add OIDC example configuration for Forgejo (fork of Gitea). ([\#17872](https://github.com/element-hq/synapse/issues/17872)) +- Link to element-docker-demo from contrib/docker*. ([\#17953](https://github.com/element-hq/synapse/issues/17953)) + +### Internal Changes + +- [MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108): Add a `Content-Type` header on the `PUT` response to work around a faulty behavior in some caching reverse proxies. ([\#17253](https://github.com/element-hq/synapse/issues/17253)) +- Fix incorrect comment in new schema delta. ([\#17936](https://github.com/element-hq/synapse/issues/17936)) +- Raise setuptools_rust version cap to 1.10.2. ([\#17944](https://github.com/element-hq/synapse/issues/17944)) +- Enable encrypted appservice related experimental features in the complement docker image. ([\#17945](https://github.com/element-hq/synapse/issues/17945)) +- Return whether the user is suspended when querying the user account in the Admin API. ([\#17952](https://github.com/element-hq/synapse/issues/17952)) +- Fix new scheduled tasks jumping the queue. ([\#17962](https://github.com/element-hq/synapse/issues/17962)) +- Bump pyo3 and dependencies to v0.23.2. ([\#17966](https://github.com/element-hq/synapse/issues/17966)) +- Update setuptools-rust and fix building abi3 wheels in latest version. ([\#17969](https://github.com/element-hq/synapse/issues/17969)) +- Consolidate SSO redirects through `/_matrix/client/v3/login/sso/redirect(/{idpId})`. ([\#17972](https://github.com/element-hq/synapse/issues/17972)) +- Fix Docker and Complement config to be able to use `public_baseurl`. ([\#17986](https://github.com/element-hq/synapse/issues/17986)) +- Fix building wheels for MacOS which was temporarily disabled in Synapse 1.120.2. ([\#17993](https://github.com/element-hq/synapse/issues/17993)) +- Fix release process to not create duplicate releases. ([\#17970](https://github.com/element-hq/synapse/issues/17970), [\#17995](https://github.com/element-hq/synapse/issues/17995)) + + +### Updates to locked dependencies + +* Bump bytes from 1.8.0 to 1.9.0. 
([\#17982](https://github.com/element-hq/synapse/issues/17982)) +* Bump pysaml2 from 7.3.1 to 7.5.0. ([\#17978](https://github.com/element-hq/synapse/issues/17978)) +* Bump serde_json from 1.0.132 to 1.0.133. ([\#17939](https://github.com/element-hq/synapse/issues/17939)) +* Bump tomli from 2.0.2 to 2.1.0. ([\#17959](https://github.com/element-hq/synapse/issues/17959)) +* Bump tomli from 2.1.0 to 2.2.1. ([\#17979](https://github.com/element-hq/synapse/issues/17979)) +* Bump tornado from 6.4.1 to 6.4.2. ([\#17955](https://github.com/element-hq/synapse/issues/17955)) + # Synapse 1.120.2 (2024-12-03) This version has building of wheels for macOS disabled. diff --git a/changelog.d/17253.misc b/changelog.d/17253.misc deleted file mode 100644 index 868691624d01..000000000000 --- a/changelog.d/17253.misc +++ /dev/null @@ -1 +0,0 @@ -[MSC4108](https://github.com/matrix-org/matrix-spec-proposals/pull/4108): Add a `Content-Type` header on the `PUT` response to work around a faulty behavior in some caching reverse proxies. diff --git a/changelog.d/17705.feature b/changelog.d/17705.feature deleted file mode 100644 index e2cd7bca4f41..000000000000 --- a/changelog.d/17705.feature +++ /dev/null @@ -1 +0,0 @@ -Support for [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190): device management for Application Services. diff --git a/changelog.d/17872.doc b/changelog.d/17872.doc deleted file mode 100644 index 7f8b2d349535..000000000000 --- a/changelog.d/17872.doc +++ /dev/null @@ -1 +0,0 @@ -Add OIDC example configuration for Forgejo (fork of Gitea). diff --git a/changelog.d/17933.bugfix b/changelog.d/17933.bugfix deleted file mode 100644 index 8d30ac587eba..000000000000 --- a/changelog.d/17933.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix long-standing bug where read receipts could get overly delayed being sent over federation. diff --git a/changelog.d/17936.misc b/changelog.d/17936.misc deleted file mode 100644 index 91d976fbd9ca..000000000000 --- a/changelog.d/17936.misc +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect comment in new schema delta. diff --git a/changelog.d/17944.misc b/changelog.d/17944.misc deleted file mode 100644 index a8a645103f49..000000000000 --- a/changelog.d/17944.misc +++ /dev/null @@ -1 +0,0 @@ -Raise setuptools_rust version cap to 1.10.2. \ No newline at end of file diff --git a/changelog.d/17945.misc b/changelog.d/17945.misc deleted file mode 100644 index eeebb921699c..000000000000 --- a/changelog.d/17945.misc +++ /dev/null @@ -1 +0,0 @@ -Enable encrypted appservice related experimental features in the complement docker image. diff --git a/changelog.d/17947.feature b/changelog.d/17947.feature deleted file mode 100644 index 2d1b99cec2db..000000000000 --- a/changelog.d/17947.feature +++ /dev/null @@ -1 +0,0 @@ -Update [MSC4186](https://github.com/matrix-org/matrix-spec-proposals/pull/4186) Sliding Sync to include invite, ban, kick, targets when `$LAZY`-loading room members. diff --git a/changelog.d/17952.misc b/changelog.d/17952.misc deleted file mode 100644 index 84fc8bfc2906..000000000000 --- a/changelog.d/17952.misc +++ /dev/null @@ -1 +0,0 @@ -Return whether the user is suspended when querying the user account in the Admin API. \ No newline at end of file diff --git a/changelog.d/17953.doc b/changelog.d/17953.doc deleted file mode 100644 index 10f5a27ba9fd..000000000000 --- a/changelog.d/17953.doc +++ /dev/null @@ -1 +0,0 @@ -Link to element-docker-demo from contrib/docker*. 
diff --git a/changelog.d/17962.misc b/changelog.d/17962.misc deleted file mode 100644 index adf634870799..000000000000 --- a/changelog.d/17962.misc +++ /dev/null @@ -1 +0,0 @@ -Fix new scheduled tasks jumping the queue. diff --git a/changelog.d/17965.feature b/changelog.d/17965.feature deleted file mode 100644 index e447a58986e0..000000000000 --- a/changelog.d/17965.feature +++ /dev/null @@ -1 +0,0 @@ -Use stable `M_USER_LOCKED` error code for locked accounts, as per [Matrix 1.12](https://spec.matrix.org/v1.12/client-server-api/#account-locking). \ No newline at end of file diff --git a/changelog.d/17966.misc b/changelog.d/17966.misc deleted file mode 100644 index c6d6e55fbf92..000000000000 --- a/changelog.d/17966.misc +++ /dev/null @@ -1 +0,0 @@ -Bump pyo3 and dependencies to v0.23.2. \ No newline at end of file diff --git a/changelog.d/17969.misc b/changelog.d/17969.misc deleted file mode 100644 index 05506daaa06f..000000000000 --- a/changelog.d/17969.misc +++ /dev/null @@ -1 +0,0 @@ -Update setuptools-rust and fix building abi3 wheels in latest version. diff --git a/changelog.d/17970.bugfix b/changelog.d/17970.bugfix deleted file mode 100644 index 835079de3f3b..000000000000 --- a/changelog.d/17970.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix release process to not create duplicate releases. diff --git a/changelog.d/17972.misc b/changelog.d/17972.misc deleted file mode 100644 index e7f009d20d4a..000000000000 --- a/changelog.d/17972.misc +++ /dev/null @@ -1 +0,0 @@ -Consolidate SSO redirects through `/_matrix/client/v3/login/sso/redirect(/{idpId})`. diff --git a/changelog.d/17975.feature b/changelog.d/17975.feature deleted file mode 100644 index 48f41bddad03..000000000000 --- a/changelog.d/17975.feature +++ /dev/null @@ -1 +0,0 @@ -[MSC4076](https://github.com/matrix-org/matrix-spec-proposals/pull/4076): Add `disable_badge_count` to pusher configuration. diff --git a/changelog.d/17986.misc b/changelog.d/17986.misc deleted file mode 100644 index c062f3ecdf98..000000000000 --- a/changelog.d/17986.misc +++ /dev/null @@ -1 +0,0 @@ -Fix Docker and Complement config to be able to use `public_baseurl`. diff --git a/changelog.d/17993.misc b/changelog.d/17993.misc deleted file mode 100644 index 149cb358c3c7..000000000000 --- a/changelog.d/17993.misc +++ /dev/null @@ -1 +0,0 @@ -Fix building wheels for MacOS which was temporarily disabled in Synapse 1.120.2. \ No newline at end of file diff --git a/changelog.d/17995.misc b/changelog.d/17995.misc deleted file mode 100644 index d8e608c1ae6a..000000000000 --- a/changelog.d/17995.misc +++ /dev/null @@ -1 +0,0 @@ -Pin `softprops/action-gh-release` to v0.1.15 to work around https://github.com/softprops/action-gh-release/issues/445. \ No newline at end of file diff --git a/debian/changelog b/debian/changelog index 3aa74d1d24e7..805c036c82bf 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.121.0~rc1) stable; urgency=medium + + * New Synapse release 1.121.0rc1. + + -- Synapse Packaging team Wed, 04 Dec 2024 14:47:23 +0000 + matrix-synapse-py3 (1.120.2) stable; urgency=medium * New synapse release 1.120.2. 
diff --git a/pyproject.toml b/pyproject.toml index 60c62015b7e2..e5051770f5b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.120.2" +version = "1.121.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 5b0873516c5ecf86e6cc4dff95f087410599b1a1 Mon Sep 17 00:00:00 2001 From: Till <2353100+S7evinK@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:40:36 +0100 Subject: [PATCH 145/147] Attempt to fix duplicate releases issue (#18025) This hopefully fixes https://github.com/element-hq/synapse/issues/17991, as we first upgraded to v2 and are now back to 0.1.15. (This was lost in https://github.com/element-hq/synapse/pull/17923, related https://github.com/element-hq/synapse/pull/17995) --- .github/workflows/release-artifacts.yml | 4 ++++ changelog.d/18025.misc | 1 + 2 files changed, 5 insertions(+) create mode 100644 changelog.d/18025.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 5b5bfc1896b0..10583bc0600f 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -221,3 +221,7 @@ jobs: Sdist/* Wheel*/* debs.tar.xz + # if it's not already published, keep the release as a draft. + draft: true + # mark it as a prerelease if the tag contains 'rc'. + prerelease: ${{ contains(github.ref, 'rc') }} diff --git a/changelog.d/18025.misc b/changelog.d/18025.misc new file mode 100644 index 000000000000..835079de3f3b --- /dev/null +++ b/changelog.d/18025.misc @@ -0,0 +1 @@ +Fix release process to not create duplicate releases. From ed6edc17d0bef167b0833d962e7abaf2449a21ef Mon Sep 17 00:00:00 2001 From: Till Faelligen <2353100+S7evinK@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:12:50 +0100 Subject: [PATCH 146/147] 1.121.0 --- CHANGES.md | 9 +++++++++ changelog.d/18025.misc | 1 - debian/changelog | 6 ++++++ pyproject.toml | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/18025.misc diff --git a/CHANGES.md b/CHANGES.md index 20177bf00aed..6096463cc6ea 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +# Synapse 1.121.0 (2024-12-11) + +### Internal Changes + +- Fix release process to not create duplicate releases. ([\#18025](https://github.com/element-hq/synapse/issues/18025)) + + + + # Synapse 1.121.0rc1 (2024-12-04) This release candidate contains the security fixes from [v1.120.2](https://github.com/element-hq/synapse/releases/tag/v1.120.2). diff --git a/changelog.d/18025.misc b/changelog.d/18025.misc deleted file mode 100644 index 835079de3f3b..000000000000 --- a/changelog.d/18025.misc +++ /dev/null @@ -1 +0,0 @@ -Fix release process to not create duplicate releases. diff --git a/debian/changelog b/debian/changelog index 805c036c82bf..8149cd74864b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.121.0) stable; urgency=medium + + * New Synapse release 1.121.0. + + -- Synapse Packaging team Wed, 11 Dec 2024 13:12:30 +0100 + matrix-synapse-py3 (1.121.0~rc1) stable; urgency=medium * New Synapse release 1.121.0rc1. 
diff --git a/pyproject.toml b/pyproject.toml index e5051770f5b1..a8a86b4d5c47 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -97,7 +97,7 @@ module-name = "synapse.synapse_rust" [tool.poetry] name = "matrix-synapse" -version = "1.121.0rc1" +version = "1.121.0" description = "Homeserver for the Matrix decentralised comms protocol" authors = ["Matrix.org Team and Contributors "] license = "AGPL-3.0-or-later" From 737f6c73f7ccb611c271fd568a6af0a7c705619d Mon Sep 17 00:00:00 2001 From: Till Faelligen <2353100+S7evinK@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:20:39 +0100 Subject: [PATCH 147/147] Update changelog --- CHANGES.md | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/CHANGES.md b/CHANGES.md index 6096463cc6ea..2ef27f6ade2a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,18 +1,15 @@ # Synapse 1.121.0 (2024-12-11) +This release contains the security fixes from [v1.120.2](https://github.com/element-hq/synapse/releases/tag/v1.120.2). + ### Internal Changes - Fix release process to not create duplicate releases. ([\#18025](https://github.com/element-hq/synapse/issues/18025)) - # Synapse 1.121.0rc1 (2024-12-04) -This release candidate contains the security fixes from [v1.120.2](https://github.com/element-hq/synapse/releases/tag/v1.120.2). - -New changes listed below. - ### Features - Support for [MSC4190](https://github.com/matrix-org/matrix-spec-proposals/pull/4190): device management for Application Services. ([\#17705](https://github.com/element-hq/synapse/issues/17705))