Compare commits


14 Commits

Author · SHA1 · Message · Date

Tulir Asokan · d017cecb14 · Remove unnecessary pusher URL validation · 2023-02-21 23:24:08 +02:00
Tulir Asokan · c958c0f163 · Allow specific users to use timestamp massaging without being appservices · 2023-02-21 23:24:08 +02:00
Tulir Asokan · a2832fd7b5 · Allow custom content in read receipts · 2023-02-21 23:24:08 +02:00
Tulir Asokan · cbb9830671 · Add support for batch sending new events · 2023-02-21 23:24:08 +02:00
Tulir Asokan · a4e0d14fc2 · Allow appservices to batch send as any local user · 2023-02-21 23:24:08 +02:00
Tulir Asokan · 843ce460ef · Allow unhiding events that the C-S API filters away by default · 2023-02-21 23:24:08 +02:00
Tulir Asokan · 594a17358b · Allow bypassing unnecessary validation in C-S API · 2023-02-21 23:24:07 +02:00
Tulir Asokan · 105ffa5f2e · Set immutable cache-control header for media downloads · 2023-02-21 23:23:22 +02:00
Tulir Asokan · 93e338a8ef · Thumbnail webp images as webp to avoid losing transparency · 2023-02-21 23:23:22 +02:00
Tulir Asokan · b9c2050abb · Allow registering invalid user IDs with admin API · 2023-02-21 23:23:22 +02:00
Tulir Asokan · 1816a8cc59 · Allow specifying room ID when creating room · 2023-02-21 23:23:22 +02:00
Tulir Asokan · e03bdba265 · Fix default power level for room creator · 2023-02-21 23:23:22 +02:00
Tulir Asokan · fc1f0d9f3e · Add meow readme and config extension · 2023-02-21 23:23:22 +02:00
Tulir Asokan · 25d218e479 · Add meow dockerfile (N.B. requires requirements.txt to be generated in repo root beforehand) · 2023-02-21 23:23:22 +02:00
32 changed files with 427 additions and 102 deletions


@ -8,6 +8,7 @@
!README.rst
!pyproject.toml
!poetry.lock
!requirements.txt
!Cargo.lock
!Cargo.toml
!build_rust.py

.gitlab-ci.yml (new file, 19 lines)

@ -0,0 +1,19 @@
image: docker:stable
stages:
- build
build amd64:
stage: build
tags:
- amd64
only:
- master
before_script:
- docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
script:
- synversion=$(cat pyproject.toml | grep '^version =' | sed -E 's/^version = "(.+)"$/\1/')
- docker build --tag $CI_REGISTRY_IMAGE:latest --tag $CI_REGISTRY_IMAGE:$synversion .
- docker push $CI_REGISTRY_IMAGE:latest
- docker push $CI_REGISTRY_IMAGE:$synversion
- docker rmi $CI_REGISTRY_IMAGE:latest $CI_REGISTRY_IMAGE:$synversion

Dockerfile (new file, 61 lines)

@ -0,0 +1,61 @@
ARG PYTHON_VERSION=3.11
FROM docker.io/python:${PYTHON_VERSION}-slim as builder
RUN apt-get update && apt-get install -y \
build-essential \
libffi-dev \
libjpeg-dev \
libpq-dev \
libssl-dev \
libwebp-dev \
libxml++2.6-dev \
libxslt1-dev \
zlib1g-dev \
openssl \
git \
curl \
&& rm -rf /var/lib/apt/lists/*
ENV RUSTUP_HOME=/rust
ENV CARGO_HOME=/cargo
ENV PATH=/cargo/bin:/rust/bin:$PATH
RUN mkdir /rust /cargo
RUN curl -sSf https://sh.rustup.rs | sh -s -- -y --no-modify-path --default-toolchain stable
COPY synapse /synapse/synapse/
COPY rust /synapse/rust/
COPY README.rst pyproject.toml requirements.txt build_rust.py /synapse/
RUN pip install --prefix="/install" --no-warn-script-location --ignore-installed \
--no-deps -r /synapse/requirements.txt \
&& pip install --prefix="/install" --no-warn-script-location \
--no-deps \
'git+https://github.com/maunium/synapse-simple-antispam#egg=synapse-simple-antispam' \
'git+https://github.com/devture/matrix-synapse-shared-secret-auth@2.0.2#egg=shared_secret_authenticator' \
&& pip install --prefix="/install" --no-warn-script-location \
--no-deps /synapse
FROM docker.io/python:${PYTHON_VERSION}-slim
RUN apt-get update && apt-get install -y \
curl \
libjpeg62-turbo \
libpq5 \
libwebp6 \
xmlsec1 \
libjemalloc2 \
openssl \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /install /usr/local
VOLUME ["/data"]
ENV LD_PRELOAD="/usr/lib/x86_64-linux-gnu/libjemalloc.so.2"
ENTRYPOINT ["python3", "-m", "synapse.app.homeserver"]
CMD ["--keys-directory", "/data", "-c", "/data/homeserver.yaml"]
HEALTHCHECK --start-period=5s --interval=1m --timeout=5s \
CMD curl -fSs http://localhost:8008/health || exit 1

README.md (new file, 69 lines)

@ -0,0 +1,69 @@
# Maunium Synapse
This is a fork of [Synapse] to remove dumb limits and fix bugs that the
upstream devs don't want to fix.
The only official distribution is the docker image in the [GitLab container
registry], but you can also install from source ([upstream instructions]).
The master branch and `:latest` docker tag are upgraded to each upstream
release candidate very soon after release (usually within 10 minutes†). There
are also docker tags for each release, e.g. `:1.75.0`. If you don't want RCs,
use the specific release tags.
†If there are merge conflicts, the update may be delayed for up to a few days
after the full release.
[Synapse]: https://github.com/matrix-org/synapse
[GitLab container registry]: https://mau.dev/maunium/synapse/container_registry
[upstream instructions]: https://github.com/matrix-org/synapse/blob/develop/INSTALL.md#installing-from-source
## List of changes
* Default power level for room creator is 9001 instead of 100.
* Room creator can specify a custom room ID with the `room_id` param in the
request body. If the room ID is already in use, the request fails with
`M_CONFLICT` (see the request sketch after this list).
* ~~URL previewer user agent includes `Bot` so Twitter previews work properly.~~
Upstreamed after over 2 years 🎉
* ~~Local event creation concurrency is disabled to avoid unnecessary state
resolution.~~ Upstreamed after over 3 years 🎉
* Register admin API can register invalid user IDs.
* Docker image with jemalloc enabled by default.
* Config option to allow specific users to send events without unnecessary
validation.
* Config option to allow specific users to receive events that are usually
filtered away (e.g. `org.matrix.dummy_event` and `m.room.aliases`).
* Config option to allow specific users to use timestamp massaging without
being appservice users.
* Config option to allow appservices to use MSC2716 batch sending as any local user.
* Removed bad pusher URL validation.
* webp images are thumbnailed to webp instead of jpeg to avoid losing
transparency.
* Media repo `Cache-Control` header says `immutable` and 1 year for all media
that exists, as media IDs in Matrix are immutable.
* Allowed sending custom data with read receipts.
You can view the full list of changes on the [meow-patchset] branch.
Additionally, historical patch sets are saved as `meow-patchset-vX` [tags].
[meow-patchset]: https://mau.dev/maunium/synapse/-/compare/patchset-base...meow-patchset
[tags]: https://mau.dev/maunium/synapse/-/tags?search=meow-patchset&sort=updated_desc
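The request sketch below illustrates the custom room ID change, using the
standard `createRoom` endpoint plus this fork's `room_id` body parameter. The
homeserver URL, access token and room ID are placeholders.

```python
# Sketch only: create a room with a fixed room ID (fork-specific `room_id`
# body parameter). All values are placeholders.
import requests

HOMESERVER = "https://matrix.example.com"
ACCESS_TOKEN = "syt_..."  # placeholder access token

resp = requests.post(
    f"{HOMESERVER}/_matrix/client/v3/createRoom",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={
        "preset": "private_chat",
        "name": "Fixed-ID room",
        # Fork-specific: request an exact room ID instead of a generated one.
        "room_id": "!myfixedroomid:example.com",
    },
)
if resp.status_code == 409:
    # The fork responds with 409 / M_CONFLICT if the room ID is already in use.
    print("Room ID already taken:", resp.json())
else:
    resp.raise_for_status()
    print("Created:", resp.json()["room_id"])
```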
## Configuration
Generating a new config will include the `meow` section; the example below is
kept here for reference when updating existing configs.
```yaml
meow:
# List of users who aren't subject to unnecessary validation in the C-S API.
validation_override:
- "@you:example.com"
# List of users who will get org.matrix.dummy_event and m.room.aliases events down /sync
filter_override:
- "@you:example.com"
# Whether or not the admin API should be able to register invalid user IDs.
admin_api_register_invalid: true
# List of users who can use timestamp massaging without being appservices
timestamp_override:
- "@you:example.com"
# Whether appservices should be allowed to use MSC2716 batch sending as any local user.
appservice_batch_send_any: false
```
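As a rough usage sketch for `timestamp_override`: a listed user can pass the
standard `ts` query parameter (normally honoured only for appservices) when
sending an event. All values below are placeholders, not part of this
patchset.

```python
# Sketch only: timestamp massaging as a regular user listed in
# meow.timestamp_override. All values are placeholders.
import time

import requests

HOMESERVER = "https://matrix.example.com"
ACCESS_TOKEN = "syt_..."  # token of a user in timestamp_override
ROOM_ID = "!myfixedroomid:example.com"

txn_id = f"massaged-{int(time.time() * 1000)}"
one_hour_ago_ms = int(time.time() * 1000) - 3600 * 1000

resp = requests.put(
    f"{HOMESERVER}/_matrix/client/v3/rooms/{ROOM_ID}/send/m.room.message/{txn_id}",
    # `ts` is the standard timestamp-massaging query parameter; upstream only
    # honours it for appservices, this fork also honours it for listed users.
    params={"ts": one_hour_ago_ms},
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={"msgtype": "m.text", "body": "backdated by one hour"},
)
resp.raise_for_status()
print("event_id:", resp.json()["event_id"])
```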


@ -254,7 +254,7 @@ class Auth:
raise MissingClientTokenError()
async def validate_appservice_can_control_user_id(
self, app_service: ApplicationService, user_id: str
self, app_service: ApplicationService, user_id: str, allow_any: bool = False
) -> None:
"""Validates that the app service is allowed to control
the given user.
@ -262,6 +262,7 @@ class Auth:
Args:
app_service: The app service that controls the user
user_id: The author MXID that the app service is controlling
allow_any: Allow the appservice to control any local user
Raises:
AuthError: If the application service is not allowed to control the user
@ -273,7 +274,7 @@ class Auth:
if app_service.sender == user_id:
pass
# Check to make sure the app service is allowed to control the user
elif not app_service.is_interested_in_user(user_id):
elif not app_service.is_interested_in_user(user_id) and not allow_any:
raise AuthError(
403,
"Application service cannot masquerade as this user (%s)." % user_id,


@ -35,6 +35,7 @@ from synapse.config import ( # noqa: F401
jwt,
key,
logger,
meow,
metrics,
modules,
oembed,
@ -90,6 +91,7 @@ class RootConfig:
voip: voip.VoipConfig
registration: registration.RegistrationConfig
account_validity: account_validity.AccountValidityConfig
meow: meow.MeowConfig
metrics: metrics.MetricsConfig
api: api.ApiConfig
appservice: appservice.AppServiceConfig


@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import RootConfig
from .meow import MeowConfig
from .account_validity import AccountValidityConfig
from .api import ApiConfig
from .appservice import AppServiceConfig
@ -58,6 +59,7 @@ from .workers import WorkerConfig
class HomeServerConfig(RootConfig):
config_classes = [
MeowConfig,
ModulesConfig,
ServerConfig,
RetentionConfig,

synapse/config/meow.py (new file, 56 lines)

@ -0,0 +1,56 @@
# -*- coding: utf-8 -*-
# Copyright 2020 Maunium
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import Config
class MeowConfig(Config):
"""Meow Configuration
Configuration for disabling dumb limits in Synapse
"""
section = "meow"
def read_config(self, config, **kwargs):
meow_config = config.get("meow", {})
self.validation_override = set(meow_config.get("validation_override", []))
self.filter_override = set(meow_config.get("filter_override", []))
self.timestamp_override = set(meow_config.get("timestamp_override", []))
self.admin_api_register_invalid = meow_config.get(
"admin_api_register_invalid", True
)
self.appservice_batch_send_any = meow_config.get(
"appservice_batch_send_any", False
)
def generate_config_section(self, config_dir_path, server_name, **kwargs):
return """
# Configuration for disabling dumb limits in Synapse
#
#meow:
# # List of users who aren't subject to unnecessary validation in the C-S API.
# validation_override:
# - "@you:example.com"
# # List of users who will get org.matrix.dummy_event and m.room.aliases events down /sync
# filter_override:
# - "@you:example.com"
# # List of users who can use timestamp massaging without being appservices
# timestamp_override:
# - "@you:example.com"
# # Whether or not the admin API should be able to register invalid user IDs.
# admin_api_register_invalid: true
# # Whether appservices should be allowed to use MSC2716 batch sending as any local user.
# appservice_batch_send_any: false
"""


@ -47,10 +47,8 @@ THUMBNAIL_SIZE_YAML = """\
THUMBNAIL_SUPPORTED_MEDIA_FORMAT_MAP = {
"image/jpeg": "jpeg",
"image/jpg": "jpeg",
"image/webp": "jpeg",
# Thumbnails can only be jpeg or png. We choose png thumbnails for gif
# because it can have transparency.
"image/gif": "png",
"image/webp": "webp",
"image/gif": "webp",
"image/png": "png",
}
@ -102,6 +100,10 @@ def parse_thumbnail_requirements(
requirement.append(
ThumbnailRequirement(width, height, method, "image/png")
)
elif thumbnail_format == "webp":
requirement.append(
ThumbnailRequirement(width, height, method, "image/webp")
)
else:
raise Exception(
"Unknown thumbnail mapping from %s to %s. This is a Synapse problem, please report!"


@ -43,7 +43,7 @@ class EventValidator:
event: The event to validate.
config: The homeserver's configuration.
"""
self.validate_builder(event)
self.validate_builder(event, config)
if event.format_version == EventFormatVersions.ROOM_V1_V2:
EventID.from_string(event.event_id)
@ -74,6 +74,12 @@ class EventValidator:
# Note that only the client controlled portion of the event is
# checked, since we trust the portions of the event we created.
validate_canonicaljson(event.content)
if not 0 < event.origin_server_ts < 2**53:
raise SynapseError(400, "Event timestamp is out of range")
# meow: allow specific users to send potentially dangerous events.
if event.sender in config.meow.validation_override:
return
if event.type == EventTypes.Aliases:
if "aliases" in event.content:
@ -165,7 +171,9 @@ class EventValidator:
errcode=Codes.BAD_JSON,
)
def validate_builder(self, event: Union[EventBase, EventBuilder]) -> None:
def validate_builder(
self, event: Union[EventBase, EventBuilder], config: HomeServerConfig
) -> None:
"""Validates that the builder/event has roughly the right format. Only
checks values that we expect a proto event to have, rather than all the
fields an event would have
@ -183,6 +191,10 @@ class EventValidator:
RoomID.from_string(event.room_id)
UserID.from_string(event.sender)
# meow: allow specific users to send so-called invalid events
if event.sender in config.meow.validation_override:
return
if event.type == EventTypes.Message:
strings = ["body", "msgtype"]


@ -71,9 +71,11 @@ class DirectoryHandler:
) -> None:
# general association creation for both human users and app services
for wchar in string.whitespace:
if wchar in room_alias.localpart:
raise SynapseError(400, "Invalid characters in room alias")
# meow: allow specific users to include anything in room aliases
if creator not in self.config.meow.validation_override:
for wchar in string.whitespace:
if wchar in room_alias.localpart:
raise SynapseError(400, "Invalid characters in room alias")
if ":" in room_alias.localpart:
raise SynapseError(400, "Invalid character in room alias localpart: ':'.")
@ -118,7 +120,10 @@ class DirectoryHandler:
user_id = requester.user.to_string()
room_alias_str = room_alias.to_string()
if len(room_alias_str) > MAX_ALIAS_LENGTH:
if (
user_id not in self.hs.config.meow.validation_override
and len(room_alias_str) > MAX_ALIAS_LENGTH
):
raise SynapseError(
400,
"Can't create aliases longer than %s characters" % MAX_ALIAS_LENGTH,


@ -1417,7 +1417,7 @@ class FederationHandler:
room_version_obj, event_dict
)
EventValidator().validate_builder(builder)
EventValidator().validate_builder(builder, self.hs.config)
# Try several times, it could fail with PartialStateConflictError
# in send_membership_event, cf comment in except block.
@ -1586,7 +1586,7 @@ class FederationHandler:
builder = self.event_builder_factory.for_room_version(
room_version_obj, event_dict
)
EventValidator().validate_builder(builder)
EventValidator().validate_builder(builder, self.hs.config)
(
event,


@ -669,7 +669,7 @@ class EventCreationHandler:
room_version_obj, event_dict
)
self.validator.validate_builder(builder)
self.validator.validate_builder(builder, self.config)
if builder.type == EventTypes.Member:
membership = builder.content.get("membership", None)
@ -1303,6 +1303,8 @@ class EventCreationHandler:
Raises:
SynapseError if the event is invalid.
"""
if event.sender in self.config.meow.validation_override:
return
relation = relation_from_event(event)
if not relation:
@ -1358,6 +1360,7 @@ class EventCreationHandler:
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
ignore_shadow_ban: bool = False,
dont_notify: bool = False,
) -> EventBase:
"""Processes new events. Please note that if batch persisting events, an error in
handling any one of these events will result in all of the events being dropped.
@ -1377,6 +1380,8 @@ class EventCreationHandler:
ignore_shadow_ban: True if shadow-banned users should be allowed to
send this event.
dont_notify
Return:
If the event was deduplicated, the previous, duplicate, event. Otherwise,
`event`.
@ -1454,6 +1459,7 @@ class EventCreationHandler:
events_and_context=events_and_context,
ratelimit=ratelimit,
extra_users=extra_users,
dont_notify=dont_notify,
),
run_in_background(
self.cache_joined_hosts_for_events, events_and_context
@ -1471,6 +1477,7 @@ class EventCreationHandler:
events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
dont_notify: bool = False,
) -> EventBase:
"""Actually persists new events. Should only be called by
`handle_new_client_event`, and see its docstring for documentation of
@ -1500,6 +1507,7 @@ class EventCreationHandler:
requester=requester,
ratelimit=ratelimit,
extra_users=extra_users,
dont_notify=dont_notify,
)
except SynapseError as e:
if e.code == HTTPStatus.CONFLICT:
@ -1529,6 +1537,7 @@ class EventCreationHandler:
events_and_context,
ratelimit=ratelimit,
extra_users=extra_users,
dont_notify=dont_notify,
)
return event
@ -1650,6 +1659,7 @@ class EventCreationHandler:
events_and_context: List[Tuple[EventBase, EventContext]],
ratelimit: bool = True,
extra_users: Optional[List[UserID]] = None,
dont_notify: bool = False,
) -> EventBase:
"""Called when we have fully built the events, have already
calculated the push actions for the events, and checked auth.
@ -1716,7 +1726,8 @@ class EventCreationHandler:
await self._maybe_kick_guest_users(event, context)
if event.type == EventTypes.CanonicalAlias:
validation_override = event.sender in self.config.meow.validation_override
if event.type == EventTypes.CanonicalAlias and not validation_override:
# Validate a newly added alias or newly added alt_aliases.
original_alias = None
@ -1951,7 +1962,7 @@ class EventCreationHandler:
pos = PersistedEventPosition(self._instance_name, stream_ordering)
events_and_pos.append((event, pos))
if event.type == EventTypes.Message:
if not dont_notify and event.type == EventTypes.Message:
# We don't want to block sending messages on any presence code. This
# matters as sometimes presence code can take a while.
run_as_background_process(
@ -1966,7 +1977,10 @@ class EventCreationHandler:
except Exception:
logger.exception("Error notifying about new room events")
run_in_background(_notify)
if not dont_notify:
# Skip notifying clients, this is used for Beeper's custom
# batch sending of non-historical messages.
run_in_background(_notify)
return persisted_events[-1]
@ -2107,7 +2121,7 @@ class EventCreationHandler:
builder = self.event_builder_factory.for_room_version(
original_event.room_version, third_party_result
)
self.validator.validate_builder(builder)
self.validator.validate_builder(builder, self.config)
except SynapseError as e:
raise Exception(
"Third party rules module created an invalid event: " + e.msg,


@ -13,9 +13,10 @@
# limitations under the License.
import logging
from typing import TYPE_CHECKING
from typing import TYPE_CHECKING, Optional
from synapse.util.async_helpers import Linearizer
from synapse.types import JsonDict
if TYPE_CHECKING:
from synapse.server import HomeServer
@ -31,7 +32,11 @@ class ReadMarkerHandler:
self.read_marker_linearizer = Linearizer(name="read_marker")
async def received_client_read_marker(
self, room_id: str, user_id: str, event_id: str
self,
room_id: str,
user_id: str,
event_id: str,
extra_content: Optional[JsonDict] = None,
) -> None:
"""Updates the read marker for a given user in a given room if the event ID given
is ahead in the stream relative to the current read marker.
@ -54,7 +59,7 @@ class ReadMarkerHandler:
)
if should_update:
content = {"event_id": event_id}
content = {"event_id": event_id, **(extra_content or {})}
await self.account_data_handler.add_account_data_to_room(
user_id, room_id, "m.fully_read", content
)


@ -161,6 +161,7 @@ class ReceiptsHandler:
user_id: str,
event_id: str,
thread_id: Optional[str],
extra_content: Optional[JsonDict] = None,
) -> None:
"""Called when a client tells us a local user has read up to the given
event_id in the room.
@ -171,7 +172,7 @@ class ReceiptsHandler:
user_id=user_id,
event_ids=[event_id],
thread_id=thread_id,
data={"ts": int(self.clock.time_msec())},
data={"ts": int(self.clock.time_msec()), **(extra_content or {})},
)
is_new = await self._handle_new_receipts([receipt])


@ -141,22 +141,25 @@ class RegistrationHandler:
localpart: str,
guest_access_token: Optional[str] = None,
assigned_user_id: Optional[str] = None,
allow_invalid: bool = False,
inhibit_user_in_use_error: bool = False,
) -> None:
if types.contains_invalid_mxid_characters(localpart):
raise SynapseError(
400,
"User ID can only contain characters a-z, 0-9, or '=_-./'",
Codes.INVALID_USERNAME,
)
# meow: allow admins to register invalid user ids
if not allow_invalid:
if types.contains_invalid_mxid_characters(localpart):
raise SynapseError(
400,
"User ID can only contain characters a-z, 0-9, or '=_-./'",
Codes.INVALID_USERNAME,
)
if not localpart:
raise SynapseError(400, "User ID cannot be empty", Codes.INVALID_USERNAME)
if not localpart:
raise SynapseError(400, "User ID cannot be empty", Codes.INVALID_USERNAME)
if localpart[0] == "_":
raise SynapseError(
400, "User ID may not begin with _", Codes.INVALID_USERNAME
)
if localpart[0] == "_":
raise SynapseError(
400, "User ID may not begin with _", Codes.INVALID_USERNAME
)
user = UserID(localpart, self.hs.hostname)
user_id = user.to_string()
@ -170,14 +173,16 @@ class RegistrationHandler:
"A different user ID has already been registered for this session",
)
self.check_user_id_not_appservice_exclusive(user_id)
# meow: allow admins to register reserved user ids and long user ids
if not allow_invalid:
self.check_user_id_not_appservice_exclusive(user_id)
if len(user_id) > MAX_USERID_LENGTH:
raise SynapseError(
400,
"User ID may not be longer than %s characters" % (MAX_USERID_LENGTH,),
Codes.INVALID_USERNAME,
)
if len(user_id) > MAX_USERID_LENGTH:
raise SynapseError(
400,
"User ID may not be longer than %s characters" % (MAX_USERID_LENGTH,),
Codes.INVALID_USERNAME,
)
users = await self.store.get_users_by_id_case_insensitive(user_id)
if users:
@ -287,7 +292,12 @@ class RegistrationHandler:
await self.auth_blocking.check_auth_blocking(threepid=threepid)
if localpart is not None:
await self.check_username(localpart, guest_access_token=guest_access_token)
allow_invalid = by_admin and self.hs.config.meow.admin_api_register_invalid
await self.check_username(
localpart,
guest_access_token=guest_access_token,
allow_invalid=allow_invalid,
)
was_guest = guest_access_token is not None
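A hedged sketch of how the `allow_invalid` path above might be exercised. It
assumes the Synapse admin user API reaches `register_user` with
`by_admin=True` (an assumption, not something this diff shows) and that
`admin_api_register_invalid` is enabled; token and user ID are placeholders.

```python
# Sketch only: registering a localpart that upstream validation would reject
# (uppercase characters) via the Synapse admin user API. Assumes
# admin_api_register_invalid is enabled; all values are placeholders.
import requests

HOMESERVER = "https://matrix.example.com"
ADMIN_TOKEN = "syt_admin_..."  # placeholder admin access token
USER_ID = "@UpperCaseUser:example.com"  # normally rejected localpart

resp = requests.put(
    f"{HOMESERVER}/_synapse/admin/v2/users/{USER_ID}",
    headers={"Authorization": f"Bearer {ADMIN_TOKEN}"},
    json={"password": "change-me", "admin": False},
)
resp.raise_for_status()
print(resp.json().get("name"))
```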


@ -857,11 +857,23 @@ class RoomCreationHandler:
visibility = config.get("visibility", "private")
is_public = visibility == "public"
room_id = await self._generate_and_create_room_id(
creator_id=user_id,
is_public=is_public,
room_version=room_version,
)
if "room_id" in config:
room_id = config["room_id"]
try:
await self.store.store_room(
room_id=room_id,
room_creator_user_id=user_id,
is_public=is_public,
room_version=room_version,
)
except StoreError:
raise SynapseError(409, "Room ID already in use", errcode="M_CONFLICT")
else:
room_id = await self._generate_and_create_room_id(
creator_id=user_id,
is_public=is_public,
room_version=room_version,
)
# Check whether this visibility value is blocked by a third party module
allowed_by_third_party_rules = await (
@ -1184,7 +1196,7 @@ class RoomCreationHandler:
events_to_send.append((power_event, power_context))
else:
power_level_content: JsonDict = {
"users": {creator_id: 100},
"users": {creator_id: 9001},
"users_default": 0,
"events": {
EventTypes.Name: 50,


@ -21,6 +21,7 @@ class RoomBatchHandler:
self.event_creation_handler = hs.get_event_creation_handler()
self.room_member_handler = hs.get_room_member_handler()
self.auth = hs.get_auth()
self.allow_send_any = self.hs.config.meow.appservice_batch_send_any
async def inherit_depth_from_prev_ids(self, prev_event_ids: List[str]) -> int:
"""Finds the depth which would sort it after the most-recent
@ -118,7 +119,9 @@ class RoomBatchHandler:
Requester object
"""
await self.auth.validate_appservice_can_control_user_id(app_service, user_id)
await self.auth.validate_appservice_can_control_user_id(
app_service, user_id, allow_any=self.allow_send_any
)
return create_requester(user_id, app_service=app_service)
@ -271,6 +274,8 @@ class RoomBatchHandler:
inherited_depth: int,
initial_state_event_ids: List[str],
app_service_requester: Requester,
beeper_new_messages: bool,
beeper_initial_prev_event_ids: List[str] = None,
) -> List[str]:
"""Create and persists all events provided sequentially. Handles the
complexity of creating events in chronological order so they can
@ -290,21 +295,24 @@ class RoomBatchHandler:
the start of the historical batch since it's floating with no
prev_events to derive state from automatically.
app_service_requester: The requester of an application service.
beeper_new_messages: Is this a batch of new events rather than history?
beeper_initial_prev_event_ids: prev_event_ids for the first event to send.
Returns:
List of persisted event IDs
"""
assert app_service_requester.app_service
# We expect the first event in a historical batch to be an insertion event
assert events_to_create[0]["type"] == EventTypes.MSC2716_INSERTION
# We expect the last event in a historical batch to be an batch event
assert events_to_create[-1]["type"] == EventTypes.MSC2716_BATCH
if not beeper_new_messages:
# We expect the first event in a historical batch to be an insertion event
assert events_to_create[0]["type"] == EventTypes.MSC2716_INSERTION
# We expect the last event in a historical batch to be an batch event
assert events_to_create[-1]["type"] == EventTypes.MSC2716_BATCH
# Make the historical event chain float off on its own by specifying no
# prev_events for the first event in the chain which causes the HS to
# ask for the state at the start of the batch later.
prev_event_ids: List[str] = []
prev_event_ids: List[str] = beeper_initial_prev_event_ids or []
event_ids = []
events_to_persist = []
@ -335,14 +343,14 @@ class RoomBatchHandler:
# Only the first event (which is the insertion event) in the
# chain should be floating. The rest should hang off each other
# in a chain.
allow_no_prev_events=index == 0,
allow_no_prev_events=index == 0 and not beeper_new_messages,
prev_event_ids=event_dict.get("prev_events"),
# Since the first event (which is the insertion event) in the
# chain is floating with no `prev_events`, it can't derive state
# from anywhere automatically. So we need to set some state
# explicitly.
state_event_ids=initial_state_event_ids if index == 0 else None,
historical=True,
historical=not beeper_new_messages,
depth=inherited_depth,
)
@ -370,6 +378,18 @@ class RoomBatchHandler:
event_ids.append(event_id)
prev_event_ids = [event_id]
if beeper_new_messages:
for index, (event, context) in enumerate(events_to_persist):
await self.event_creation_handler.handle_new_client_event(
await self.create_requester_for_user_id_from_app_service(
event.sender, app_service_requester.app_service
),
event=event,
context=context,
dont_notify=index < len(events_to_persist) - 1,
)
return event_ids
# Persist events in reverse-chronological order so they have the
# correct stream_ordering as they are backfilled (which decrements).
# Events are sorted by (topological_ordering, stream_ordering)
@ -394,6 +414,8 @@ class RoomBatchHandler:
inherited_depth: int,
initial_state_event_ids: List[str],
app_service_requester: Requester,
beeper_new_messages: bool,
beeper_initial_prev_event_ids: List[str] = None,
) -> Tuple[List[str], str]:
"""
Handles creating and persisting all of the historical events as well as
@ -415,6 +437,8 @@ class RoomBatchHandler:
`/batch_send?prev_event_id=$abc` plus the outcome of
`persist_state_events_at_start`
app_service_requester: The requester of an application service.
beeper_new_messages: Is this a batch of new events rather than history?
beeper_initial_prev_event_ids: prev_event_ids for the first event to send.
Returns:
Tuple containing a list of created events and the next_batch_id
@ -435,8 +459,9 @@ class RoomBatchHandler:
# the last event we're inserting
"origin_server_ts": last_event_in_batch["origin_server_ts"],
}
# Add the batch event to the end of the batch (newest-in-time)
events_to_create.append(batch_event)
if not beeper_new_messages:
# Add the batch event to the end of the batch (newest-in-time)
events_to_create.append(batch_event)
# Add an "insertion" event to the start of each batch (next to the oldest-in-time
# event in the batch) so the next batch can be connected to this one.
@ -451,8 +476,9 @@ class RoomBatchHandler:
next_batch_id = insertion_event["content"][
EventContentFields.MSC2716_NEXT_BATCH_ID
]
# Prepend the insertion event to the start of the batch (oldest-in-time)
events_to_create = [insertion_event] + events_to_create
if not beeper_new_messages:
# Prepend the insertion event to the start of the batch (oldest-in-time)
events_to_create = [insertion_event] + events_to_create
# Create and persist all of the historical events
event_ids = await self.persist_historical_events(
@ -461,6 +487,8 @@ class RoomBatchHandler:
inherited_depth=inherited_depth,
initial_state_event_ids=initial_state_event_ids,
app_service_requester=app_service_requester,
beeper_new_messages=beeper_new_messages,
beeper_initial_prev_event_ids=beeper_initial_prev_event_ids,
)
return event_ids, next_batch_id


@ -723,26 +723,6 @@ class RoomMemberHandler(metaclass=abc.ABCMeta):
content.pop("displayname", None)
content.pop("avatar_url", None)
if len(content.get("displayname") or "") > MAX_DISPLAYNAME_LEN:
raise SynapseError(
400,
f"Displayname is too long (max {MAX_DISPLAYNAME_LEN})",
errcode=Codes.BAD_JSON,
)
if len(content.get("avatar_url") or "") > MAX_AVATAR_URL_LEN:
raise SynapseError(
400,
f"Avatar URL is too long (max {MAX_AVATAR_URL_LEN})",
errcode=Codes.BAD_JSON,
)
if "avatar_url" in content and content.get("avatar_url") is not None:
if not await self.profile_handler.check_avatar_size_and_mime_type(
content["avatar_url"],
):
raise SynapseError(403, "This avatar is not allowed", Codes.FORBIDDEN)
# The event content should *not* include the authorising user as
# it won't be properly signed. Strip it out since it might come
# back from a client updating a display name / avatar.


@ -1175,7 +1175,6 @@ class SyncHandler:
for e in await sync_config.filter_collection.filter_room_state(
list(state.values())
)
if e.type != EventTypes.Aliases # until MSC2261 or alternative solution
}
async def _find_missing_partial_state_memberships(


@ -330,10 +330,10 @@ class BulkPushRuleEvaluator:
context: EventContext,
event_id_to_event: Mapping[str, EventBase],
) -> None:
if (
not event.internal_metadata.is_notifiable()
or event.internal_metadata.is_historical()
or event.content.get(EventContentFields.MSC2716_HISTORICAL)
):
# Push rules for events that aren't notifiable can't be processed by this and
# we want to skip push notification actions for historical messages


@ -129,13 +129,6 @@ class HttpPusher(Pusher):
url = self.data["url"]
if not isinstance(url, str):
raise PusherConfigException("'url' must be a string")
url_parts = urllib.parse.urlparse(url)
# Note that the specification also says the scheme must be HTTPS, but
# it isn't up to the homeserver to verify that.
if url_parts.path != "/_matrix/push/v1/notify":
raise PusherConfigException(
"'url' must have a path of '/_matrix/push/v1/notify'"
)
self.url = url
self.http_client = hs.get_proxied_blacklisted_http_client()
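With the path check above removed, a pusher URL no longer has to end in
`/_matrix/push/v1/notify`. A hedged sketch using the standard `/pushers/set`
endpoint; the push gateway URL and all other values are placeholders.

```python
# Sketch only: registering an HTTP pusher whose URL does not end in
# /_matrix/push/v1/notify, which upstream refuses (PusherConfigException)
# but this fork accepts. All values are placeholders.
import requests

HOMESERVER = "https://matrix.example.com"
ACCESS_TOKEN = "syt_..."

resp = requests.post(
    f"{HOMESERVER}/_matrix/client/v3/pushers/set",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={
        "kind": "http",
        "app_id": "com.example.bridge",
        "app_display_name": "Example bridge",
        "device_display_name": "Bridge pusher",
        "pushkey": "example-pushkey-1",
        "lang": "en",
        "data": {"url": "https://push.example.com/custom/notify"},
    },
)
resp.raise_for_status()
```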


@ -84,6 +84,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
requester: Requester,
ratelimit: bool,
extra_users: List[UserID],
dont_notify: bool,
) -> JsonDict:
"""
Args:
@ -94,6 +95,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
context
ratelimit
extra_users: Any extra users to notify about event
dont_notify
"""
serialized_context = await context.serialize(event, store)
@ -108,6 +110,7 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
"requester": requester.serialize(),
"ratelimit": ratelimit,
"extra_users": [u.to_string() for u in extra_users],
"dont_notify": dont_notify,
}
return payload
@ -133,13 +136,18 @@ class ReplicationSendEventRestServlet(ReplicationEndpoint):
ratelimit = content["ratelimit"]
extra_users = [UserID.from_string(u) for u in content["extra_users"]]
dont_notify = content["dont_notify"]
logger.info(
"Got event to send with ID: %s into room: %s", event.event_id, event.room_id
)
event = await self.event_creation_handler.persist_and_notify_client_events(
requester, [(event, context)], ratelimit=ratelimit, extra_users=extra_users
requester,
[(event, context)],
ratelimit=ratelimit,
extra_users=extra_users,
dont_notify=dont_notify,
)
return (


@ -82,6 +82,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):
requester: Requester,
ratelimit: bool,
extra_users: List[UserID],
dont_notify: bool,
) -> JsonDict:
"""
Args:
@ -108,7 +109,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):
}
serialized_events.append(serialized_event)
payload = {"events": serialized_events}
payload = {"events": serialized_events, "dont_notify": dont_notify}
return payload
@ -118,6 +119,7 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):
with Measure(self.clock, "repl_send_events_parse"):
events_and_context = []
events = payload["events"]
dont_notify = payload["dont_notify"]
for event_payload in events:
event_dict = event_payload["event"]
@ -152,7 +154,11 @@ class ReplicationSendEventsRestServlet(ReplicationEndpoint):
last_event = (
await self.event_creation_handler.persist_and_notify_client_events(
requester, events_and_context, ratelimit, extra_users
requester,
events_and_context,
ratelimit,
extra_users,
dont_notify=dont_notify,
)
)


@ -70,12 +70,16 @@ class ReadMarkerRestServlet(RestServlet):
# TODO Add validation to reject non-string event IDs.
if not event_id:
continue
extra_content = body.get(
receipt_type.replace("m.", "com.beeper.") + ".extra", None
)
if receipt_type == ReceiptTypes.FULLY_READ:
await self.read_marker_handler.received_client_read_marker(
room_id,
user_id=requester.user.to_string(),
event_id=event_id,
extra_content=extra_content,
)
else:
await self.receipts_handler.received_client_receipt(
@ -85,6 +89,7 @@ class ReadMarkerRestServlet(RestServlet):
event_id=event_id,
# Setting the thread ID is not possible with the /read_markers endpoint.
thread_id=None,
extra_content=extra_content,
)
return 200, {}


@ -65,7 +65,7 @@ class ReceiptRestServlet(RestServlet):
f"Receipt type must be {', '.join(self._known_receipt_types)}",
)
body = parse_json_object_from_request(request)
body = parse_json_object_from_request(request, allow_empty_body=False)
# Pull the thread ID, if one exists.
thread_id = None
@ -100,6 +100,7 @@ class ReceiptRestServlet(RestServlet):
room_id,
user_id=requester.user.to_string(),
event_id=event_id,
extra_content=body,
)
else:
await self.receipts_handler.received_client_receipt(
@ -108,6 +109,7 @@ class ReceiptRestServlet(RestServlet):
user_id=requester.user.to_string(),
event_id=event_id,
thread_id=thread_id,
extra_content=body,
)
return 200, {}
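The change above forwards the JSON body of a receipt request as extra receipt
content, stored next to the server-generated `ts`. A hedged sketch using the
standard `/receipt` endpoint; the custom key and all values are placeholders.

```python
# Sketch only: attaching custom data to a read receipt. With this patch the
# request body is stored in the receipt data next to the server's "ts".
# All values (including the com.example.* key) are placeholders.
import urllib.parse

import requests

HOMESERVER = "https://matrix.example.com"
ACCESS_TOKEN = "syt_..."
ROOM_ID = "!myfixedroomid:example.com"
EVENT_ID = "$someeventid"

resp = requests.post(
    f"{HOMESERVER}/_matrix/client/v3/rooms/{urllib.parse.quote(ROOM_ID)}"
    f"/receipt/m.read/{urllib.parse.quote(EVENT_ID)}",
    headers={"Authorization": f"Bearer {ACCESS_TOKEN}"},
    json={"com.example.device": "laptop"},
)
resp.raise_for_status()
```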


@ -320,6 +320,7 @@ class RoomSendEventRestServlet(TransactionRestServlet):
super().__init__(hs)
self.event_creation_handler = hs.get_event_creation_handler()
self.auth = hs.get_auth()
self.hs = hs
def register(self, http_server: HttpServer) -> None:
# /rooms/$roomid/send/$event_type[/$txn_id]
@ -343,7 +344,10 @@ class RoomSendEventRestServlet(TransactionRestServlet):
"sender": requester.user.to_string(),
}
if requester.app_service:
if (
requester.app_service
or requester.user.to_string() in self.hs.config.meow.timestamp_override
):
origin_server_ts = parse_integer(request, "ts")
if origin_server_ts is not None:
event_dict["origin_server_ts"] = origin_server_ts


@ -28,6 +28,7 @@ from synapse.http.servlet import (
parse_json_object_from_request,
parse_string,
parse_strings_from_args,
parse_boolean_from_args,
)
from synapse.http.site import SynapseRequest
from synapse.rest.client.transactions import HttpTransactionCache
@ -100,6 +101,9 @@ class RoomBatchSendEventRestServlet(RestServlet):
request.args, "prev_event_id"
)
batch_id_from_query = parse_string(request, "batch_id")
beeper_new_messages = parse_boolean_from_args(
request.args, "com.beeper.new_messages"
)
if prev_event_ids_from_query is None:
raise SynapseError(
@ -155,7 +159,7 @@ class RoomBatchSendEventRestServlet(RestServlet):
# Create and persist all of the state events that float off on their own
# before the batch. These will most likely be all of the invite/member
# state events used to auth the upcoming historical messages.
if body["state_events_at_start"]:
if body["state_events_at_start"] and not beeper_new_messages:
state_event_ids_at_start = (
await self.room_batch_handler.persist_state_events_at_start(
state_events_at_start=body["state_events_at_start"],
@ -181,6 +185,8 @@ class RoomBatchSendEventRestServlet(RestServlet):
base_insertion_event = None
if batch_id_from_query:
batch_id_to_connect_to = batch_id_from_query
elif beeper_new_messages:
batch_id_to_connect_to = None
# Otherwise, create an insertion event to act as a starting point.
#
# We don't always have an insertion event to start hanging more history
@ -231,11 +237,20 @@ class RoomBatchSendEventRestServlet(RestServlet):
inherited_depth=inherited_depth,
initial_state_event_ids=state_event_ids,
app_service_requester=requester,
beeper_new_messages=beeper_new_messages,
beeper_initial_prev_event_ids=prev_event_ids_from_query
if beeper_new_messages
else None,
)
insertion_event_id = event_ids[0]
batch_event_id = event_ids[-1]
historical_event_ids = event_ids[1:-1]
if beeper_new_messages:
insertion_event_id = batch_event_id = None
historical_event_ids = event_ids
next_batch_id = None
else:
insertion_event_id = event_ids[0]
batch_event_id = event_ids[-1]
historical_event_ids = event_ids[1:-1]
response_dict = {
"state_event_ids": state_event_ids_at_start,


@ -184,7 +184,9 @@ def add_file_headers(
# recommend caching as it's sensitive or private - or at least
# select private. don't bother setting Expires as all our
# clients are smart enough to be happy with Cache-Control
request.setHeader(b"Cache-Control", b"public,max-age=86400,s-maxage=86400")
request.setHeader(
b"Cache-Control", b"public,immutable,max-age=31536000,s-maxage=86400"
)
if file_size is not None:
request.setHeader(b"Content-Length", b"%d" % (file_size,))


@ -39,7 +39,7 @@ class ThumbnailError(Exception):
class Thumbnailer:
FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG"}
FORMATS = {"image/jpeg": "JPEG", "image/png": "PNG", "image/webp": "WEBP"}
@staticmethod
def set_limits(max_image_pixels: int) -> None:


@ -37,6 +37,7 @@ class StorageControllers:
# rewrite all the existing code to split it into high vs low level
# interfaces.
self.main = stores.main
self.hs = hs
self.purge_events = PurgeEventsStorageController(hs, stores)
self.state = StateStorageController(hs, stores)


@ -116,6 +116,10 @@ async def filter_events_for_client(
room_id
] = await storage.main.get_retention_policy_for_room(room_id)
# meow: let admins see secret events like org.matrix.dummy_event, m.room.aliases
# and events expired by the retention policy.
filter_override = user_id in storage.hs.config.meow.filter_override
def allowed(event: EventBase) -> Optional[EventBase]:
return _check_client_allowed_to_see_event(
user_id=user_id,
@ -128,6 +132,7 @@ async def filter_events_for_client(
state=event_id_to_state.get(event.event_id),
is_peeking=is_peeking,
sender_erased=erased_senders.get(event.sender, False),
filter_override=filter_override,
)
# Check each event: gives an iterable of None or (a potentially modified)
@ -275,6 +280,7 @@ def _check_client_allowed_to_see_event(
retention_policy: RetentionPolicy,
state: Optional[StateMap[EventBase]],
sender_erased: bool,
filter_override: bool,
) -> Optional[EventBase]:
"""Check with the given user is allowed to see the given event
@ -291,6 +297,7 @@ def _check_client_allowed_to_see_event(
retention_policy: The retention policy of the room
state: The state at the event, unless its an outlier
sender_erased: Whether the event sender has been marked as "erased"
filter_override: meow
Returns:
None if the user cannot see this event at all
@ -304,7 +311,7 @@ def _check_client_allowed_to_see_event(
# because, if this is not the case, we're probably only checking if the users can
# see events in the room at that point in the DAG, and that shouldn't be decided
# on those checks.
if filter_send_to_client:
if filter_send_to_client and not filter_override:
if (
_check_filter_send_to_client(event, clock, retention_policy, sender_ignored)
== _CheckFilter.DENIED
@ -314,6 +321,9 @@ def _check_client_allowed_to_see_event(
event.event_id,
)
return None
# meow: even with filter_override, we want to filter ignored users
elif filter_send_to_client and not event.is_state() and sender_ignored:
return None
if event.event_id in always_include_ids:
return event