Merge remote-tracking branch 'upstream/release-v1.57'

commit b2fa6ec9f6
Author: Tulir Asokan
Date:   2022-04-21 13:53:47 +03:00

248 changed files with 14616 additions and 8934 deletions

synapse/rest/client/relations.py

@@ -12,22 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-"""This class implements the proposed relation APIs from MSC 1849.
-
-Since the MSC has not been approved all APIs here are unstable and may change at
-any time to reflect changes in the MSC.
-"""
-
 import logging
 from typing import TYPE_CHECKING, Optional, Tuple
 
-from synapse.api.constants import RelationTypes
-from synapse.api.errors import SynapseError
 from synapse.http.server import HttpServer
 from synapse.http.servlet import RestServlet, parse_integer, parse_string
 from synapse.http.site import SynapseRequest
 from synapse.rest.client._base import client_patterns
-from synapse.storage.relations import AggregationPaginationToken
 from synapse.types import JsonDict, StreamToken
 
 if TYPE_CHECKING:
@@ -44,7 +35,7 @@ class RelationPaginationServlet(RestServlet):
     PATTERNS = client_patterns(
         "/rooms/(?P<room_id>[^/]*)/relations/(?P<parent_id>[^/]*)"
         "(/(?P<relation_type>[^/]*)(/(?P<event_type>[^/]*))?)?$",
-        releases=(),
+        releases=("v1",),
     )
 
     def __init__(self, hs: "HomeServer"):
@@ -93,166 +84,5 @@ class RelationPaginationServlet(RestServlet):
         return 200, result
 
 
-class RelationAggregationPaginationServlet(RestServlet):
-    """API to paginate aggregation groups of relations, e.g. paginate the
-    types and counts of the reactions on the events.
-
-    Example request and response:
-
-    GET /rooms/{room_id}/aggregations/{parent_id}
-
-    {
-        chunk: [
-            {
-                "type": "m.reaction",
-                "key": "👍",
-                "count": 3
-            }
-        ]
-    }
-    """
-
-    PATTERNS = client_patterns(
-        "/rooms/(?P<room_id>[^/]*)/aggregations/(?P<parent_id>[^/]*)"
-        "(/(?P<relation_type>[^/]*)(/(?P<event_type>[^/]*))?)?$",
-        releases=(),
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.store = hs.get_datastores().main
-        self.event_handler = hs.get_event_handler()
-
-    async def on_GET(
-        self,
-        request: SynapseRequest,
-        room_id: str,
-        parent_id: str,
-        relation_type: Optional[str] = None,
-        event_type: Optional[str] = None,
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
-        await self.auth.check_user_in_room_or_world_readable(
-            room_id,
-            requester.user.to_string(),
-            allow_departed_users=True,
-        )
-
-        # This checks that a) the event exists and b) the user is allowed to
-        # view it.
-        event = await self.event_handler.get_event(requester.user, room_id, parent_id)
-        if event is None:
-            raise SynapseError(404, "Unknown parent event.")
-
-        if relation_type not in (RelationTypes.ANNOTATION, None):
-            raise SynapseError(
-                400, f"Relation type must be '{RelationTypes.ANNOTATION}'"
-            )
-
-        limit = parse_integer(request, "limit", default=5)
-        from_token_str = parse_string(request, "from")
-        to_token_str = parse_string(request, "to")
-
-        # Return the relations
-        from_token = None
-        if from_token_str:
-            from_token = AggregationPaginationToken.from_string(from_token_str)
-
-        to_token = None
-        if to_token_str:
-            to_token = AggregationPaginationToken.from_string(to_token_str)
-
-        pagination_chunk = await self.store.get_aggregation_groups_for_event(
-            event_id=parent_id,
-            room_id=room_id,
-            event_type=event_type,
-            limit=limit,
-            from_token=from_token,
-            to_token=to_token,
-        )
-
-        return 200, await pagination_chunk.to_dict(self.store)
-
-
-class RelationAggregationGroupPaginationServlet(RestServlet):
-    """API to paginate within an aggregation group of relations, e.g. paginate
-    all the 👍 reactions on an event.
-
-    Example request and response:
-
-    GET /rooms/{room_id}/aggregations/{parent_id}/m.annotation/m.reaction/👍
-
-    {
-        chunk: [
-            {
-                "type": "m.reaction",
-                "content": {
-                    "m.relates_to": {
-                        "rel_type": "m.annotation",
-                        "key": "👍"
-                    }
-                }
-            },
-            ...
-        ]
-    }
-    """
-
-    PATTERNS = client_patterns(
-        "/rooms/(?P<room_id>[^/]*)/aggregations/(?P<parent_id>[^/]*)"
-        "/(?P<relation_type>[^/]*)/(?P<event_type>[^/]*)/(?P<key>[^/]*)$",
-        releases=(),
-    )
-
-    def __init__(self, hs: "HomeServer"):
-        super().__init__()
-        self.auth = hs.get_auth()
-        self.store = hs.get_datastores().main
-        self._relations_handler = hs.get_relations_handler()
-
-    async def on_GET(
-        self,
-        request: SynapseRequest,
-        room_id: str,
-        parent_id: str,
-        relation_type: str,
-        event_type: str,
-        key: str,
-    ) -> Tuple[int, JsonDict]:
-        requester = await self.auth.get_user_by_req(request, allow_guest=True)
-
-        if relation_type != RelationTypes.ANNOTATION:
-            raise SynapseError(400, "Relation type must be 'annotation'")
-
-        limit = parse_integer(request, "limit", default=5)
-        from_token_str = parse_string(request, "from")
-        to_token_str = parse_string(request, "to")
-
-        from_token = None
-        if from_token_str:
-            from_token = await StreamToken.from_string(self.store, from_token_str)
-
-        to_token = None
-        if to_token_str:
-            to_token = await StreamToken.from_string(self.store, to_token_str)
-
-        result = await self._relations_handler.get_relations(
-            requester=requester,
-            event_id=parent_id,
-            room_id=room_id,
-            relation_type=relation_type,
-            event_type=event_type,
-            aggregation_key=key,
-            limit=limit,
-            from_token=from_token,
-            to_token=to_token,
-        )
-
-        return 200, result
-
-
 def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None:
     RelationPaginationServlet(hs).register(http_server)
-    RelationAggregationPaginationServlet(hs).register(http_server)
-    RelationAggregationGroupPaginationServlet(hs).register(http_server)
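
Reviewer note: the two servlets deleted above served the unstable /aggregations API. The grouped reaction counts they returned can still be derived client-side from the relation events that the surviving /relations endpoint (now also exposed on the stable v1 prefix) hands back. A minimal sketch of that grouping, ours rather than anything in Synapse:

# Sketch (ours, not Synapse's storage code) of the aggregation the removed
# endpoint performed: fold individual m.annotation events into
# {"type", "key", "count"} rows, so three separate m.reaction events with
# key "👍" collapse into {"type": "m.reaction", "key": "👍", "count": 3}.
from collections import Counter
from typing import Any, Dict, List


def aggregate_annotations(events: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    counts: Counter = Counter()
    for event in events:
        relation = event.get("content", {}).get("m.relates_to", {})
        if relation.get("rel_type") == "m.annotation":
            counts[(event["type"], relation.get("key"))] += 1
    return [
        {"type": event_type, "key": key, "count": count}
        for (event_type, key), count in counts.most_common()
    ]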

synapse/rest/client/room_batch.py

@@ -123,6 +123,19 @@ class RoomBatchSendEventRestServlet(RestServlet):
                 errcode=Codes.INVALID_PARAM,
             )
 
+        # Make sure that the prev_event_ids exist and aren't outliers - ie, they are
+        # regular parts of the room DAG where we know the state.
+        non_outlier_prev_events = await self.store.have_events_in_timeline(
+            prev_event_ids_from_query
+        )
+        for prev_event_id in prev_event_ids_from_query:
+            if prev_event_id not in non_outlier_prev_events:
+                raise SynapseError(
+                    HTTPStatus.BAD_REQUEST,
+                    "prev_event %s does not exist, or is an outlier" % (prev_event_id,),
+                    errcode=Codes.INVALID_PARAM,
+                )
+
         # For the event we are inserting next to (`prev_event_ids_from_query`),
         # find the most recent state events that allowed that message to be
         # sent. We will use that as a base to auth our historical messages
@@ -131,14 +144,6 @@ class RoomBatchSendEventRestServlet(RestServlet):
             prev_event_ids_from_query
         )
 
-        if not state_event_ids:
-            raise SynapseError(
-                HTTPStatus.BAD_REQUEST,
-                "No auth events found for given prev_event query parameter. The prev_event=%s probably does not exist."
-                % prev_event_ids_from_query,
-                errcode=Codes.INVALID_PARAM,
-            )
-
         state_event_ids_at_start = []
         # Create and persist all of the state events that float off on their own
         # before the batch. These will most likely be all of the invite/member
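
Reviewer note: the net effect is a fail-fast check, one bulk existence lookup and then a clear 400 for any requested prev_event that is missing or an outlier, replacing the vaguer "No auth events found" error removed below it. The shape of that pattern, with a stand-in for the storage call:

# Illustration of the fail-fast validation added above. `have_events_in_timeline`
# here is a stand-in with the same contract as the storage method in the diff:
# it returns the subset of the given IDs that exist as non-outlier events.
from typing import Awaitable, Callable, Collection, Set


async def check_prev_events(
    prev_event_ids: Collection[str],
    have_events_in_timeline: Callable[[Collection[str]], Awaitable[Set[str]]],
) -> None:
    non_outlier_prev_events = await have_events_in_timeline(prev_event_ids)
    for prev_event_id in prev_event_ids:
        if prev_event_id not in non_outlier_prev_events:
            raise ValueError(
                "prev_event %s does not exist, or is an outlier" % (prev_event_id,)
            )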

synapse/rest/client/sync.py

@@ -99,6 +99,7 @@ class SyncRestServlet(RestServlet):
         self.presence_handler = hs.get_presence_handler()
         self._server_notices_sender = hs.get_server_notices_sender()
         self._event_serializer = hs.get_event_client_serializer()
+        self._msc2654_enabled = hs.config.experimental.msc2654_enabled
 
     async def on_GET(self, request: SynapseRequest) -> Tuple[int, JsonDict]:
         # This will always be set by the time Twisted calls us.
@@ -300,14 +301,13 @@
         if archived:
             response["rooms"][Membership.LEAVE] = archived
 
-        # By the time we get here groups is no longer optional.
-        assert sync_result.groups is not None
-        if sync_result.groups.join:
-            response["groups"][Membership.JOIN] = sync_result.groups.join
-        if sync_result.groups.invite:
-            response["groups"][Membership.INVITE] = sync_result.groups.invite
-        if sync_result.groups.leave:
-            response["groups"][Membership.LEAVE] = sync_result.groups.leave
+        if sync_result.groups is not None:
+            if sync_result.groups.join:
+                response["groups"][Membership.JOIN] = sync_result.groups.join
+            if sync_result.groups.invite:
+                response["groups"][Membership.INVITE] = sync_result.groups.invite
+            if sync_result.groups.leave:
+                response["groups"][Membership.LEAVE] = sync_result.groups.leave
 
         return response
@@ -521,7 +521,8 @@
             result["ephemeral"] = {"events": ephemeral_events}
         result["unread_notifications"] = room.unread_notifications
         result["summary"] = room.summary
-        result["org.matrix.msc2654.unread_count"] = room.unread_count
+        if self._msc2654_enabled:
+            result["org.matrix.msc2654.unread_count"] = room.unread_count
 
         return result
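
Reviewer note: both sync changes make previously unconditional response fields conditional. `groups` is only populated when the sync result carries it, and the MSC2654 unread count is only emitted when the experimental flag is on, so clients cannot rely on either field being present. A defensive client-side read might look like this (the fallback choice is ours, not mandated by the diff):

# Defensive read of a room's unread count from a /sync response. The MSC2654
# field name comes from the diff; falling back to the stable
# unread_notifications block is our own choice of degradation.
from typing import Any, Dict


def unread_count(room_json: Dict[str, Any]) -> int:
    count = room_json.get("org.matrix.msc2654.unread_count")
    if count is None:
        # Server doesn't emit MSC2654 unread counts (flag off or unsupported).
        count = room_json.get("unread_notifications", {}).get(
            "notification_count", 0
        )
    return count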

synapse/rest/key/v2/local_key_resource.py

@@ -76,17 +76,17 @@ class LocalKey(Resource):
     def response_json_object(self) -> JsonDict:
         verify_keys = {}
-        for key in self.config.key.signing_key:
-            verify_key_bytes = key.verify_key.encode()
-            key_id = "%s:%s" % (key.alg, key.version)
+        for signing_key in self.config.key.signing_key:
+            verify_key_bytes = signing_key.verify_key.encode()
+            key_id = "%s:%s" % (signing_key.alg, signing_key.version)
             verify_keys[key_id] = {"key": encode_base64(verify_key_bytes)}
 
         old_verify_keys = {}
-        for key_id, key in self.config.key.old_signing_keys.items():
-            verify_key_bytes = key.encode()
+        for key_id, old_signing_key in self.config.key.old_signing_keys.items():
+            verify_key_bytes = old_signing_key.encode()
             old_verify_keys[key_id] = {
                 "key": encode_base64(verify_key_bytes),
-                "expired_ts": key.expired_ts,
+                "expired_ts": old_signing_key.expired,
             }
 
         json_object = {
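
Reviewer note: besides renaming the loop variables so `key` and `key_id` are no longer reused for different things, the second loop now reads the old key's expiry from the `.expired` attribute rather than `.expired_ts`. The structure being assembled is the standard server-key response, keyed by "<algorithm>:<version>"; roughly:

# Rough shape of the two mappings the loops above build (all values
# fabricated; the full response also carries server_name, valid_until_ts
# and signatures).
example_keys = {
    "verify_keys": {
        "ed25519:a_abc1": {"key": "dGhpcyBpcyBub3QgYSByZWFsIGtleQ"},
    },
    "old_verify_keys": {
        "ed25519:old001": {
            "key": "bm90IGEgcmVhbCBrZXkgZWl0aGVy",
            # The JSON field stays "expired_ts"; only the Python attribute
            # it is read from was renamed to `.expired`.
            "expired_ts": 1650000000000,
        },
    },
}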

synapse/rest/key/v2/remote_key_resource.py

@@ -13,7 +13,7 @@
 # limitations under the License.
 
 import logging
-from typing import TYPE_CHECKING, Dict
+from typing import TYPE_CHECKING, Dict, Set
 
 from signedjson.sign import sign_json
@@ -149,7 +149,7 @@ class RemoteKey(DirectServeJsonResource):
         cached = await self.store.get_server_keys_json(store_queries)
 
-        json_results = set()
+        json_results: Set[bytes] = set()
 
         time_now_ms = self.clock.time_msec()
@@ -234,8 +234,8 @@ class RemoteKey(DirectServeJsonResource):
             await self.query_keys(request, query, query_remote_on_cache_miss=False)
         else:
             signed_keys = []
-            for key_json in json_results:
-                key_json = json_decoder.decode(key_json.decode("utf-8"))
+            for key_json_raw in json_results:
+                key_json = json_decoder.decode(key_json_raw.decode("utf-8"))
                 for signing_key in self.config.key.key_server_signing_keys:
                     key_json = sign_json(
                         key_json, self.config.server.server_name, signing_key
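
Reviewer note: both hunks serve the type checker. `json_results` gets an explicit element type, and the decode loop stops rebinding its loop variable, which had silently changed `key_json` from bytes to a parsed dict mid-iteration. A distilled version of the pitfall:

# Distilled version of the rebinding pitfall the rename fixes: reusing one
# name for raw bytes and for the decoded dict confuses readers and mypy alike.
import json
from typing import Set

json_results: Set[bytes] = {b'{"server_name": "example.org"}'}

for key_json_raw in json_results:  # bytes, straight from the cache
    key_json = json.loads(key_json_raw.decode("utf-8"))  # dict, ready to sign
    print(key_json["server_name"])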

synapse/rest/media/v1/media_repository.py

@@ -258,7 +258,7 @@ class MediaRepository:
         # We linearize here to ensure that we don't try and download remote
         # media multiple times concurrently
         key = (server_name, media_id)
-        with (await self.remote_media_linearizer.queue(key)):
+        async with self.remote_media_linearizer.queue(key):
             responder, media_info = await self._get_remote_media_impl(
                 server_name, media_id
             )
@@ -294,7 +294,7 @@ class MediaRepository:
         # We linearize here to ensure that we don't try and download remote
         # media multiple times concurrently
         key = (server_name, media_id)
-        with (await self.remote_media_linearizer.queue(key)):
+        async with self.remote_media_linearizer.queue(key):
            responder, media_info = await self._get_remote_media_impl(
                 server_name, media_id
             )
@@ -850,7 +850,7 @@ class MediaRepository:
             # TODO: Should we delete from the backup store
 
-            with (await self.remote_media_linearizer.queue(key)):
+            async with self.remote_media_linearizer.queue(key):
                 full_path = self.filepaths.remote_media_filepath(origin, file_id)
                 try:
                     os.remove(full_path)
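
Reviewer note: all three hunks switch from the old `with (await ...queue(key))` idiom to a native `async with`, which the diff shows the linearizer's `queue()` now supports directly. The per-key serialization it provides, one download at a time per (server_name, media_id), can be approximated with plain asyncio (a stand-in sketch, not Synapse's Linearizer):

# Stand-in (ours, not Synapse's Linearizer) for per-key serialization of
# remote media downloads: one asyncio.Lock per (server_name, media_id) key.
import asyncio
from collections import defaultdict
from typing import DefaultDict, Tuple


class KeyedLinearizer:
    def __init__(self) -> None:
        self._locks: DefaultDict[Tuple[str, str], asyncio.Lock] = defaultdict(
            asyncio.Lock
        )

    def queue(self, key: Tuple[str, str]) -> asyncio.Lock:
        # asyncio.Lock is itself an async context manager, so callers use the
        # same shape as the diff: `async with linearizer.queue(key): ...`
        return self._locks[key]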

synapse/rest/media/v1/preview_url_resource.py

@@ -200,12 +200,17 @@ class PreviewUrlResource(DirectServeJsonResource):
                         match = False
                         continue
 
+                    # Some attributes might not be parsed as strings by urlsplit (such as the
+                    # port, which is parsed as an int). Because we use match functions that
+                    # expect strings, we want to make sure that's what we give them.
+                    value_str = str(value)
+
                     if pattern.startswith("^"):
-                        if not re.match(pattern, getattr(url_tuple, attrib)):
+                        if not re.match(pattern, value_str):
                             match = False
                             continue
                     else:
-                        if not fnmatch.fnmatch(getattr(url_tuple, attrib), pattern):
+                        if not fnmatch.fnmatch(value_str, pattern):
                             match = False
                             continue
                 if match:
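
Reviewer note: the fix exists because urlsplit parses some URL components as non-strings while both match functions demand strings. A small demonstration of the failure mode and the fix (URL and patterns are made up):

# Demonstration of the bug the hunk above fixes.
import fnmatch
import re
from urllib.parse import urlsplit

url_tuple = urlsplit("http://example.com:8448/path")
print(type(url_tuple.port))  # <class 'int'>, not a str

# re.match("^8448$", url_tuple.port) would raise TypeError; str() first:
print(bool(re.match("^8448$", str(url_tuple.port))))  # True
print(fnmatch.fnmatch(str(url_tuple.port), "84*"))    # True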