From 82c8799ec7f6676555033f5d804cbed443a1ea3e Mon Sep 17 00:00:00 2001 From: Neil Johnson Date: Sat, 19 Oct 2019 09:06:15 +0100 Subject: [PATCH 001/133] Set room version default to 5 --- changelog.d/6220.feature | 1 + docs/sample_config.yaml | 2 +- synapse/config/server.py | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6220.feature diff --git a/changelog.d/6220.feature b/changelog.d/6220.feature new file mode 100644 index 000000000..8343e9912 --- /dev/null +++ b/changelog.d/6220.feature @@ -0,0 +1 @@ +Increase default room version from 4 to 5, thereby enforcing server key validity period checks. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index 8226978ba..af3ca0f72 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -72,7 +72,7 @@ pid_file: DATADIR/homeserver.pid # For example, for room version 1, default_room_version should be set # to "1". # -#default_room_version: "4" +#default_room_version: "5" # The GC threshold parameters to pass to `gc.set_threshold`, if defined # diff --git a/synapse/config/server.py b/synapse/config/server.py index afc4d6a4a..26e6d84c0 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -41,7 +41,7 @@ logger = logging.Logger(__name__) # in the list. DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"] -DEFAULT_ROOM_VERSION = "4" +DEFAULT_ROOM_VERSION = "5" ROOM_COMPLEXITY_TOO_GREAT = ( "Your homeserver is unable to join rooms this large or complex. " From 7c8c97e635811609c5a7ae4c0bb94e6573c30753 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Oct 2019 15:12:49 +0000 Subject: [PATCH 002/133] Split purge API into events vs state --- synapse/handlers/pagination.py | 7 +- synapse/storage/__init__.py | 2 + synapse/storage/data_stores/main/events.py | 328 ++++++++++----------- synapse/storage/data_stores/main/state.py | 23 ++ synapse/storage/purge_events.py | 117 ++++++++ tests/storage/test_purge.py | 15 +- 6 files changed, 308 insertions(+), 184 deletions(-) create mode 100644 synapse/storage/purge_events.py diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 5744f4579..9088ba14c 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -69,6 +69,7 @@ class PaginationHandler(object): self.hs = hs self.auth = hs.get_auth() self.store = hs.get_datastore() + self.storage = hs.get_storage() self.clock = hs.get_clock() self._server_name = hs.hostname @@ -125,7 +126,9 @@ class PaginationHandler(object): self._purges_in_progress_by_room.add(room_id) try: with (yield self.pagination_lock.write(room_id)): - yield self.store.purge_history(room_id, token, delete_local_events) + yield self.storage.purge_events.purge_history( + room_id, token, delete_local_events + ) logger.info("[purge] complete") self._purges_by_id[purge_id].status = PurgeStatus.STATUS_COMPLETE except Exception: @@ -168,7 +171,7 @@ class PaginationHandler(object): if joined: raise SynapseError(400, "Users are still joined to this room") - await self.store.purge_room(room_id) + await self.storage.purge_events.purge_room(room_id) @defer.inlineCallbacks def get_messages( diff --git a/synapse/storage/__init__.py b/synapse/storage/__init__.py index a6429d17e..3646ebd00 100644 --- a/synapse/storage/__init__.py +++ b/synapse/storage/__init__.py @@ -30,6 +30,7 @@ stored in `synapse.storage.schema`. 
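The pagination handler changes above route purges through `hs.get_storage()` rather than calling the datastore method directly, so the multi-step purge logic can live outside the SQL layer. A minimal sketch of a caller under this split, assuming a HomeServer-like `hs`; the class itself is illustrative, and only `get_datastore()`, `get_storage()` and `purge_events.purge_history()` come from the diff:

# Hedged sketch: how callers use the split storage API. Illustrative
# class; requires an `hs` object providing the two accessors below.
from twisted.internet import defer


class ExamplePurgeCaller(object):
    def __init__(self, hs):
        self.store = hs.get_datastore()  # low-level, table-oriented methods
        self.storage = hs.get_storage()  # high-level, cross-store operations

    @defer.inlineCallbacks
    def purge(self, room_id, token):
        # Event deletion and the follow-up state-group cleanup are now
        # coordinated behind a single high-level call.
        yield self.storage.purge_events.purge_history(
            room_id, token, delete_local_events=False
        )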
from synapse.storage.data_stores import DataStores from synapse.storage.data_stores.main import DataStore from synapse.storage.persist_events import EventsPersistenceStorage +from synapse.storage.purge_events import PurgeEventsStorage __all__ = ["DataStores", "DataStore"] @@ -45,6 +46,7 @@ class Storage(object): self.main = stores.main self.persistence = EventsPersistenceStorage(hs, stores) + self.purge_events = PurgeEventsStorage(hs, stores) def are_all_users_on_domain(txn, database_engine, domain): diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 7c3607f30..4eacba805 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1368,6 +1368,10 @@ class EventsStore( if True, we will delete local events as well as remote ones (instead of just marking them as outliers and deleting their state groups). + + Returns: + Deferred[set[int]]: The set of state groups that reference deleted + events. """ return self.runInteraction( @@ -1521,60 +1525,6 @@ class EventsStore( "[purge] found %i referenced state groups", len(referenced_state_groups) ) - logger.info("[purge] finding state groups that can be deleted") - - _ = self._find_unreferenced_groups_during_purge(txn, referenced_state_groups) - state_groups_to_delete, remaining_state_groups = _ - - logger.info( - "[purge] found %i state groups to delete", len(state_groups_to_delete) - ) - - logger.info( - "[purge] de-delta-ing %i remaining state groups", - len(remaining_state_groups), - ) - - # Now we turn the state groups that reference to-be-deleted state - # groups to non delta versions. - for sg in remaining_state_groups: - logger.info("[purge] de-delta-ing remaining state group %s", sg) - curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) - curr_state = curr_state[sg] - - self._simple_delete_txn( - txn, table="state_groups_state", keyvalues={"state_group": sg} - ) - - self._simple_delete_txn( - txn, table="state_group_edges", keyvalues={"state_group": sg} - ) - - self._simple_insert_many_txn( - txn, - table="state_groups_state", - values=[ - { - "state_group": sg, - "room_id": room_id, - "type": key[0], - "state_key": key[1], - "event_id": state_id, - } - for key, state_id in iteritems(curr_state) - ], - ) - - logger.info("[purge] removing redundant state groups") - txn.executemany( - "DELETE FROM state_groups_state WHERE state_group = ?", - ((sg,) for sg in state_groups_to_delete), - ) - txn.executemany( - "DELETE FROM state_groups WHERE id = ?", - ((sg,) for sg in state_groups_to_delete), - ) - logger.info("[purge] removing events from event_to_state_groups") txn.execute( "DELETE FROM event_to_state_groups " @@ -1661,87 +1611,7 @@ class EventsStore( logger.info("[purge] done") - def _find_unreferenced_groups_during_purge(self, txn, state_groups): - """Used when purging history to figure out which state groups can be - deleted and which need to be de-delta'ed (due to one of its prev groups - being scheduled for deletion). - - Args: - txn - state_groups (set[int]): Set of state groups referenced by events - that are going to be deleted. 
- - Returns: - tuple[set[int], set[int]]: The set of state groups that can be - deleted and the set of state groups that need to be de-delta'ed - """ - # Graph of state group -> previous group - graph = {} - - # Set of events that we have found to be referenced by events - referenced_groups = set() - - # Set of state groups we've already seen - state_groups_seen = set(state_groups) - - # Set of state groups to handle next. - next_to_search = set(state_groups) - while next_to_search: - # We bound size of groups we're looking up at once, to stop the - # SQL query getting too big - if len(next_to_search) < 100: - current_search = next_to_search - next_to_search = set() - else: - current_search = set(itertools.islice(next_to_search, 100)) - next_to_search -= current_search - - # Check if state groups are referenced - sql = """ - SELECT DISTINCT state_group FROM event_to_state_groups - LEFT JOIN events_to_purge AS ep USING (event_id) - WHERE ep.event_id IS NULL AND - """ - clause, args = make_in_list_sql_clause( - txn.database_engine, "state_group", current_search - ) - txn.execute(sql + clause, list(args)) - - referenced = set(sg for sg, in txn) - referenced_groups |= referenced - - # We don't continue iterating up the state group graphs for state - # groups that are referenced. - current_search -= referenced - - rows = self._simple_select_many_txn( - txn, - table="state_group_edges", - column="prev_state_group", - iterable=current_search, - keyvalues={}, - retcols=("prev_state_group", "state_group"), - ) - - prevs = set(row["state_group"] for row in rows) - # We don't bother re-handling groups we've already seen - prevs -= state_groups_seen - next_to_search |= prevs - state_groups_seen |= prevs - - for row in rows: - # Note: Each state group can have at most one prev group - graph[row["state_group"]] = row["prev_state_group"] - - to_delete = state_groups_seen - referenced_groups - - to_dedelta = set() - for sg in referenced_groups: - prev_sg = graph.get(sg) - if prev_sg and prev_sg in to_delete: - to_dedelta.add(sg) - - return to_delete, to_dedelta + return referenced_state_groups def purge_room(self, room_id): """Deletes all record of a room @@ -1753,46 +1623,7 @@ class EventsStore( return self.runInteraction("purge_room", self._purge_room_txn, room_id) def _purge_room_txn(self, txn, room_id): - # first we have to delete the state groups states - logger.info("[purge] removing %s from state_groups_state", room_id) - - txn.execute( - """ - DELETE FROM state_groups_state WHERE state_group IN ( - SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) - WHERE events.room_id=? - ) - """, - (room_id,), - ) - - # ... and the state group edges - logger.info("[purge] removing %s from state_group_edges", room_id) - - txn.execute( - """ - DELETE FROM state_group_edges WHERE state_group IN ( - SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) - WHERE events.room_id=? - ) - """, - (room_id,), - ) - - # ... and the state groups - logger.info("[purge] removing %s from state_groups", room_id) - - txn.execute( - """ - DELETE FROM state_groups WHERE id IN ( - SELECT state_group FROM events JOIN event_to_state_groups USING(event_id) - WHERE events.room_id=? 
- ) - """, - (room_id,), - ) - - # and then tables which lack an index on room_id but have one on event_id + # First delete tables which lack an index on room_id but have one on event_id for table in ( "event_auth", "event_edges", @@ -1881,6 +1712,153 @@ class EventsStore( logger.info("[purge] done") + def purge_unreferenced_state_groups(self, room_id, state_groups_to_delete): + """Deletes no longer referenced state groups and de-deltas any state + groups that reference them. + """ + + return self.runInteraction( + "purge_unreferenced_state_groups", + self._purge_unreferenced_state_groups, + room_id, + state_groups_to_delete, + ) + + def _purge_unreferenced_state_groups(self, txn, room_id, state_groups_to_delete): + logger.info( + "[purge] found %i state groups to delete", len(state_groups_to_delete) + ) + + rows = self._simple_select_many_txn( + txn, + table="state_group_edges", + column="prev_state_group", + iterable=state_groups_to_delete, + keyvalues={}, + retcols=("state_group",), + ) + + remaining_state_groups = set( + row["state_group"] + for row in rows + if row["state_group"] not in state_groups_to_delete + ) + + logger.info( + "[purge] de-delta-ing %i remaining state groups", + len(remaining_state_groups), + ) + + # Now we turn the state groups that reference to-be-deleted state + # groups to non delta versions. + for sg in remaining_state_groups: + logger.info("[purge] de-delta-ing remaining state group %s", sg) + curr_state = self._get_state_groups_from_groups_txn(txn, [sg]) + curr_state = curr_state[sg] + + self._simple_delete_txn( + txn, table="state_groups_state", keyvalues={"state_group": sg} + ) + + self._simple_delete_txn( + txn, table="state_group_edges", keyvalues={"state_group": sg} + ) + + self._simple_insert_many_txn( + txn, + table="state_groups_state", + values=[ + { + "state_group": sg, + "room_id": room_id, + "type": key[0], + "state_key": key[1], + "event_id": state_id, + } + for key, state_id in iteritems(curr_state) + ], + ) + + logger.info("[purge] removing redundant state groups") + txn.executemany( + "DELETE FROM state_groups_state WHERE state_group = ?", + ((sg,) for sg in state_groups_to_delete), + ) + txn.executemany( + "DELETE FROM state_groups WHERE id = ?", + ((sg,) for sg in state_groups_to_delete), + ) + + @defer.inlineCallbacks + def get_previous_state_groups(self, state_groups): + """Fetch the previous groups of the given state groups. + + Args: + state_groups (Iterable[int]) + + Returns: + Deferred[dict[int, int]]: mapping from state group to previous + state group. + """ + + rows = yield self._simple_select_many_batch( + table="state_group_edges", + column="prev_state_group", + iterable=state_groups, + keyvalues={}, + retcols=("prev_state_group", "state_group"), + desc="get_previous_state_groups", + ) + + return {row["state_group"]: row["prev_state_group"] for row in rows} + + def purge_room_state(self, room_id): + """Deletes all record of a room from state tables + + Args: + room_id (str): + """ + + return self.runInteraction( + "purge_room_state", self._purge_room_state_txn, room_id + ) + + def _purge_room_state_txn(self, txn, room_id): + # first we have to delete the state groups states + logger.info("[purge] removing %s from state_groups_state", room_id) + + txn.execute( + """ + DELETE FROM state_groups_state + INNER JOIN state_groups USING (event_id) + WHEREE state_groups.room_id = ? + """, + (room_id,), + ) + + # ... 
and the state group edges + logger.info("[purge] removing %s from state_group_edges", room_id) + + txn.execute( + """ + DELETE FROM state_group_edges + INNER JOIN state_groups USING (event_id) + WHEREE state_groups.room_id = ? + ) + """, + (room_id,), + ) + + # ... and the state groups + logger.info("[purge] removing %s from state_groups", room_id) + + txn.execute( + """ + DELETE FROM state_groups WHEREE room_id = ? + """, + (room_id,), + ) + async def is_event_after(self, event_id1, event_id2): """Returns True if event_id1 is after event_id2 in the stream """ diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 9b2207075..36be8f0a9 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -989,6 +989,29 @@ class StateGroupWorkerStore( return self.runInteraction("store_state_group", _store_state_group_txn) + @defer.inlineCallbacks + def get_referenced_state_groups(self, state_groups): + """Check if the state groups are referenced by events. + + Args: + state_groups (Iterable[int]) + + Returns: + Deferred[set[int]]: The subset of state groups that are + referenced. + """ + + rows = yield self._simple_select_many_batch( + table="event_to_state_groups", + column="state_group", + iterable=state_groups, + keyvalues={}, + retcols=("DISTINCT state_group",), + desc="get_referenced_state_groups", + ) + + return set(row["state_group"] for row in rows) + class StateBackgroundUpdateStore( StateGroupBackgroundUpdateStore, BackgroundUpdateStore diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py new file mode 100644 index 000000000..dd45df0c8 --- /dev/null +++ b/synapse/storage/purge_events.py @@ -0,0 +1,117 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import itertools +import logging + +from twisted.internet import defer + +logger = logging.getLogger(__name__) + + +class PurgeEventsStorage(object): + """High level interface for purging rooms and event history. + """ + + def __init__(self, hs, stores): + self.stores = stores + + @defer.inlineCallbacks + def purge_room(self, room_id: str): + """Deletes all record of a room + """ + + yield self.stores.main.purge_room(room_id) + yield self.stores.main.purge_room_state(room_id) + + @defer.inlineCallbacks + def purge_history(self, room_id, token, delete_local_events): + """Deletes room history before a certain point + + Args: + room_id (str): + + token (str): A topological token to delete events before + + delete_local_events (bool): + if True, we will delete local events as well as remote ones + (instead of just marking them as outliers and deleting their + state groups). 
+ """ + state_groups = yield self.stores.main.purge_history( + room_id, token, delete_local_events + ) + + logger.info("[purge] finding state groups that can be deleted") + + sg_to_delete = yield self._find_unreferenced_groups(state_groups) + + yield self.stores.main.purge_unreferenced_state_groups(room_id, sg_to_delete) + + @defer.inlineCallbacks + def _find_unreferenced_groups(self, state_groups): + """Used when purging history to figure out which state groups can be + deleted. + + Args: + state_groups (set[int]): Set of state groups referenced by events + that are going to be deleted. + + Returns: + Deferred[set[int]] The set of state groups that can be deleted. + """ + # Graph of state group -> previous group + graph = {} + + # Set of events that we have found to be referenced by events + referenced_groups = set() + + # Set of state groups we've already seen + state_groups_seen = set(state_groups) + + # Set of state groups to handle next. + next_to_search = set(state_groups) + while next_to_search: + # We bound size of groups we're looking up at once, to stop the + # SQL query getting too big + if len(next_to_search) < 100: + current_search = next_to_search + next_to_search = set() + else: + current_search = set(itertools.islice(next_to_search, 100)) + next_to_search -= current_search + + referenced = yield self.stores.main.get_referenced_state_groups( + current_search + ) + referenced_groups |= referenced + + # We don't continue iterating up the state group graphs for state + # groups that are referenced. + current_search -= referenced + + edges = yield self.stores.main.get_previous_state_groups(current_search) + + prevs = set(edges.values()) + # We don't bother re-handling groups we've already seen + prevs -= state_groups_seen + next_to_search |= prevs + state_groups_seen |= prevs + + graph.update(edges) + + to_delete = state_groups_seen - referenced_groups + + return to_delete diff --git a/tests/storage/test_purge.py b/tests/storage/test_purge.py index f671599cb..b9fafaa1a 100644 --- a/tests/storage/test_purge.py +++ b/tests/storage/test_purge.py @@ -40,23 +40,24 @@ class PurgeTests(HomeserverTestCase): third = self.helper.send(self.room_id, body="test3") last = self.helper.send(self.room_id, body="test4") - storage = self.hs.get_datastore() + store = self.hs.get_datastore() + storage = self.hs.get_storage() # Get the topological token - event = storage.get_topological_token_for_event(last["event_id"]) + event = store.get_topological_token_for_event(last["event_id"]) self.pump() event = self.successResultOf(event) # Purge everything before this topological token - purge = storage.purge_history(self.room_id, event, True) + purge = storage.purge_events.purge_history(self.room_id, event, True) self.pump() self.assertEqual(self.successResultOf(purge), None) # Try and get the events - get_first = storage.get_event(first["event_id"]) - get_second = storage.get_event(second["event_id"]) - get_third = storage.get_event(third["event_id"]) - get_last = storage.get_event(last["event_id"]) + get_first = store.get_event(first["event_id"]) + get_second = store.get_event(second["event_id"]) + get_third = store.get_event(third["event_id"]) + get_last = store.get_event(last["event_id"]) self.pump() # 1-3 should fail and last will succeed, meaning that 1-3 are deleted From ecfba89a784db12b48074ade3a44092267ba9cf7 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 30 Oct 2019 15:14:29 +0000 Subject: [PATCH 003/133] Newsfile --- changelog.d/6295.misc | 1 + 1 file changed, 1 insertion(+) create 
mode 100644 changelog.d/6295.misc diff --git a/changelog.d/6295.misc b/changelog.d/6295.misc new file mode 100644 index 000000000..a3e6b8296 --- /dev/null +++ b/changelog.d/6295.misc @@ -0,0 +1 @@ +Split out state storage into separate data store. From 8f5bbdb987c92ee3e9b4170cc8ce296d87b1555d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 31 Oct 2019 15:22:08 +0000 Subject: [PATCH 004/133] Fix purge room API --- synapse/storage/data_stores/main/events.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 63b09a09e..a904a7157 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1829,8 +1829,10 @@ class EventsStore( txn.execute( """ DELETE FROM state_groups_state - INNER JOIN state_groups USING (event_id) - WHEREE state_groups.room_id = ? + WHERE state_group IN ( + SELECT state_group FROM state_groups + WHERE room_id = ? + ) """, (room_id,), ) @@ -1841,8 +1843,9 @@ class EventsStore( txn.execute( """ DELETE FROM state_group_edges - INNER JOIN state_groups USING (event_id) - WHEREE state_groups.room_id = ? + WHERE state_group IN ( + SELECT state_group FROM state_groups + WHERE room_id = ? ) """, (room_id,), @@ -1853,7 +1856,7 @@ class EventsStore( txn.execute( """ - DELETE FROM state_groups WHEREE room_id = ? + DELETE FROM state_groups WHERE room_id = ? """, (room_id,), ) From f91f2a1f92ee2eb5758184ef0df66490592587d1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 31 Oct 2019 15:25:21 +0000 Subject: [PATCH 005/133] Docstrings --- synapse/storage/data_stores/main/events.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index a904a7157..108b2e8bd 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -19,6 +19,7 @@ import itertools import logging from collections import Counter as c_counter, OrderedDict, namedtuple from functools import wraps +from typing import Set from six import iteritems, text_type from six.moves import range @@ -1370,8 +1371,8 @@ class EventsStore( state groups). Returns: - Deferred[set[int]]: The set of state groups that reference deleted - events. + Deferred[set[int]]: The set of state groups that are referenced by + deleted events. """ return self.runInteraction( @@ -1711,9 +1712,16 @@ class EventsStore( logger.info("[purge] done") - def purge_unreferenced_state_groups(self, room_id, state_groups_to_delete): + def purge_unreferenced_state_groups( + self, room_id: str, state_groups_to_delete: Set[int] + ) -> defer.Deferred: """Deletes no longer referenced state groups and de-deltas any state groups that reference them. + + Args: + room_id: The room the state groups belong to (must all be in the + same room). + state_groups_to_delete: Set of all state groups to delete. 
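Patch 004 above replaces the broken `DELETE ... INNER JOIN` statements (and the stray `WHEREE` keyword) with subselects, since neither SQLite nor PostgreSQL accepts a JOIN clause inside DELETE. A hedged, self-contained illustration of that delete-via-subselect pattern on a toy two-table schema, not the real Synapse schema:

# Runnable sqlite3 demo of a DELETE scoped through IN (subselect),
# mirroring the shape of the corrected purge SQL above. Toy schema only.
import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript(
    """
    CREATE TABLE state_groups (id INTEGER PRIMARY KEY, room_id TEXT);
    CREATE TABLE state_groups_state (state_group INTEGER, event_id TEXT);
    INSERT INTO state_groups VALUES (1, '!a:example.org'), (2, '!b:example.org');
    INSERT INTO state_groups_state VALUES (1, '$x'), (2, '$y');
    """
)

# DELETE cannot JOIN, so scope the affected rows with a subselect instead.
conn.execute(
    """
    DELETE FROM state_groups_state
    WHERE state_group IN (
        SELECT id FROM state_groups WHERE room_id = ?
    )
    """,
    ("!a:example.org",),
)
assert conn.execute("SELECT COUNT(*) FROM state_groups_state").fetchone()[0] == 1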
""" return self.runInteraction( From 61be1a29262a9a3553537f257a4187ef355684f5 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 31 Oct 2019 15:39:26 +0000 Subject: [PATCH 006/133] Add state_groups.room_id index --- .../schema/delta/56/state_group_room_idx.sql | 17 +++++++++++++++++ synapse/storage/data_stores/main/state.py | 7 +++++++ 2 files changed, 24 insertions(+) create mode 100644 synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql diff --git a/synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql b/synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql new file mode 100644 index 000000000..7916ef18b --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql @@ -0,0 +1,17 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('state_groups_room_id_idx', '{}'); diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 36be8f0a9..bf6de4ca2 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -1021,6 +1021,7 @@ class StateBackgroundUpdateStore( STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" + STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" def __init__(self, db_conn, hs): super(StateBackgroundUpdateStore, self).__init__(db_conn, hs) @@ -1044,6 +1045,12 @@ class StateBackgroundUpdateStore( table="event_to_state_groups", columns=["state_group"], ) + self.register_background_index_update( + self.STATE_GROUPS_ROOM_INDEX_UPDATE_NAME, + index_name="state_groups_room_id_idx", + table="state_groups", + columns=["room_id"], + ) @defer.inlineCallbacks def _background_deduplicate_state(self, progress, batch_size): From fb1a6914cf953ed237235274a9aab62aafec5b4f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 31 Oct 2019 15:45:48 +0000 Subject: [PATCH 007/133] Update log line to lie a little less --- synapse/storage/data_stores/main/events.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 108b2e8bd..3a9b6bed4 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1509,7 +1509,7 @@ class EventsStore( [(room_id, event_id) for event_id, in new_backwards_extrems], ) - logger.info("[purge] finding redundant state groups") + logger.info("[purge] finding state groups referenced by deleted events") # Get all state groups that are referenced by events that are to be # deleted. 
We then go and check if they are referenced by other events From 669b6cbda3e545f277def545954948090aec8980 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 1 Nov 2019 11:32:20 +0000 Subject: [PATCH 008/133] Fix up comment --- synapse/storage/data_stores/main/events.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 3a9b6bed4..a71d7346d 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1512,8 +1512,7 @@ class EventsStore( logger.info("[purge] finding state groups referenced by deleted events") # Get all state groups that are referenced by events that are to be - # deleted. We then go and check if they are referenced by other events - # or state groups, and if not we delete them. + # deleted. txn.execute( """ SELECT DISTINCT state_group FROM events_to_purge From c9a1b80a74863b98b5dce2954b8486a068a0151f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 14:41:28 +0000 Subject: [PATCH 009/133] MSC2326: Add background update to take previous events into account --- .../data_stores/main/events_bg_updates.py | 55 +++++++++++++++++++ .../56/event_labels_background_update.sql | 17 ++++++ 2 files changed, 72 insertions(+) create mode 100644 synapse/storage/data_stores/main/schema/delta/56/event_labels_background_update.sql diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 51352b996..e702fca14 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -21,6 +21,7 @@ from canonicaljson import json from twisted.internet import defer +from synapse.api.constants import LabelsField from synapse.storage._base import make_in_list_sql_clause from synapse.storage.background_updates import BackgroundUpdateStore @@ -85,6 +86,10 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): "event_fix_redactions_bytes", self._event_fix_redactions_bytes ) + self.register_background_update_handler( + "event_store_labels", self._event_store_labels + ) + @defer.inlineCallbacks def _background_reindex_fields_sender(self, progress, batch_size): target_min_stream_id = progress["target_min_stream_id_inclusive"] @@ -503,3 +508,53 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): yield self._end_background_update("event_fix_redactions_bytes") return 1 + + @defer.inlineCallbacks + def _event_store_labels(self, progress, batch_size): + """Stores labels for events.""" + last_event_id = progress.get("last_event_id", "") + + def _event_store_labels_txn(txn): + txn.execute( + """ + SELECT event_id, json FROM event_json + WHERE event_id > ? + LIMIT ? 
+ """, + (last_event_id, batch_size) + ) + + rows = txn.fetchall() + if not rows: + return True + + for row in rows: + event_id = row["event_id"] + event_json = json.loads(row["json"]) + + self._simple_insert_many_txn( + txn=txn, + table="event_labels", + values=[ + { + "event_id": event_id, + "label": label, + } + for label in event_json["content"].get(LabelsField) + ] + ) + + self._background_update_progress_txn( + txn, "event_store_labels", {"last_event_id": event_id} + ) + + return len(rows) == batch_size + + end = yield self.runInteraction( + desc="event_store_labels", func=_event_store_labels_txn + ) + + if end: + yield self._end_background_update("event_store_labels") + + return batch_size diff --git a/synapse/storage/data_stores/main/schema/delta/56/event_labels_background_update.sql b/synapse/storage/data_stores/main/schema/delta/56/event_labels_background_update.sql new file mode 100644 index 000000000..5f5e0499a --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/56/event_labels_background_update.sql @@ -0,0 +1,17 @@ +/* Copyright 2019 The Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('event_store_labels', '{}'); From a46574281d90d45e4a5f3ecede3261d17726719e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 14:46:16 +0000 Subject: [PATCH 010/133] Use the right format for rows --- synapse/storage/data_stores/main/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index e702fca14..013242b04 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -524,7 +524,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): (last_event_id, batch_size) ) - rows = txn.fetchall() + rows = self.cursor_to_dict(txn) if not rows: return True From 07cb38e965a29624b3cc8a07d4c86f61dbe7b72a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 14:47:09 +0000 Subject: [PATCH 011/133] Use a sensible default value for labels --- synapse/storage/data_stores/main/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 013242b04..448048e11 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -540,7 +540,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): "event_id": event_id, "label": label, } - for label in event_json["content"].get(LabelsField) + for label in event_json["content"].get(LabelsField, []) ] ) From 911b03ca312160bd8f80f010da4eaf63d7f602da Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 14:49:41 +0000 Subject: [PATCH 012/133] Don't try to process 
events we already have a label for --- synapse/storage/data_stores/main/events_bg_updates.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 448048e11..c8168f692 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -518,7 +518,8 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): txn.execute( """ SELECT event_id, json FROM event_json - WHERE event_id > ? + LEFT JOIN event_labels USING (event_id) + WHERE event_id > ? AND label IS NULL LIMIT ? """, (last_event_id, batch_size) From 416c7baee64e0d7145c2447c51326af1c901a176 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 15:00:40 +0000 Subject: [PATCH 013/133] Changelog --- changelog.d/6310.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6310.feature diff --git a/changelog.d/6310.feature b/changelog.d/6310.feature new file mode 100644 index 000000000..b7ff3fad3 --- /dev/null +++ b/changelog.d/6310.feature @@ -0,0 +1 @@ +Implement label-based filtering. From 1c1268245d501e3edefbe3a4eb40c238124bb2e6 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 15:01:29 +0000 Subject: [PATCH 014/133] Lint --- synapse/storage/data_stores/main/events_bg_updates.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index c8168f692..10ebc6d86 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -522,7 +522,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): WHERE event_id > ? AND label IS NULL LIMIT ? """, - (last_event_id, batch_size) + (last_event_id, batch_size), ) rows = self.cursor_to_dict(txn) @@ -537,12 +537,9 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): txn=txn, table="event_labels", values=[ - { - "event_id": event_id, - "label": label, - } + {"event_id": event_id, "label": label} for label in event_json["content"].get(LabelsField, []) - ] + ], ) self._background_update_progress_txn( From 1586f2c7e716fb066aa5551bdce654be8e35b01d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 23:01:26 +0000 Subject: [PATCH 015/133] Fix exit condition --- synapse/storage/data_stores/main/events_bg_updates.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 10ebc6d86..72d600c51 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -546,7 +546,9 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): txn, "event_store_labels", {"last_event_id": event_id} ) - return len(rows) == batch_size + # We want to return true (to end the background update) only when + # the query returned with less rows than we asked for. 
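+            # (`rows` comes from a `LIMIT ?` query, so a short batch is the
+            # only reliable end-of-data signal: a full batch of exactly
+            # `batch_size` rows may mean more unprocessed events remain.)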
+ return len(rows) != batch_size end = yield self.runInteraction( desc="event_store_labels", func=_event_store_labels_txn From 49008e674f6c5463168e970349ce84c488407834 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 31 Oct 2019 23:12:15 +0000 Subject: [PATCH 016/133] TODO --- synapse/storage/data_stores/main/events_bg_updates.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 72d600c51..b7b390485 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -548,6 +548,8 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): # We want to return true (to end the background update) only when # the query returned with less rows than we asked for. + # TODO: this currently doesn't work, i.e. it only runs once whereas + # its opposite does the whole thing, investigate. return len(rows) != batch_size end = yield self.runInteraction( From 824bba2f78f86a9364a87c6ce79a40bff0f73cbf Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 1 Nov 2019 12:03:04 +0000 Subject: [PATCH 017/133] Correctly order results --- synapse/storage/data_stores/main/events_bg_updates.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index b7b390485..5ba1cff46 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -21,7 +21,7 @@ from canonicaljson import json from twisted.internet import defer -from synapse.api.constants import LabelsField +from synapse.api.constants import EventContentFields from synapse.storage._base import make_in_list_sql_clause from synapse.storage.background_updates import BackgroundUpdateStore @@ -520,7 +520,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): SELECT event_id, json FROM event_json LEFT JOIN event_labels USING (event_id) WHERE event_id > ? AND label IS NULL - LIMIT ? + ORDER BY event_id LIMIT ? """, (last_event_id, batch_size), ) @@ -538,7 +538,9 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): table="event_labels", values=[ {"event_id": event_id, "label": label} - for label in event_json["content"].get(LabelsField, []) + for label in event_json["content"].get( + EventContentFields.Labels, [] + ) ], ) @@ -548,8 +550,6 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): # We want to return true (to end the background update) only when # the query returned with less rows than we asked for. - # TODO: this currently doesn't work, i.e. it only runs once whereas - # its opposite does the whole thing, investigate. 
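The `ORDER BY event_id` added just above is what makes the `WHERE event_id > ?` checkpoint sound: without a deterministic order, the saved `last_event_id` can skip or revisit rows between batches. A hedged sketch of the underlying keyset-pagination loop, simplified away from the label join and using plain DB-API calls:

# Keyset pagination: resume strictly after the last key seen. The ORDER
# BY turns "event_id > ?" into a stable cursor across batches.
def iter_event_batches(conn, batch_size=100):
    last_event_id = ""
    while True:
        rows = conn.execute(
            """
            SELECT event_id, json FROM event_json
            WHERE event_id > ?
            ORDER BY event_id LIMIT ?
            """,
            (last_event_id, batch_size),
        ).fetchall()
        if not rows:
            return
        yield rows
        last_event_id = rows[-1][0]  # checkpoint for the next batch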
return len(rows) != batch_size end = yield self.runInteraction( From 3b29a73f9fc18912a6b775a083d9449d426fa790 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 1 Nov 2019 12:07:05 +0000 Subject: [PATCH 018/133] Print out the actual number of affected rows --- synapse/storage/data_stores/main/events_bg_updates.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 5ba1cff46..a70392747 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -527,7 +527,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): rows = self.cursor_to_dict(txn) if not rows: - return True + return True, 0 for row in rows: event_id = row["event_id"] @@ -550,13 +550,13 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): # We want to return true (to end the background update) only when # the query returned with less rows than we asked for. - return len(rows) != batch_size + return len(rows) != batch_size, len(rows) - end = yield self.runInteraction( + end, num_rows = yield self.runInteraction( desc="event_store_labels", func=_event_store_labels_txn ) if end: yield self._end_background_update("event_store_labels") - return batch_size + return num_rows From 7134ca7daa7ead8104e3c51ba4bc730d99d098e3 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 4 Nov 2019 13:36:57 +0000 Subject: [PATCH 019/133] Change to not require a state_groups.room_id index. This does mean that we won't clean up orphaned state groups (i.e. state groups that were persisted but the associated event wasn't). --- synapse/storage/data_stores/main/events.py | 70 ++++++++++++------- .../schema/delta/56/state_group_room_idx.sql | 17 ----- synapse/storage/data_stores/main/state.py | 7 -- synapse/storage/purge_events.py | 4 +- 4 files changed, 45 insertions(+), 53 deletions(-) delete mode 100644 synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 68f27078c..3049a21dc 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1624,7 +1624,10 @@ class EventsStore( """Deletes all record of a room Args: - room_id (str): + room_id (str) + + Returns: + Deferred[List[int]]: The list of state groups to delete. """ return self.runInteraction("purge_room", self._purge_room_txn, room_id) @@ -1714,10 +1717,24 @@ class EventsStore( # index on them. In any case we should be clearing out 'stream' tables # periodically anyway (#5888) + # Now we fetch all the state groups that should be deleted. + txn.execute( + """ + SELECT DISTINCT state_group FROM events + INNER JOIN event_to_state_groups USING(event_id) + WHERE events.room_id = ? 
+ """, + (room_id,), + ) + + state_groups = [row[0] for row in txn] + # TODO: we could probably usefully do a bunch of cache invalidation here logger.info("[purge] done") + return state_groups + def purge_unreferenced_state_groups( self, room_id: str, state_groups_to_delete: Set[int] ) -> defer.Deferred: @@ -1825,54 +1842,53 @@ class EventsStore( return {row["state_group"]: row["prev_state_group"] for row in rows} - def purge_room_state(self, room_id): + def purge_room_state(self, room_id, state_groups_to_delete): """Deletes all record of a room from state tables Args: room_id (str): + state_groups_to_delete (list[int]): State groups to delete """ return self.runInteraction( - "purge_room_state", self._purge_room_state_txn, room_id + "purge_room_state", + self._purge_room_state_txn, + room_id, + state_groups_to_delete, ) - def _purge_room_state_txn(self, txn, room_id): + def _purge_room_state_txn(self, txn, room_id, state_groups_to_delete): # first we have to delete the state groups states logger.info("[purge] removing %s from state_groups_state", room_id) - txn.execute( - """ - DELETE FROM state_groups_state - WHERE state_group IN ( - SELECT state_group FROM state_groups - WHERE room_id = ? - ) - """, - (room_id,), + self._simple_delete_many_txn( + txn, + table="state_groups_state", + column="state_group", + iterable=state_groups_to_delete, + keyvalues={}, ) # ... and the state group edges logger.info("[purge] removing %s from state_group_edges", room_id) - txn.execute( - """ - DELETE FROM state_group_edges - WHERE state_group IN ( - SELECT state_group FROM state_groups - WHERE room_id = ? - ) - """, - (room_id,), + self._simple_delete_many_txn( + txn, + table="state_group_edges", + column="state_group", + iterable=state_groups_to_delete, + keyvalues={}, ) # ... and the state groups logger.info("[purge] removing %s from state_groups", room_id) - txn.execute( - """ - DELETE FROM state_groups WHERE room_id = ? - """, - (room_id,), + self._simple_delete_many_txn( + txn, + table="state_groups", + column="id", + iterable=state_groups_to_delete, + keyvalues={}, ) async def is_event_after(self, event_id1, event_id2): diff --git a/synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql b/synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql deleted file mode 100644 index 7916ef18b..000000000 --- a/synapse/storage/data_stores/main/schema/delta/56/state_group_room_idx.sql +++ /dev/null @@ -1,17 +0,0 @@ -/* Copyright 2019 The Matrix.org Foundation C.I.C. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -INSERT INTO background_updates (update_name, progress_json) VALUES - ('state_groups_room_id_idx', '{}'); diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index e1d3041c7..5c5b15840 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -1023,7 +1023,6 @@ class StateBackgroundUpdateStore( STATE_GROUP_INDEX_UPDATE_NAME = "state_group_state_type_index" CURRENT_STATE_INDEX_UPDATE_NAME = "current_state_members_idx" EVENT_STATE_GROUP_INDEX_UPDATE_NAME = "event_to_state_groups_sg_index" - STATE_GROUPS_ROOM_INDEX_UPDATE_NAME = "state_groups_room_id_idx" def __init__(self, db_conn, hs): super(StateBackgroundUpdateStore, self).__init__(db_conn, hs) @@ -1047,12 +1046,6 @@ class StateBackgroundUpdateStore( table="event_to_state_groups", columns=["state_group"], ) - self.register_background_index_update( - self.STATE_GROUPS_ROOM_INDEX_UPDATE_NAME, - index_name="state_groups_room_id_idx", - table="state_groups", - columns=["room_id"], - ) @defer.inlineCallbacks def _background_deduplicate_state(self, progress, batch_size): diff --git a/synapse/storage/purge_events.py b/synapse/storage/purge_events.py index dd45df0c8..a36818203 100644 --- a/synapse/storage/purge_events.py +++ b/synapse/storage/purge_events.py @@ -33,8 +33,8 @@ class PurgeEventsStorage(object): """Deletes all record of a room """ - yield self.stores.main.purge_room(room_id) - yield self.stores.main.purge_room_state(room_id) + state_groups_to_delete = yield self.stores.main.purge_room(room_id) + yield self.stores.main.purge_room_state(room_id, state_groups_to_delete) @defer.inlineCallbacks def purge_history(self, room_id, token, delete_local_events): From 09957ce0e4dcfd84c2de4039653059faae03065b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Mon, 4 Nov 2019 17:09:22 +0000 Subject: [PATCH 020/133] Implement per-room message retention policies --- changelog.d/5815.feature | 1 + docs/sample_config.yaml | 63 ++++ synapse/api/constants.py | 2 + synapse/config/server.py | 172 ++++++++++ synapse/events/validator.py | 100 +++++- synapse/handlers/federation.py | 2 +- synapse/handlers/message.py | 4 +- synapse/handlers/pagination.py | 111 ++++++ synapse/storage/data_stores/main/events.py | 3 + synapse/storage/data_stores/main/room.py | 252 ++++++++++++++ .../main/schema/delta/56/room_retention.sql | 33 ++ synapse/visibility.py | 17 + tests/rest/client/test_retention.py | 320 ++++++++++++++++++ 13 files changed, 1074 insertions(+), 6 deletions(-) create mode 100644 changelog.d/5815.feature create mode 100644 synapse/storage/data_stores/main/schema/delta/56/room_retention.sql create mode 100644 tests/rest/client/test_retention.py diff --git a/changelog.d/5815.feature b/changelog.d/5815.feature new file mode 100644 index 000000000..ca4df4e7f --- /dev/null +++ b/changelog.d/5815.feature @@ -0,0 +1 @@ +Implement per-room message retention policies. diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d2f4aff82..87fba27d1 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -328,6 +328,69 @@ listeners: # #user_ips_max_age: 14d +# Message retention policy at the server level. +# +# Room admins and mods can define a retention period for their rooms using the +# 'm.room.retention' state event, and server admins can cap this period by setting +# the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. 
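For reference, the 'm.room.retention' state event mentioned here carries its limits as integer lifetimes in milliseconds, which is the shape the validator added later in this patch checks. A hedged example of such an event; the field names follow the event type referenced above, and the values are made up:

# Illustrative m.room.retention state event, with lifetimes expressed
# in milliseconds (here one day and one week).
retention_event = {
    "type": "m.room.retention",
    "state_key": "",
    "content": {
        "min_lifetime": 24 * 60 * 60 * 1000,  # 1d
        "max_lifetime": 7 * 24 * 60 * 60 * 1000,  # 1w
    },
}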
+# +# If this feature is enabled, Synapse will regularly look for and purge events +# which are older than the room's maximum retention period. Synapse will also +# filter events received over federation so that events that should have been +# purged are ignored and not stored again. +# +retention: + # The message retention policies feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # Default retention policy. If set, Synapse will apply it to rooms that lack the + # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't + # matter much because Synapse doesn't take it into account yet. + # + #default_policy: + # min_lifetime: 1d + # max_lifetime: 1y + + # Retention policy limits. If set, a user won't be able to send a + # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime' + # that's not within this range. This is especially useful in closed federations, + # in which server admins can make sure every federating server applies the same + # rules. + # + #allowed_lifetime_min: 1d + #allowed_lifetime_max: 1y + + # Server admins can define the settings of the background jobs purging the + # events which lifetime has expired under the 'purge_jobs' section. + # + # If no configuration is provided, a single job will be set up to delete expired + # events in every room daily. + # + # Each job's configuration defines which range of message lifetimes the job + # takes care of. For example, if 'shortest_max_lifetime' is '2d' and + # 'longest_max_lifetime' is '3d', the job will handle purging expired events in + # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and + # lower than or equal to 3 days. Both the minimum and the maximum value of a + # range are optional, e.g. a job with no 'shortest_max_lifetime' and a + # 'longest_max_lifetime' of '3d' will handle every room with a retention policy + # which 'max_lifetime' is lower than or equal to three days. + # + # The rationale for this per-job configuration is that some rooms might have a + # retention policy with a low 'max_lifetime', where history needs to be purged + # of outdated messages on a very frequent basis (e.g. every 5min), but not want + # that purge to be performed by a job that's iterating over every room it knows, + # which would be quite heavy on the server. + # + #purge_jobs: + # - shortest_max_lifetime: 1d + # longest_max_lifetime: 3d + # interval: 5m: + # - shortest_max_lifetime: 3d + # longest_max_lifetime: 1y + # interval: 24h + ## TLS ## diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 49c4b8505..e3f086f1c 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -94,6 +94,8 @@ class EventTypes(object): ServerACL = "m.room.server_acl" Pinned = "m.room.pinned_events" + Retention = "m.room.retention" + class RejectedReason(object): AUTH_ERROR = "auth_error" diff --git a/synapse/config/server.py b/synapse/config/server.py index d556df308..aa93a416f 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -246,6 +246,115 @@ class ServerConfig(Config): # events with profile information that differ from the target's global profile. 
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True) + retention_config = config.get("retention") + if retention_config is None: + retention_config = {} + + self.retention_enabled = retention_config.get("enabled", False) + + retention_default_policy = retention_config.get("default_policy") + + if retention_default_policy is not None: + self.retention_default_min_lifetime = retention_default_policy.get( + "min_lifetime" + ) + if self.retention_default_min_lifetime is not None: + self.retention_default_min_lifetime = self.parse_duration( + self.retention_default_min_lifetime + ) + + self.retention_default_max_lifetime = retention_default_policy.get( + "max_lifetime" + ) + if self.retention_default_max_lifetime is not None: + self.retention_default_max_lifetime = self.parse_duration( + self.retention_default_max_lifetime + ) + + if ( + self.retention_default_min_lifetime is not None + and self.retention_default_max_lifetime is not None + and ( + self.retention_default_min_lifetime + > self.retention_default_max_lifetime + ) + ): + raise ConfigError( + "The default retention policy's 'min_lifetime' can not be greater" + " than its 'max_lifetime'" + ) + else: + self.retention_default_min_lifetime = None + self.retention_default_max_lifetime = None + + self.retention_allowed_lifetime_min = retention_config.get("allowed_lifetime_min") + if self.retention_allowed_lifetime_min is not None: + self.retention_allowed_lifetime_min = self.parse_duration( + self.retention_allowed_lifetime_min + ) + + self.retention_allowed_lifetime_max = retention_config.get("allowed_lifetime_max") + if self.retention_allowed_lifetime_max is not None: + self.retention_allowed_lifetime_max = self.parse_duration( + self.retention_allowed_lifetime_max + ) + + if ( + self.retention_allowed_lifetime_min is not None + and self.retention_allowed_lifetime_max is not None + and self.retention_allowed_lifetime_min > self.retention_allowed_lifetime_max + ): + raise ConfigError( + "Invalid retention policy limits: 'allowed_lifetime_min' can not be" + " greater than 'allowed_lifetime_max'" + ) + + self.retention_purge_jobs = [] + for purge_job_config in retention_config.get("purge_jobs", []): + interval_config = purge_job_config.get("interval") + + if interval_config is None: + raise ConfigError( + "A retention policy's purge jobs configuration must have the" + " 'interval' key set." + ) + + interval = self.parse_duration(interval_config) + + shortest_max_lifetime = purge_job_config.get("shortest_max_lifetime") + + if shortest_max_lifetime is not None: + shortest_max_lifetime = self.parse_duration(shortest_max_lifetime) + + longest_max_lifetime = purge_job_config.get("longest_max_lifetime") + + if longest_max_lifetime is not None: + longest_max_lifetime = self.parse_duration(longest_max_lifetime) + + if ( + shortest_max_lifetime is not None + and longest_max_lifetime is not None + and shortest_max_lifetime > longest_max_lifetime + ): + raise ConfigError( + "A retention policy's purge jobs configuration's" + " 'shortest_max_lifetime' value can not be greater than its" + " 'longest_max_lifetime' value." 
+ ) + + self.retention_purge_jobs.append({ + "interval": interval, + "shortest_max_lifetime": shortest_max_lifetime, + "longest_max_lifetime": longest_max_lifetime, + }) + + if not self.retention_purge_jobs: + self.retention_purge_jobs = [{ + "interval": self.parse_duration("1d"), + "shortest_max_lifetime": None, + "longest_max_lifetime": None, + }] + self.listeners = [] # type: List[dict] for listener in config.get("listeners", []): if not isinstance(listener.get("port", None), int): @@ -761,6 +870,69 @@ class ServerConfig(Config): # Defaults to `28d`. Set to `null` to disable clearing out of old rows. # #user_ips_max_age: 14d + + # Message retention policy at the server level. + # + # Room admins and mods can define a retention period for their rooms using the + # 'm.room.retention' state event, and server admins can cap this period by setting + # the 'allowed_lifetime_min' and 'allowed_lifetime_max' config options. + # + # If this feature is enabled, Synapse will regularly look for and purge events + # which are older than the room's maximum retention period. Synapse will also + # filter events received over federation so that events that should have been + # purged are ignored and not stored again. + # + retention: + # The message retention policies feature is disabled by default. Uncomment the + # following line to enable it. + # + #enabled: true + + # Default retention policy. If set, Synapse will apply it to rooms that lack the + # 'm.room.retention' state event. Currently, the value of 'min_lifetime' doesn't + # matter much because Synapse doesn't take it into account yet. + # + #default_policy: + # min_lifetime: 1d + # max_lifetime: 1y + + # Retention policy limits. If set, a user won't be able to send a + # 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime' + # that's not within this range. This is especially useful in closed federations, + # in which server admins can make sure every federating server applies the same + # rules. + # + #allowed_lifetime_min: 1d + #allowed_lifetime_max: 1y + + # Server admins can define the settings of the background jobs purging the + # events which lifetime has expired under the 'purge_jobs' section. + # + # If no configuration is provided, a single job will be set up to delete expired + # events in every room daily. + # + # Each job's configuration defines which range of message lifetimes the job + # takes care of. For example, if 'shortest_max_lifetime' is '2d' and + # 'longest_max_lifetime' is '3d', the job will handle purging expired events in + # rooms whose state defines a 'max_lifetime' that's both higher than 2 days, and + # lower than or equal to 3 days. Both the minimum and the maximum value of a + # range are optional, e.g. a job with no 'shortest_max_lifetime' and a + # 'longest_max_lifetime' of '3d' will handle every room with a retention policy + # which 'max_lifetime' is lower than or equal to three days. + # + # The rationale for this per-job configuration is that some rooms might have a + # retention policy with a low 'max_lifetime', where history needs to be purged + # of outdated messages on a very frequent basis (e.g. every 5min), but not want + # that purge to be performed by a job that's iterating over every room it knows, + # which would be quite heavy on the server. 
+ #
+ #purge_jobs:
+ # - shortest_max_lifetime: 1d
+ # longest_max_lifetime: 3d
+ # interval: 5m
+ # - shortest_max_lifetime: 3d
+ # longest_max_lifetime: 1y
+ # interval: 24h
 """
 % locals()
 )
diff --git a/synapse/events/validator.py b/synapse/events/validator.py
index 272426e10..9b90c9ce0 100644
--- a/synapse/events/validator.py
+++ b/synapse/events/validator.py
@@ -13,7 +13,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-from six import string_types
+from six import integer_types, string_types
 from synapse.api.constants import MAX_ALIAS_LENGTH, EventTypes, Membership
 from synapse.api.errors import Codes, SynapseError
@@ -22,11 +22,12 @@ from synapse.types import EventID, RoomID, UserID
 class EventValidator(object):
- def validate_new(self, event):
+ def validate_new(self, event, config):
 """Validates the event has roughly the right format
 Args:
- event (FrozenEvent)
+ event (FrozenEvent): The event to validate.
+ config (Config): The homeserver's configuration.
 """
 self.validate_builder(event)
@@ -67,6 +68,99 @@ class EventValidator(object):
 Codes.INVALID_PARAM,
 )
+ if event.type == EventTypes.Retention:
+ self._validate_retention(event, config)
+
+ def _validate_retention(self, event, config):
+ """Checks that an event that defines the retention policy for a room respects the
+ boundaries imposed by the server's administrator.
+
+ Args:
+ event (FrozenEvent): The event to validate.
+ config (Config): The homeserver's configuration.
+ """
+ min_lifetime = event.content.get("min_lifetime")
+ max_lifetime = event.content.get("max_lifetime")
+
+ if min_lifetime is not None:
+ if not isinstance(min_lifetime, integer_types):
+ raise SynapseError(
+ code=400,
+ msg="'min_lifetime' must be an integer",
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_min is not None
+ and min_lifetime < config.retention_allowed_lifetime_min
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'min_lifetime' can't be lower than the minimum allowed"
+ " value enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_max is not None
+ and min_lifetime > config.retention_allowed_lifetime_max
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'min_lifetime' can't be greater than the maximum allowed"
+ " value enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if max_lifetime is not None:
+ if not isinstance(max_lifetime, integer_types):
+ raise SynapseError(
+ code=400,
+ msg="'max_lifetime' must be an integer",
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_min is not None
+ and max_lifetime < config.retention_allowed_lifetime_min
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'max_lifetime' can't be lower than the minimum allowed value"
+ " enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ config.retention_allowed_lifetime_max is not None
+ and max_lifetime > config.retention_allowed_lifetime_max
+ ):
+ raise SynapseError(
+ code=400,
+ msg=(
+ "'max_lifetime' can't be greater than the maximum allowed"
+ " value enforced by the server's administrator"
+ ),
+ errcode=Codes.BAD_JSON,
+ )
+
+ if (
+ min_lifetime is not None
+ and max_lifetime is not None
+ and min_lifetime > max_lifetime
+ ):
+ raise SynapseError(
+ code=400,
+ msg="'min_lifetime' can't be greater than 'max_lifetime'",
+ errcode=Codes.BAD_JSON,
+ )
+
 def validate_builder(self, event):
 """Validates that the
builder/event has roughly the right format. Only checks values that we expect a proto event to have, rather than all the diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 8cafcfdab..3994137d1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2454,7 +2454,7 @@ class FederationHandler(BaseHandler): room_version, event_dict, event, context ) - EventValidator().validate_new(event) + EventValidator().validate_new(event, self.config) # We need to tell the transaction queue to send this out, even # though the sender isn't a local user. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index d682dc2b7..155ed6e06 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -417,7 +417,7 @@ class EventCreationHandler(object): 403, "You must be in the room to create an alias for it" ) - self.validator.validate_new(event) + self.validator.validate_new(event, self.config) return (event, context) @@ -634,7 +634,7 @@ class EventCreationHandler(object): if requester: context.app_service = requester.app_service - self.validator.validate_new(event) + self.validator.validate_new(event, self.config) # If this event is an annotation then we check that that the sender # can't annotate the same way twice (e.g. stops users from liking an diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 97f15a1c3..e1800177f 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -15,12 +15,15 @@ # limitations under the License. import logging +from six import iteritems + from twisted.internet import defer from twisted.python.failure import Failure from synapse.api.constants import EventTypes, Membership from synapse.api.errors import SynapseError from synapse.logging.context import run_in_background +from synapse.metrics.background_process_metrics import run_as_background_process from synapse.storage.state import StateFilter from synapse.types import RoomStreamToken from synapse.util.async_helpers import ReadWriteLock @@ -80,6 +83,114 @@ class PaginationHandler(object): self._purges_by_id = {} self._event_serializer = hs.get_event_client_serializer() + self._retention_default_max_lifetime = hs.config.retention_default_max_lifetime + + if hs.config.retention_enabled: + # Run the purge jobs described in the configuration file. + for job in hs.config.retention_purge_jobs: + self.clock.looping_call( + run_as_background_process, + job["interval"], + "purge_history_for_rooms_in_range", + self.purge_history_for_rooms_in_range, + job["shortest_max_lifetime"], + job["longest_max_lifetime"], + ) + + @defer.inlineCallbacks + def purge_history_for_rooms_in_range(self, min_ms, max_ms): + """Purge outdated events from rooms within the given retention range. + + If a default retention policy is defined in the server's configuration and its + 'max_lifetime' is within this range, also targets rooms which don't have a + retention policy. + + Args: + min_ms (int|None): Duration in milliseconds that define the lower limit of + the range to handle (exclusive). If None, it means that the range has no + lower limit. + max_ms (int|None): Duration in milliseconds that define the upper limit of + the range to handle (inclusive). If None, it means that the range has no + upper limit. 
+ """ + # We want the storage layer to to include rooms with no retention policy in its + # return value only if a default retention policy is defined in the server's + # configuration and that policy's 'max_lifetime' is either lower (or equal) than + # max_ms or higher than min_ms (or both). + if self._retention_default_max_lifetime is not None: + include_null = True + + if min_ms is not None and min_ms >= self._retention_default_max_lifetime: + # The default max_lifetime is lower than (or equal to) min_ms. + include_null = False + + if max_ms is not None and max_ms < self._retention_default_max_lifetime: + # The default max_lifetime is higher than max_ms. + include_null = False + else: + include_null = False + + rooms = yield self.store.get_rooms_for_retention_period_in_range( + min_ms, max_ms, include_null + ) + + for room_id, retention_policy in iteritems(rooms): + if room_id in self._purges_in_progress_by_room: + logger.warning( + "[purge] not purging room %s as there's an ongoing purge running" + " for this room", + room_id, + ) + continue + + max_lifetime = retention_policy["max_lifetime"] + + if max_lifetime is None: + # If max_lifetime is None, it means that include_null equals True, + # therefore we can safely assume that there is a default policy defined + # in the server's configuration. + max_lifetime = self._retention_default_max_lifetime + + # Figure out what token we should start purging at. + ts = self.clock.time_msec() - max_lifetime + + stream_ordering = ( + yield self.store.find_first_stream_ordering_after_ts(ts) + ) + + r = ( + yield self.store.get_room_event_after_stream_ordering( + room_id, stream_ordering, + ) + ) + if not r: + logger.warning( + "[purge] purging events not possible: No event found " + "(ts %i => stream_ordering %i)", + ts, stream_ordering, + ) + continue + + (stream, topo, _event_id) = r + token = "t%d-%d" % (topo, stream) + + purge_id = random_string(16) + + self._purges_by_id[purge_id] = PurgeStatus() + + logger.info( + "Starting purging events in room %s (purge_id %s)" % (room_id, purge_id) + ) + + # We want to purge everything, including local events, and to run the purge in + # the background so that it's not blocking any other operation apart from + # other purges in the same room. + run_as_background_process( + "_purge_history", + self._purge_history, + purge_id, room_id, token, True, + ) + def start_purge_history(self, room_id, token, delete_local_events=False): """Start off a history purge on a room. diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 301f8ea12..b332a42d8 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -929,6 +929,9 @@ class EventsStore( elif event.type == EventTypes.Redaction: # Insert into the redactions table. self._store_redaction(txn, event) + elif event.type == EventTypes.Retention: + # Update the room_retention table. 
+ self._store_retention_policy_for_room_txn(txn, event)
 self._handle_event_relations(txn, event)
diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py
index 67bb1b6f6..54a7d24c7 100644
--- a/synapse/storage/data_stores/main/room.py
+++ b/synapse/storage/data_stores/main/room.py
@@ -19,10 +19,13 @@ import logging
 import re
 from typing import Optional, Tuple
+from six import integer_types
+
 from canonicaljson import json
 from twisted.internet import defer
+from synapse.api.constants import EventTypes
 from synapse.api.errors import StoreError
 from synapse.storage._base import SQLBaseStore
 from synapse.storage.data_stores.main.search import SearchStore
@@ -302,6 +305,85 @@ class RoomWorkerStore(SQLBaseStore):
 class RoomStore(RoomWorkerStore, SearchStore):
+ def __init__(self, db_conn, hs):
+ super(RoomStore, self).__init__(db_conn, hs)
+
+ self.config = hs.config
+
+ self.register_background_update_handler(
+ "insert_room_retention", self._background_insert_retention,
+ )
+
+ @defer.inlineCallbacks
+ def _background_insert_retention(self, progress, batch_size):
+ """Retrieves a list of all rooms within a range and inserts an entry for each of
+ them into the room_retention table.
+ If a property is missing from a room's retention state event, the matching
+ column is set to NULL (and if the room has no retention state event at all,
+ every column is NULL), so that we fall back to the server's retention policy.
+ """
+
+ last_room = progress.get("room_id", "")
+
+ def _background_insert_retention_txn(txn):
+ txn.execute(
+ """
+ SELECT state.room_id, state.event_id, events.json
+ FROM current_state_events as state
+ LEFT JOIN event_json AS events ON (state.event_id = events.event_id)
+ WHERE state.room_id > ? AND state.type = '%s'
+ ORDER BY state.room_id ASC
+ LIMIT ?;
+ """ % EventTypes.Retention,
+ (last_room, batch_size)
+ )
+
+ rows = self.cursor_to_dict(txn)
+
+ if not rows:
+ return True
+
+ for row in rows:
+ if not row["json"]:
+ retention_policy = {}
+ else:
+ ev = json.loads(row["json"])
+ retention_policy = ev["content"]
+
+ self._simple_insert_txn(
+ txn=txn,
+ table="room_retention",
+ values={
+ "room_id": row["room_id"],
+ "event_id": row["event_id"],
+ "min_lifetime": retention_policy.get("min_lifetime"),
+ "max_lifetime": retention_policy.get("max_lifetime"),
+ }
+ )
+
+ logger.info("Inserted %d rows into room_retention", len(rows))
+
+ self._background_update_progress_txn(
+ txn, "insert_room_retention", {
+ "room_id": rows[-1]["room_id"],
+ }
+ )
+
+ if batch_size > len(rows):
+ return True
+ else:
+ return False
+
+ end = yield self.runInteraction(
+ "insert_room_retention",
+ _background_insert_retention_txn,
+ )
+
+ if end:
+ yield self._end_background_update("insert_room_retention")
+
+ defer.returnValue(batch_size)
+
 @defer.inlineCallbacks
 def store_room(self, room_id, room_creator_user_id, is_public):
 """Stores a room.
@@ -502,6 +584,37 @@ class RoomStore(RoomWorkerStore, SearchStore):
 txn, event, "content.body", event.content["body"]
 )
+ def _store_retention_policy_for_room_txn(self, txn, event):
+ if (
+ hasattr(event, "content")
+ and ("min_lifetime" in event.content or "max_lifetime" in event.content)
+ ):
+ if (
+ ("min_lifetime" in event.content and not isinstance(
+ event.content.get("min_lifetime"), integer_types
+ ))
+ or ("max_lifetime" in event.content and not isinstance(
+ event.content.get("max_lifetime"), integer_types
+ ))
+ ):
+ # Ignore the event if one of the values isn't an integer.
+ return
+
+ self._simple_insert_txn(
+ txn=txn,
+ table="room_retention",
+ values={
+ "room_id": event.room_id,
+ "event_id": event.event_id,
+ "min_lifetime": event.content.get("min_lifetime"),
+ "max_lifetime": event.content.get("max_lifetime"),
+ },
+ )
+
+ self._invalidate_cache_and_stream(
+ txn, self.get_retention_policy_for_room, (event.room_id,)
+ )
+
 def add_event_report(
 self, room_id, event_id, user_id, reason, content, received_ts
 ):
@@ -683,3 +796,142 @@ class RoomStore(RoomWorkerStore, SearchStore):
 remote_media_mxcs.append((hostname, media_id))
 return local_media_mxcs, remote_media_mxcs
+
+ @defer.inlineCallbacks
+ def get_rooms_for_retention_period_in_range(self, min_ms, max_ms, include_null=False):
+ """Retrieves all of the rooms within the given retention range.
+
+ Optionally includes the rooms which don't have a retention policy.
+
+ Args:
+ min_ms (int|None): Duration in milliseconds that defines the lower limit of
+ the range to handle (exclusive). If None, doesn't set a lower limit.
+ max_ms (int|None): Duration in milliseconds that defines the upper limit of
+ the range to handle (inclusive). If None, doesn't set an upper limit.
+ include_null (bool): Whether to include rooms whose retention policy is NULL
+ in the returned set.
+
+ Returns:
+ dict[str, dict]: The rooms within this range, along with their retention
+ policy. Each key is a room ID, mapping to a dict describing the retention
+ policy associated with that room. The keys for this nested dict are
+ "min_lifetime" (int|None), and "max_lifetime" (int|None).
+ """
+
+ def get_rooms_for_retention_period_in_range_txn(txn):
+ range_conditions = []
+ args = []
+
+ if min_ms is not None:
+ range_conditions.append("max_lifetime > ?")
+ args.append(min_ms)
+
+ if max_ms is not None:
+ range_conditions.append("max_lifetime <= ?")
+ args.append(max_ms)
+
+ # Do a first query which will retrieve the rooms that have a retention policy
+ # in their current state.
+ sql = """
+ SELECT room_id, min_lifetime, max_lifetime FROM room_retention
+ INNER JOIN current_state_events USING (event_id, room_id)
+ """
+
+ if len(range_conditions):
+ sql += " WHERE (" + " AND ".join(range_conditions) + ")"
+
+ if include_null:
+ sql += " OR max_lifetime IS NULL"
+
+ txn.execute(sql, args)
+
+ rows = self.cursor_to_dict(txn)
+ rooms_dict = {}
+
+ for row in rows:
+ rooms_dict[row["room_id"]] = {
+ "min_lifetime": row["min_lifetime"],
+ "max_lifetime": row["max_lifetime"],
+ }
+
+ if include_null:
+ # If required, do a second query that retrieves all of the rooms we know
+ # of so we can handle rooms with no retention policy.
+ sql = "SELECT DISTINCT room_id FROM current_state_events"
+
+ txn.execute(sql)
+
+ rows = self.cursor_to_dict(txn)
+
+ # If a room isn't already in the dict (i.e. it doesn't have a retention
+ # policy in its state), add it with a null policy.
+ for row in rows:
+ if row["room_id"] not in rooms_dict:
+ rooms_dict[row["room_id"]] = {
+ "min_lifetime": None,
+ "max_lifetime": None,
+ }
+
+ return rooms_dict
+
+ rooms = yield self.runInteraction(
+ "get_rooms_for_retention_period_in_range",
+ get_rooms_for_retention_period_in_range_txn,
+ )
+
+ defer.returnValue(rooms)
+
+ @cachedInlineCallbacks()
+ def get_retention_policy_for_room(self, room_id):
+ """Get the retention policy for a given room.
+
+ If no retention policy has been found for this room, returns the server's
+ default policy (which has None as both the 'min_lifetime' and the
+ 'max_lifetime' if no default policy has been defined in the server's
+ configuration).
+
+ Args:
+ room_id (str): The ID of the room to get the retention policy of.
+
+ Returns:
+ dict[str, int|None]: "min_lifetime" and "max_lifetime" for this room.
+ """
+
+ def get_retention_policy_for_room_txn(txn):
+ txn.execute(
+ """
+ SELECT min_lifetime, max_lifetime FROM room_retention
+ INNER JOIN current_state_events USING (event_id, room_id)
+ WHERE room_id = ?;
+ """,
+ (room_id,)
+ )
+
+ return self.cursor_to_dict(txn)
+
+ ret = yield self.runInteraction(
+ "get_retention_policy_for_room",
+ get_retention_policy_for_room_txn,
+ )
+
+ # If we don't know this room ID, ret will be an empty list; in this case, return
+ # the default policy.
+ if not ret:
+ defer.returnValue({
+ "min_lifetime": self.config.retention_default_min_lifetime,
+ "max_lifetime": self.config.retention_default_max_lifetime,
+ })
+
+ row = ret[0]
+
+ # If one of the attributes of the room's policy isn't defined, use the matching
+ # attribute from the default policy.
+ # The default values will be None if no default policy has been defined, or if one
+ # of the attributes is missing from the default policy.
+ if row["min_lifetime"] is None:
+ row["min_lifetime"] = self.config.retention_default_min_lifetime
+
+ if row["max_lifetime"] is None:
+ row["max_lifetime"] = self.config.retention_default_max_lifetime
+
+ defer.returnValue(row)
diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql b/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql
new file mode 100644
index 000000000..ee6cdf7a1
--- /dev/null
+++ b/synapse/storage/data_stores/main/schema/delta/56/room_retention.sql
@@ -0,0 +1,33 @@
+/* Copyright 2019 New Vector Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- Tracks the retention policy of a room.
+-- A NULL max_lifetime or min_lifetime means that the matching property is not defined in
+-- the room's retention policy state event.
+-- If a room doesn't have a retention policy state event in its state, both max_lifetime
+-- and min_lifetime are NULL.
+CREATE TABLE IF NOT EXISTS room_retention( + room_id TEXT, + event_id TEXT, + min_lifetime BIGINT, + max_lifetime BIGINT, + + PRIMARY KEY(room_id, event_id) +); + +CREATE INDEX room_retention_max_lifetime_idx on room_retention(max_lifetime); + +INSERT INTO background_updates (update_name, progress_json) VALUES + ('insert_room_retention', '{}'); diff --git a/synapse/visibility.py b/synapse/visibility.py index 8c843febd..4498c156b 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -86,6 +86,14 @@ def filter_events_for_client( erased_senders = yield storage.main.are_users_erased((e.sender for e in events)) + room_ids = set(e.room_id for e in events) + retention_policies = {} + + for room_id in room_ids: + retention_policies[room_id] = yield storage.main.get_retention_policy_for_room( + room_id + ) + def allowed(event): """ Args: @@ -103,6 +111,15 @@ def filter_events_for_client( if not event.is_state() and event.sender in ignore_list: return None + retention_policy = retention_policies[event.room_id] + max_lifetime = retention_policy.get("max_lifetime") + + if max_lifetime is not None: + oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime + + if event.origin_server_ts < oldest_allowed_ts: + return None + if event.event_id in always_include_ids: return event diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py new file mode 100644 index 000000000..41ea9db68 --- /dev/null +++ b/tests/rest/client/test_retention.py @@ -0,0 +1,320 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +from mock import Mock + +from synapse.api.constants import EventTypes +from synapse.rest import admin +from synapse.rest.client.v1 import login, room +from synapse.visibility import filter_events_for_client + +from tests import unittest + +one_hour_ms = 3600000 +one_day_ms = one_hour_ms * 24 + + +class RetentionTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["default_room_version"] = "1" + config["retention"] = { + "enabled": True, + "default_policy": { + "min_lifetime": one_day_ms, + "max_lifetime": one_day_ms * 3, + }, + "allowed_lifetime_min": one_day_ms, + "allowed_lifetime_max": one_day_ms * 3, + } + + self.hs = self.setup_test_homeserver(config=config) + return self.hs + + def prepare(self, reactor, clock, homeserver): + self.user_id = self.register_user("user", "password") + self.token = self.login("user", "password") + + def test_retention_state_event(self): + """Tests that the server configuration can limit the values a user can set to the + room's retention policy. 
+ """ + room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + self.helper.send_state( + room_id=room_id, + event_type=EventTypes.Retention, + body={ + "max_lifetime": one_day_ms * 4, + }, + tok=self.token, + expect_code=400, + ) + + self.helper.send_state( + room_id=room_id, + event_type=EventTypes.Retention, + body={ + "max_lifetime": one_hour_ms, + }, + tok=self.token, + expect_code=400, + ) + + def test_retention_event_purged_with_state_event(self): + """Tests that expired events are correctly purged when the room's retention policy + is defined by a state event. + """ + room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + # Set the room's retention period to 2 days. + lifetime = one_day_ms * 2 + self.helper.send_state( + room_id=room_id, + event_type=EventTypes.Retention, + body={ + "max_lifetime": lifetime, + }, + tok=self.token, + ) + + self._test_retention_event_purged(room_id, one_day_ms * 1.5) + + def test_retention_event_purged_without_state_event(self): + """Tests that expired events are correctly purged when the room's retention policy + is defined by the server's configuration's default retention policy. + """ + room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + self._test_retention_event_purged(room_id, one_day_ms * 2) + + def test_visibility(self): + """Tests that synapse.visibility.filter_events_for_client correctly filters out + outdated events + """ + store = self.hs.get_datastore() + storage = self.hs.get_storage() + room_id = self.helper.create_room_as(self.user_id, tok=self.token) + events = [] + + # Send a first event, which should be filtered out at the end of the test. + resp = self.helper.send( + room_id=room_id, + body="1", + tok=self.token, + ) + + # Get the event from the store so that we end up with a FrozenEvent that we can + # give to filter_events_for_client. We need to do this now because the event won't + # be in the database anymore after it has expired. + events.append(self.get_success( + store.get_event( + resp.get("event_id") + ) + )) + + # Advance the time by 2 days. We're using the default retention policy, therefore + # after this the first event will still be valid. + self.reactor.advance(one_day_ms * 2 / 1000) + + # Send another event, which shouldn't get filtered out. + resp = self.helper.send( + room_id=room_id, + body="2", + tok=self.token, + ) + + valid_event_id = resp.get("event_id") + + events.append(self.get_success( + store.get_event( + valid_event_id + ) + )) + + # Advance the time by anothe 2 days. After this, the first event should be + # outdated but not the second one. + self.reactor.advance(one_day_ms * 2 / 1000) + + # Run filter_events_for_client with our list of FrozenEvents. + filtered_events = self.get_success(filter_events_for_client( + storage, self.user_id, events + )) + + # We should only get one event back. + self.assertEqual(len(filtered_events), 1, filtered_events) + # That event should be the second, not outdated event. + self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events) + + def _test_retention_event_purged(self, room_id, increment): + # Send a first event to the room. This is the event we'll want to be purged at the + # end of the test. + resp = self.helper.send( + room_id=room_id, + body="1", + tok=self.token, + ) + + expired_event_id = resp.get("event_id") + + # Check that we can retrieve the event. 
+ expired_event = self.get_event(room_id, expired_event_id) + self.assertEqual(expired_event.get("content", {}).get("body"), "1", expired_event) + + # Advance the time. + self.reactor.advance(increment / 1000) + + # Send another event. We need this because the purge job won't purge the most + # recent event in the room. + resp = self.helper.send( + room_id=room_id, + body="2", + tok=self.token, + ) + + valid_event_id = resp.get("event_id") + + # Advance the time again. Now our first event should have expired but our second + # one should still be kept. + self.reactor.advance(increment / 1000) + + # Check that the event has been purged from the database. + self.get_event(room_id, expired_event_id, expected_code=404) + + # Check that the event that hasn't been purged can still be retrieved. + valid_event = self.get_event(room_id, valid_event_id) + self.assertEqual(valid_event.get("content", {}).get("body"), "2", valid_event) + + def get_event(self, room_id, event_id, expected_code=200): + url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id) + + request, channel = self.make_request("GET", url, access_token=self.token) + self.render(request) + + self.assertEqual(channel.code, expected_code, channel.result) + + return channel.json_body + + +class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): + servlets = [ + admin.register_servlets, + login.register_servlets, + room.register_servlets, + ] + + def make_homeserver(self, reactor, clock): + config = self.default_config() + config["default_room_version"] = "1" + config["retention"] = { + "enabled": True, + } + + mock_federation_client = Mock(spec=["backfill"]) + + self.hs = self.setup_test_homeserver( + config=config, + federation_client=mock_federation_client, + ) + return self.hs + + def prepare(self, reactor, clock, homeserver): + self.user_id = self.register_user("user", "password") + self.token = self.login("user", "password") + + def test_no_default_policy(self): + """Tests that an event doesn't get expired if there is neither a default retention + policy nor a policy specific to the room. + """ + room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + self._test_retention(room_id) + + def test_state_policy(self): + """Tests that an event gets correctly expired if there is no default retention + policy but there's a policy specific to the room. + """ + room_id = self.helper.create_room_as(self.user_id, tok=self.token) + + # Set the maximum lifetime to 35 days so that the first event gets expired but not + # the second one. + self.helper.send_state( + room_id=room_id, + event_type=EventTypes.Retention, + body={ + "max_lifetime": one_day_ms * 35, + }, + tok=self.token, + ) + + self._test_retention(room_id, expected_code_for_first_event=404) + + def _test_retention(self, room_id, expected_code_for_first_event=200): + # Send a first event to the room. This is the event we'll want to be purged at the + # end of the test. + resp = self.helper.send( + room_id=room_id, + body="1", + tok=self.token, + ) + + first_event_id = resp.get("event_id") + + # Check that we can retrieve the event. + expired_event = self.get_event(room_id, first_event_id) + self.assertEqual(expired_event.get("content", {}).get("body"), "1", expired_event) + + # Advance the time by a month. + self.reactor.advance(one_day_ms * 30 / 1000) + + # Send another event. We need this because the purge job won't purge the most + # recent event in the room. 
+ resp = self.helper.send(
+ room_id=room_id,
+ body="2",
+ tok=self.token,
+ )
+
+ second_event_id = resp.get("event_id")
+
+ # Advance the time by another month.
+ self.reactor.advance(one_day_ms * 30 / 1000)
+
+ # Check if the event has been purged from the database.
+ first_event = self.get_event(
+ room_id, first_event_id, expected_code=expected_code_for_first_event
+ )
+
+ if expected_code_for_first_event == 200:
+ self.assertEqual(first_event.get("content", {}).get("body"), "1", first_event)
+
+ # Check that the event that hasn't been purged can still be retrieved.
+ second_event = self.get_event(room_id, second_event_id)
+ self.assertEqual(second_event.get("content", {}).get("body"), "2", second_event)
+
+ def get_event(self, room_id, event_id, expected_code=200):
+ url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id)
+
+ request, channel = self.make_request("GET", url, access_token=self.token)
+ self.render(request)
+
+ self.assertEqual(channel.code, expected_code, channel.result)
+
+ return channel.json_body
From 0287d033eec86fb7f6bb84f929e756c99caf2113 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Mon, 4 Nov 2019 18:08:50 +0000
Subject: [PATCH 021/133] Transfer upgraded rooms on groups

---
 changelog.d/6235.bugfix | 1 +
 synapse/handlers/room_member.py | 9 +++++++++
 synapse/storage/data_stores/main/group_server.py | 15 +++++++++++++++
 3 files changed, 25 insertions(+)
 create mode 100644 changelog.d/6235.bugfix

diff --git a/changelog.d/6235.bugfix b/changelog.d/6235.bugfix
new file mode 100644
index 000000000..12718ba93
--- /dev/null
+++ b/changelog.d/6235.bugfix
@@ -0,0 +1 @@
+Remove a room from a server's public rooms list on room upgrade.
\ No newline at end of file
diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 06d09c294..01c65ee22 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -514,6 +514,15 @@ class RoomMemberHandler(object):
 if old_room and old_room["is_public"]:
 yield self.store.set_room_is_public(old_room_id, False)
 yield self.store.set_room_is_public(room_id, True)
+
+ # Check if any groups we own contain the predecessor room
+ local_group_ids = yield self.store.get_local_groups_for_room(old_room_id)
+ for group_id in local_group_ids:
+ # Add the new room to those groups
+ yield self.store.add_room_to_group(group_id, room_id, old_room["is_public"])
+
+ # Remove the old room from those groups
+ yield self.store.remove_room_from_group(group_id, old_room_id)
@defer.inlineCallbacks
 def copy_user_state_on_room_upgrade(self, old_room_id, new_room_id, user_ids):
diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py
index b3a2771f1..13ad71a49 100644
--- a/synapse/storage/data_stores/main/group_server.py
+++ b/synapse/storage/data_stores/main/group_server.py
@@ -552,6 +552,21 @@ class GroupServerStore(SQLBaseStore):
 keyvalues={"group_id": group_id, "role_id": role_id, "user_id": user_id},
 desc="remove_user_from_summary",
 )
+
+ def get_local_groups_for_room(self, room_id):
+ """Get all of the local groups that contain a given room
+ Args:
+ room_id (str): The ID of a room
+ Returns:
+ Deferred[list[str]]: A twisted.Deferred containing a list of group IDs
+ that contain this room
+ """
+ return self._simple_select_onecol(
+ table="group_rooms",
+ keyvalues={"room_id": room_id},
+ retcol="group_id",
+ desc="get_local_groups_for_room",
+ )
 
 def get_users_for_summary_by_role(self, group_id, include_private=False):
 """Get the users
 and roles that should be included in a summary request
From c2203bea57fd34de9be994f5e117da2c24338708 Mon Sep 17 00:00:00 2001
From: Andrew Morgan 
Date: Mon, 4 Nov 2019 18:17:11 +0000
Subject: [PATCH 022/133] Re-add docstring, with caveats detailed

---
 synapse/handlers/room_member.py | 2 +-
 synapse/storage/data_stores/main/group_server.py | 2 +-
 synapse/storage/data_stores/main/state.py | 6 +++++-
 3 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py
index 01c65ee22..6cfee4b36 100644
--- a/synapse/handlers/room_member.py
+++ b/synapse/handlers/room_member.py
@@ -514,7 +514,7 @@ class RoomMemberHandler(object):
 if old_room and old_room["is_public"]:
 yield self.store.set_room_is_public(old_room_id, False)
 yield self.store.set_room_is_public(room_id, True)
-
+
 # Check if any groups we own contain the predecessor room
diff --git a/synapse/storage/data_stores/main/group_server.py b/synapse/storage/data_stores/main/group_server.py
index 13ad71a49..5ded539af 100644
--- a/synapse/storage/data_stores/main/group_server.py
+++ b/synapse/storage/data_stores/main/group_server.py
@@ -552,7 +552,7 @@ class GroupServerStore(SQLBaseStore):
 keyvalues={"group_id": group_id, "role_id": role_id, "user_id": user_id},
 desc="remove_user_from_summary",
 )
-
+
 def get_local_groups_for_room(self, room_id):
 """Get all of the local groups that contain a given room
 Args:
diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py
index 313284803..a41cac7b3 100644
--- a/synapse/storage/data_stores/main/state.py
+++ b/synapse/storage/data_stores/main/state.py
@@ -285,7 +285,11 @@ class StateGroupWorkerStore(
 room_id (str)
 
 Returns:
- Deferred[unicode|None]: predecessor room id
+ Deferred[dict|None]: A dictionary containing the structure of the predecessor
+ field from the room's create event. The exact structure is determined by
+ other servers, but it is expected to be:
+ * room_id (str): The room ID of the predecessor room
+ * event_id (str): The ID of the tombstone event in the predecessor room
 
 Raises:
 NotFoundError if the room is unknown
From 408600282774391fade9e4a6606f4967184865c0 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Tue, 5 Nov 2019 13:23:25 +0000
Subject: [PATCH 023/133] Improve documentation for EventContext fields (#6319)

---
 changelog.d/6319.misc | 1 +
 synapse/events/snapshot.py | 85 +++++++++++++++++++++++++++-----------
 synapse/state/__init__.py | 3 ++
 3 files changed, 66 insertions(+), 23 deletions(-)
 create mode 100644 changelog.d/6319.misc

diff --git a/changelog.d/6319.misc b/changelog.d/6319.misc
new file mode 100644
index 000000000..9711ef21e
--- /dev/null
+++ b/changelog.d/6319.misc
@@ -0,0 +1 @@
+Improve documentation for EventContext fields.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index a269de548..5f07f6fe4 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -12,6 +12,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+from typing import Dict, Optional, Tuple, Union
+
 from six import iteritems
 
 import attr
@@ -19,45 +21,82 @@ from frozendict import frozendict
 
 from twisted.internet import defer
 
+from synapse.appservice import ApplicationService
 from synapse.logging.context import make_deferred_yieldable, run_in_background
 
 
 @attr.s(slots=True)
 class EventContext:
 """
+ Holds information relevant to persisting an event
+
 Attributes:
- state_group (int|None): state group id, if the state has been stored
- as a state group. This is usually only None if e.g. the event is
- an outlier.
- rejected (bool|str): A rejection reason if the event was rejected, else
- False
+ rejected: A rejection reason if the event was rejected, else False
 
- prev_group (int): Previously persisted state group. ``None`` for an
- outlier.
- delta_ids (dict[(str, str), str]): Delta from ``prev_group``.
- (type, state_key) -> event_id. ``None`` for an outlier.
+ state_group: The ID of the state group for this event. Note that state events
+ are persisted with a state group which includes the new event, so this is
+ effectively the state *after* the event in question.
 
- app_service: FIXME
+ For a *rejected* state event, where the state of the rejected event is
+ ignored, this state_group should never make it into the
+ event_to_state_groups table. Indeed, inspecting this value for a rejected
+ state event is almost certainly incorrect.
+
+ For an outlier, where we don't have the state at the event, this will be
+ None.
+
+ prev_group: If it is known, ``state_group``'s prev_group. Note that this being
+ None does not necessarily mean that ``state_group`` does not have
+ a prev_group!
+
+ If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
+ will always also be ``None``.
+
+ Note that this is *not* (necessarily) the state group associated with
+ ``_prev_state_ids``.
+
+ delta_ids: If ``prev_group`` is not None, the state delta between ``prev_group``
+ and ``state_group``.
+
+ app_service: If this event is being sent by a (local) application service, that
+ app service.
+
+ _current_state_ids: The room state map, including this event - ie, the state
+ in ``state_group``.
 
- _current_state_ids (dict[(str, str), str]|None):
- The current state map including the current event. None if outlier
- or we haven't fetched the state from DB yet.
 (type, state_key) -> event_id
- _prev_state_ids (dict[(str, str), str]|None):
- The current state map excluding the current event. None if outlier
- or we haven't fetched the state from DB yet.
+ FIXME: what is this for an outlier? it seems ill-defined. It seems like
+ it could be either {}, or the state we were given by the remote
+ server, depending on $THINGS
+
+ Note that this is a private attribute: it should be accessed via
+ ``get_current_state_ids``. _AsyncEventContext impl calculates this
+ on-demand: it will be None until that happens.
+
+ _prev_state_ids: The room state map, excluding this event. For a non-state
+ event, this will be the same as ``_current_state_ids``.
+
+ Note that it is a completely different thing to prev_group!
+
+ (type, state_key) -> event_id
+
+ FIXME: again, what is this for an outlier?
+
+ As with _current_state_ids, this is a private attribute. It should be
+ accessed via get_prev_state_ids.
""" - state_group = attr.ib(default=None) - rejected = attr.ib(default=False) - prev_group = attr.ib(default=None) - delta_ids = attr.ib(default=None) - app_service = attr.ib(default=None) + rejected = attr.ib(default=False, type=Union[bool, str]) + state_group = attr.ib(default=None, type=Optional[int]) + prev_group = attr.ib(default=None, type=Optional[int]) + delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]]) + app_service = attr.ib(default=None, type=Optional[ApplicationService]) - _prev_state_ids = attr.ib(default=None) - _current_state_ids = attr.ib(default=None) + _current_state_ids = attr.ib( + default=None, type=Optional[Dict[Tuple[str, str], str]] + ) + _prev_state_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]]) @staticmethod def with_state( diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 4e91eb66f..2c04ab185 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -232,6 +232,9 @@ class StateHandler(object): # If this is an outlier, then we know it shouldn't have any current # state. Certainly store.get_current_state won't return any, and # persisting the event won't store the state group. + + # FIXME: why do we populate current_state_ids? I thought the point was + # that we weren't supposed to have any state for outliers? if old_state: prev_state_ids = {(s.type, s.state_key): s.event_id for s in old_state} if event.is_state(): From f5d8fdf0a71cadd4ded81e276cc57e9c0c195a2f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 5 Nov 2019 14:44:25 +0000 Subject: [PATCH 024/133] Update changelog --- changelog.d/6310.feature | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6310.feature b/changelog.d/6310.feature index b7ff3fad3..78a187a1d 100644 --- a/changelog.d/6310.feature +++ b/changelog.d/6310.feature @@ -1 +1 @@ -Implement label-based filtering. +Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). From c16e192e2f9970cc62adfd758034244631968102 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Nov 2019 15:49:43 +0000 Subject: [PATCH 025/133] Fix caching devices for remote servers in worker. When the `/keys/query` API is hit on client_reader worker Synapse may decide that it needs to resync some remote deivces. Usually this happens on master, and then gets cached. However, that fails on workers and so it falls back to fetching devices from remotes directly, which may in turn fail if the remote is down. 
--- synapse/handlers/e2e_keys.py | 19 ++++++-- synapse/replication/http/__init__.py | 10 +++- synapse/replication/http/devices.py | 69 ++++++++++++++++++++++++++++ 3 files changed, 94 insertions(+), 4 deletions(-) create mode 100644 synapse/replication/http/devices.py diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index f09a0b73c..28c12753c 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -30,6 +30,7 @@ from twisted.internet import defer from synapse.api.errors import CodeMessageException, Codes, NotFoundError, SynapseError from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.logging.opentracing import log_kv, set_tag, tag_args, trace +from synapse.replication.http.devices import ReplicationUserDevicesResyncRestServlet from synapse.types import ( UserID, get_domain_from_id, @@ -53,6 +54,12 @@ class E2eKeysHandler(object): self._edu_updater = SigningKeyEduUpdater(hs, self) + self._is_master = hs.config.worker_app is None + if not self._is_master: + self._user_device_resync_client = ReplicationUserDevicesResyncRestServlet.make_client( + hs + ) + federation_registry = hs.get_federation_registry() # FIXME: switch to m.signing_key_update when MSC1756 is merged into the spec @@ -191,9 +198,15 @@ class E2eKeysHandler(object): # probably be tracking their device lists. However, we haven't # done an initial sync on the device list so we do it now. try: - user_devices = yield self.device_handler.device_list_updater.user_device_resync( - user_id - ) + if self._is_master: + user_devices = yield self.device_handler.device_list_updater.user_device_resync( + user_id + ) + else: + user_devices = yield self._user_device_resync_client( + user_id=user_id + ) + user_devices = user_devices["devices"] for device in user_devices: results[user_id] = {device["device_id"]: device["keys"]} diff --git a/synapse/replication/http/__init__.py b/synapse/replication/http/__init__.py index 81b85352b..28dbc6fcb 100644 --- a/synapse/replication/http/__init__.py +++ b/synapse/replication/http/__init__.py @@ -14,7 +14,14 @@ # limitations under the License. from synapse.http.server import JsonResource -from synapse.replication.http import federation, login, membership, register, send_event +from synapse.replication.http import ( + devices, + federation, + login, + membership, + register, + send_event, +) REPLICATION_PREFIX = "/_synapse/replication" @@ -30,3 +37,4 @@ class ReplicationRestResource(JsonResource): federation.register_servlets(hs, self) login.register_servlets(hs, self) register.register_servlets(hs, self) + devices.register_servlets(hs, self) diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py new file mode 100644 index 000000000..795ca7b65 --- /dev/null +++ b/synapse/replication/http/devices.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- +# Copyright 2018 New Vector Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+import logging
+
+from synapse.replication.http._base import ReplicationEndpoint
+
+logger = logging.getLogger(__name__)
+
+
+class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint):
+ """Asks master to resync the device list for a remote user, so that the
+ result can be cached in the database and streamed to workers.
+
+ Request format:
+
+ POST /_synapse/replication/user_device_resync/:user_id
+
+ {}
+
+ Response is equivalent to `/_matrix/federation/v1/user/devices/:user_id`
+ response, e.g.:
+
+ {
+ "user_id": "@alice:example.org",
+ "devices": [
+ {
+ "device_id": "JLAFKJWSCS",
+ "keys": { ... },
+ "device_display_name": "Alice's Mobile Phone"
+ }
+ ]
+ }
+ """
+
+ NAME = "user_device_resync"
+ PATH_ARGS = ("user_id",)
+ CACHE = False
+
+ def __init__(self, hs):
+ super(ReplicationUserDevicesResyncRestServlet, self).__init__(hs)
+
+ self.device_list_updater = hs.get_device_handler().device_list_updater
+ self.store = hs.get_datastore()
+ self.clock = hs.get_clock()
+
+ @staticmethod
+ def _serialize_payload(user_id):
+ return {}
+
+ async def _handle_request(self, request, user_id):
+ user_devices = await self.device_list_updater.user_device_resync(user_id)
+
+ return 200, user_devices
+
+
+def register_servlets(hs, http_server):
+ ReplicationUserDevicesResyncRestServlet(hs).register(http_server)
From e9bfe719ba1928dc191cea93120c5c8a89584434 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff 
Date: Tue, 5 Nov 2019 15:45:17 +0000
Subject: [PATCH 026/133] Strip overlong OpenGraph data from url preview

... to stop people causing DoSes with malicious web pages

---
 changelog.d/6331.feature | 1 +
 synapse/rest/media/v1/preview_url_resource.py | 20 ++++++++++-
 tests/rest/media/v1/test_url_preview.py | 34 +++++++++++++++++++
 3 files changed, 54 insertions(+), 1 deletion(-)
 create mode 100644 changelog.d/6331.feature

diff --git a/changelog.d/6331.feature b/changelog.d/6331.feature
new file mode 100644
index 000000000..eaf69ef3f
--- /dev/null
+++ b/changelog.d/6331.feature
@@ -0,0 +1 @@
+Limit the length of data returned by url previews, to prevent DoS attacks.
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py
index 0c68c3aad..6d8c39a41 100644
--- a/synapse/rest/media/v1/preview_url_resource.py
+++ b/synapse/rest/media/v1/preview_url_resource.py
@@ -56,6 +56,9 @@ logger = logging.getLogger(__name__)
 _charset_match = re.compile(br"<\s*meta[^>]*charset\s*=\s*([a-z0-9-]+)", flags=re.I)
 _content_type_match = re.compile(r'.*; *charset="?(.*?)"?(;|$)', flags=re.I)
 
+OG_TAG_NAME_MAXLEN = 50
+OG_TAG_VALUE_MAXLEN = 1000
+
 
 class PreviewUrlResource(DirectServeResource):
 isLeaf = True
@@ -167,7 +170,7 @@ class PreviewUrlResource(DirectServeResource):
 ts (int):
 
 Returns:
- Deferred[str]: json-encoded og data
+ Deferred[bytes]: json-encoded og data
 """
 # check the URL cache in the DB (which will also provide us with
 # historical previews, if we have any)
@@ -268,6 +271,17 @@ class PreviewUrlResource(DirectServeResource):
 logger.warn("Failed to find any OG data in %s", url)
 og = {}
 
+ # filter out any stupidly long values
+ keys_to_remove = []
+ for k, v in og.items():
+ if len(k) > OG_TAG_NAME_MAXLEN or len(v) > OG_TAG_VALUE_MAXLEN:
+ logger.warning(
+ "Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN]
+ )
+ keys_to_remove.append(k)
+ for k in keys_to_remove:
+ del og[k]
+
 logger.debug("Calculated OG for %s as %s" % (url, og))
 
 jsonog = json.dumps(og)
@@ -502,6 +516,10 @@ def _calc_og(tree, media_uri):
 og = {}
 for tag in tree.xpath("//*/meta[starts-with(@property, 'og:')]"):
 if "content" in tag.attrib:
+ # if we've got more than 50 tags, someone is taking the piss
+ if len(og) >= 50:
+ logger.warning("skipping OG for page with too many og: tags")
+ return {}
 og[tag.attrib["property"]] = tag.attrib["content"]
 
 # TODO: grab article: meta tags too, e.g.:
diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py
index 976652aee..da19a8e86 100644
--- a/tests/rest/media/v1/test_url_preview.py
+++ b/tests/rest/media/v1/test_url_preview.py
@@ -247,6 +247,40 @@ class URLPreviewTests(unittest.HomeserverTestCase):
 self.assertEqual(channel.code, 200)
 self.assertEqual(channel.json_body["og:title"], "\u0434\u043a\u0430")
 
+ def test_overlong_title(self):
+ self.lookups["matrix.org"] = [(IPv4Address, "8.8.8.8")]
+
+ end_content = (
+ b"<html><head>"
+ b"<title>" + b"x" * 2000 + b"</title>"
+ b'<meta property="og:description" content="hi" />'
+ b"</head></html>"
+ )
+
+ request, channel = self.make_request(
+ "GET", "url_preview?url=http://matrix.org", shorthand=False
+ )
+ request.render(self.preview_url)
+ self.pump()
+
+ client = self.reactor.tcpClients[0][2].buildProtocol(None)
+ server = AccumulatingProtocol()
+ server.makeConnection(FakeTransport(client, self.reactor))
+ client.makeConnection(FakeTransport(server, self.reactor))
+ client.dataReceived(
+ (
+ b"HTTP/1.0 200 OK\r\nContent-Length: %d\r\n"
+ b'Content-Type: text/html; charset="windows-1251"\r\n\r\n'
+ )
+ % (len(end_content),)
+ + end_content
+ )
+
+ self.pump()
+ self.assertEqual(channel.code, 200)
+ res = channel.json_body
+ self.assertCountEqual(["og:description"], res.keys())
+
 def test_ipaddr(self):
 """
 IP addresses can be previewed directly.
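To illustrate the pruning rule added above (a minimal sketch with made-up OG
data; 50 and 1000 are the OG_TAG_NAME_MAXLEN and OG_TAG_VALUE_MAXLEN limits
from the patch):

    og = {"og:title": "x" * 2000, "og:description": "hi"}
    # Keep only tags whose name and value are within the configured limits.
    og = {k: v for k, v in og.items() if len(k) <= 50 and len(v) <= 1000}
    # og is now {"og:description": "hi"}: the overlong og:title was dropped.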
From 248111bae8600a3e2d4da49c6e64b72c76219850 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Nov 2019 15:54:23 +0000 Subject: [PATCH 027/133] Newsfile --- changelog.d/6332.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6332.bugfix diff --git a/changelog.d/6332.bugfix b/changelog.d/6332.bugfix new file mode 100644 index 000000000..b14bd7e43 --- /dev/null +++ b/changelog.d/6332.bugfix @@ -0,0 +1 @@ +Fix caching devices for remote users when using workers. From e78167c94b3f63136f7d0e4f32a05ad1befdc0ec Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 5 Nov 2019 16:46:39 +0000 Subject: [PATCH 028/133] Apply suggestions from code review Co-Authored-By: Brendan Abolivier Co-Authored-By: Erik Johnston --- synapse/rest/media/v1/preview_url_resource.py | 2 +- tests/rest/media/v1/test_url_preview.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 6d8c39a41..4d4b3c146 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -518,7 +518,7 @@ def _calc_og(tree, media_uri): if "content" in tag.attrib: # if we've got more than 50 tags, someone is taking the piss if len(og) >= 50: - logger.warning("skipping OG for page with too many og: tags") + logger.warning("Skipping OG for page with too many 'og:' tags") return {} og[tag.attrib["property"]] = tag.attrib["content"] diff --git a/tests/rest/media/v1/test_url_preview.py b/tests/rest/media/v1/test_url_preview.py index da19a8e86..852b8ab11 100644 --- a/tests/rest/media/v1/test_url_preview.py +++ b/tests/rest/media/v1/test_url_preview.py @@ -279,6 +279,7 @@ class URLPreviewTests(unittest.HomeserverTestCase): self.pump() self.assertEqual(channel.code, 200) res = channel.json_body + # We should only see the `og:description` field, as `title` is too long and should be stripped out self.assertCountEqual(["og:description"], res.keys()) def test_ipaddr(self): From 81d49cbb07a4dc5a673e31a8a626af6e8a18f801 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 5 Nov 2019 17:22:58 +0000 Subject: [PATCH 029/133] Fix exception when OpenGraph tag values are ints --- changelog.d/6334.feature | 1 + synapse/rest/media/v1/preview_url_resource.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6334.feature diff --git a/changelog.d/6334.feature b/changelog.d/6334.feature new file mode 100644 index 000000000..eaf69ef3f --- /dev/null +++ b/changelog.d/6334.feature @@ -0,0 +1 @@ +Limit the length of data returned by url previews, to prevent DoS attacks. 
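A minimal sketch of the failure this fixes (hypothetical OG data; numeric
values can end up in the og dict, for instance from calculated image
dimensions):

    og = {"og:description": "hi", "og:image:width": 640}
    len(og["og:image:width"])       # TypeError: object of type 'int' has no len()
    len(str(og["og:image:width"]))  # 3, hence the cast to str below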
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 4d4b3c146..ec9c4619c 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -274,7 +274,8 @@ class PreviewUrlResource(DirectServeResource): # filter out any stupidly long values keys_to_remove = [] for k, v in og.items(): - if len(k) > OG_TAG_NAME_MAXLEN or len(v) > OG_TAG_VALUE_MAXLEN: + # values can be numeric as well as strings, hence the cast to str + if len(k) > OG_TAG_NAME_MAXLEN or len(str(v)) > OG_TAG_VALUE_MAXLEN: logger.warning( "Pruning overlong tag %s from OG data", k[:OG_TAG_NAME_MAXLEN] ) From 052513958de214eed9508e60fef707641fb34da4 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Nov 2019 17:44:09 +0000 Subject: [PATCH 030/133] Fix phone home stats --- synapse/app/homeserver.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 00a7f8330..73e2c29d0 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -636,7 +636,7 @@ def run(hs): if hs.config.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") - clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000, hs, stats) + clock.looping_call(start_phone_stats_home, 3 * 60 * 60 * 1000) # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process @@ -644,7 +644,7 @@ def run(hs): # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes - clock.call_later(5 * 60, start_phone_stats_home, hs, stats) + clock.call_later(5 * 60, start_phone_stats_home) _base.start_reactor( "synapse-homeserver", From b437eb48b6bde1cd908d472893c5638e021c6a8f Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 5 Nov 2019 17:45:29 +0000 Subject: [PATCH 031/133] Newsfile --- changelog.d/6336.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6336.misc diff --git a/changelog.d/6336.misc b/changelog.d/6336.misc new file mode 100644 index 000000000..63527ccef --- /dev/null +++ b/changelog.d/6336.misc @@ -0,0 +1 @@ +Remove the dependency on psutil and replace functionality with the stdlib `resource` module. From 0e3ab8afdc2b89ac2f47878112d93dd03d01f7ef Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 5 Nov 2019 22:13:37 +0000 Subject: [PATCH 032/133] Add some checks that we aren't using state from rejected events (#6330) * Raise an exception if accessing state for rejected events Add some sanity checks on accessing state_group etc for rejected events. * Skip calculating push actions for rejected events It didn't actually cause any bugs, because rejected events get filtered out at various later points, but there's not point in trying to calculate the push actions for a rejected event. --- changelog.d/6330.misc | 1 + synapse/events/snapshot.py | 49 ++++++++++++++++++++++++++++++---- synapse/handlers/federation.py | 6 ++++- 3 files changed, 50 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6330.misc diff --git a/changelog.d/6330.misc b/changelog.d/6330.misc new file mode 100644 index 000000000..6239cba26 --- /dev/null +++ b/changelog.d/6330.misc @@ -0,0 +1 @@ +Add some checks that we aren't using state from rejected events. 
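The new guard behaves roughly as follows (an illustrative sketch with made-up
values, using the property introduced below):

    # A rejected event's state must never leak into the room state, so the
    # state_group property now refuses to answer for rejected events.
    context = EventContext(rejected="rejected", state_group=42)
    context.state_group  # raises RuntimeError instead of returning 42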
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py index 5f07f6fe4..0f3c5989c 100644 --- a/synapse/events/snapshot.py +++ b/synapse/events/snapshot.py @@ -33,7 +33,7 @@ class EventContext: Attributes: rejected: A rejection reason if the event was rejected, else False - state_group: The ID of the state group for this event. Note that state events + _state_group: The ID of the state group for this event. Note that state events are persisted with a state group which includes the new event, so this is effectively the state *after* the event in question. @@ -45,6 +45,9 @@ class EventContext: For an outlier, where we don't have the state at the event, this will be None. + Note that this is a private attribute: it should be accessed via + the ``state_group`` property. + prev_group: If it is known, ``state_group``'s prev_group. Note that this being None does not necessarily mean that ``state_group`` does not have a prev_group! @@ -88,7 +91,7 @@ class EventContext: """ rejected = attr.ib(default=False, type=Union[bool, str]) - state_group = attr.ib(default=None, type=Optional[int]) + _state_group = attr.ib(default=None, type=Optional[int]) prev_group = attr.ib(default=None, type=Optional[int]) delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]]) app_service = attr.ib(default=None, type=Optional[ApplicationService]) @@ -136,7 +139,7 @@ class EventContext: "prev_state_id": prev_state_id, "event_type": event.type, "event_state_key": event.state_key if event.is_state() else None, - "state_group": self.state_group, + "state_group": self._state_group, "rejected": self.rejected, "prev_group": self.prev_group, "delta_ids": _encode_state_dict(self.delta_ids), @@ -173,22 +176,52 @@ class EventContext: return context + @property + def state_group(self) -> Optional[int]: + """The ID of the state group for this event. + + Note that state events are persisted with a state group which includes the new + event, so this is effectively the state *after* the event in question. + + For an outlier, where we don't have the state at the event, this will be None. + + It is an error to access this for a rejected event, since rejected state should + not make it into the room state. Accessing this property will raise an exception + if ``rejected`` is set. + """ + if self.rejected: + raise RuntimeError("Attempt to access state_group of rejected event") + + return self._state_group + @defer.inlineCallbacks def get_current_state_ids(self, store): - """Gets the current state IDs + """ + Gets the room state map, including this event - ie, the state in ``state_group`` + + It is an error to access this for a rejected event, since rejected state should + not make it into the room state. This method will raise an exception if + ``rejected`` is set. Returns: Deferred[dict[(str, str), str]|None]: Returns None if state_group is None, which happens when the associated event is an outlier. + Maps a (type, state_key) to the event ID of the state event matching this tuple. """ + if self.rejected: + raise RuntimeError("Attempt to access state_ids of rejected event") + yield self._ensure_fetched(store) return self._current_state_ids @defer.inlineCallbacks def get_prev_state_ids(self, store): - """Gets the prev state IDs + """ + Gets the room state map, excluding this event. + + For a non-state event, this will be the same as get_current_state_ids(). 
        Returns:
            Deferred[dict[(str, str), str]|None]: Returns None if state_group

@@ -202,11 +235,17 @@ class EventContext:
     def get_cached_current_state_ids(self):
         """Gets the current state IDs if we have them already cached.

+        It is an error to access this for a rejected event, since rejected state should
+        not make it into the room state. This method will raise an exception if
+        ``rejected`` is set.
+
         Returns:
             dict[(str, str), str]|None: Returns None if we haven't cached the
             state or if state_group is None, which happens when the associated
             event is an outlier.
         """
+        if self.rejected:
+            raise RuntimeError("Attempt to access state_ids of rejected event")

         return self._current_state_ids

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 8cafcfdab..b7916de90 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -1688,7 +1688,11 @@ class FederationHandler(BaseHandler):
             # hack around with a try/finally instead.
             success = False
             try:
-                if not event.internal_metadata.is_outlier() and not backfilled:
+                if (
+                    not event.internal_metadata.is_outlier()
+                    and not backfilled
+                    and not context.rejected
+                ):
                     yield self.action_generator.handle_push_actions_for_event(
                         event, context
                     )

From 807ec3bd99908d2991d2b3d0615b0862610c6dc3 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Wed, 6 Nov 2019 10:01:39 +0000
Subject: [PATCH 033/133] Fix bug which caused rejected events to be stored
 with the wrong room state (#6320)

Fixes a bug where rejected events were persisted with the wrong state group.

Also fixes an occasional internal-server-error when receiving events over
federation which are rejected and (possibly because they are
backwards-extremities) have no prev_group.

Fixes #6289.
---
 changelog.d/6320.bugfix                   |   1 +
 synapse/events/snapshot.py                |  25 +++-
 synapse/handlers/federation.py            |   1 +
 synapse/state/__init__.py                 | 168 +++++++++++-----------
 synapse/storage/data_stores/main/state.py |   2 +-
 tests/handlers/test_federation.py         | 126 ++++++++++++++++
 tests/test_state.py                       |  61 ++++++--
 7 files changed, 283 insertions(+), 101 deletions(-)
 create mode 100644 changelog.d/6320.bugfix

diff --git a/changelog.d/6320.bugfix b/changelog.d/6320.bugfix
new file mode 100644
index 000000000..2c3fad565
--- /dev/null
+++ b/changelog.d/6320.bugfix
@@ -0,0 +1 @@
+Fix bug which caused rejected events to be persisted with the wrong room state.
diff --git a/synapse/events/snapshot.py b/synapse/events/snapshot.py
index 0f3c5989c..64e898f40 100644
--- a/synapse/events/snapshot.py
+++ b/synapse/events/snapshot.py
@@ -48,10 +48,21 @@ class EventContext:
             Note that this is a private attribute: it should be accessed via
             the ``state_group`` property.

+        state_group_before_event: The ID of the state group representing the state
+            of the room before this event.
+
+            If this is a non-state event, this will be the same as ``state_group``. If
+            it's a state event, it will be the same as ``prev_group``.
+
+            If ``state_group`` is None (ie, the event is an outlier),
+            ``state_group_before_event`` will always also be ``None``.
+
         prev_group: If it is known, ``state_group``'s prev_group. Note that this being
             None does not necessarily mean that ``state_group`` does not have
             a prev_group!

+            If the event is a state event, this is normally the same as
+            ``state_group_before_event``.
+
             If ``state_group`` is None (ie, the event is an outlier), ``prev_group``
             will always also be ``None``.

@@ -77,7 +88,8 @@ class EventContext:
             ``get_current_state_ids``.
_AsyncEventContext impl calculates this on-demand: it will be None until that happens. - _prev_state_ids: The room state map, excluding this event. For a non-state + _prev_state_ids: The room state map, excluding this event - ie, the state + in ``state_group_before_event``. For a non-state event, this will be the same as _current_state_events. Note that it is a completely different thing to prev_group! @@ -92,6 +104,7 @@ class EventContext: rejected = attr.ib(default=False, type=Union[bool, str]) _state_group = attr.ib(default=None, type=Optional[int]) + state_group_before_event = attr.ib(default=None, type=Optional[int]) prev_group = attr.ib(default=None, type=Optional[int]) delta_ids = attr.ib(default=None, type=Optional[Dict[Tuple[str, str], str]]) app_service = attr.ib(default=None, type=Optional[ApplicationService]) @@ -103,12 +116,18 @@ class EventContext: @staticmethod def with_state( - state_group, current_state_ids, prev_state_ids, prev_group=None, delta_ids=None + state_group, + state_group_before_event, + current_state_ids, + prev_state_ids, + prev_group=None, + delta_ids=None, ): return EventContext( current_state_ids=current_state_ids, prev_state_ids=prev_state_ids, state_group=state_group, + state_group_before_event=state_group_before_event, prev_group=prev_group, delta_ids=delta_ids, ) @@ -140,6 +159,7 @@ class EventContext: "event_type": event.type, "event_state_key": event.state_key if event.is_state() else None, "state_group": self._state_group, + "state_group_before_event": self.state_group_before_event, "rejected": self.rejected, "prev_group": self.prev_group, "delta_ids": _encode_state_dict(self.delta_ids), @@ -165,6 +185,7 @@ class EventContext: event_type=input["event_type"], event_state_key=input["event_state_key"], state_group=input["state_group"], + state_group_before_event=input["state_group_before_event"], prev_group=input["prev_group"], delta_ids=_decode_state_dict(input["delta_ids"]), rejected=input["rejected"], diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index b7916de90..05dd8d267 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2280,6 +2280,7 @@ class FederationHandler(BaseHandler): return EventContext.with_state( state_group=state_group, + state_group_before_event=context.state_group_before_event, current_state_ids=current_state_ids, prev_state_ids=prev_state_ids, prev_group=prev_group, diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 2c04ab185..139beef8e 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -16,6 +16,7 @@ import logging from collections import namedtuple +from typing import Iterable, Optional from six import iteritems, itervalues @@ -27,6 +28,7 @@ from twisted.internet import defer from synapse.api.constants import EventTypes from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, StateResolutionVersions +from synapse.events import EventBase from synapse.events.snapshot import EventContext from synapse.logging.utils import log_function from synapse.state import v1, v2 @@ -212,15 +214,17 @@ class StateHandler(object): return joined_hosts @defer.inlineCallbacks - def compute_event_context(self, event, old_state=None): + def compute_event_context( + self, event: EventBase, old_state: Optional[Iterable[EventBase]] = None + ): """Build an EventContext structure for the event. This works out what the current state should be for the event, and generates a new state group if necessary. 
Args: - event (synapse.events.EventBase): - old_state (dict|None): The state at the event if it can't be + event: + old_state: The state at the event if it can't be calculated from existing events. This is normally only specified when receiving an event from federation where we don't have the prev events for, e.g. when backfilling. @@ -251,113 +255,103 @@ class StateHandler(object): # group for it. context = EventContext.with_state( state_group=None, + state_group_before_event=None, current_state_ids=current_state_ids, prev_state_ids=prev_state_ids, ) return context + # + # first of all, figure out the state before the event + # + if old_state: - # We already have the state, so we don't need to calculate it. - # Let's just correctly fill out the context and create a - # new state group for it. + # if we're given the state before the event, then we use that + state_ids_before_event = { + (s.type, s.state_key): s.event_id for s in old_state + } + state_group_before_event = None + state_group_before_event_prev_group = None + deltas_to_state_group_before_event = None - prev_state_ids = {(s.type, s.state_key): s.event_id for s in old_state} + else: + # otherwise, we'll need to resolve the state across the prev_events. + logger.debug("calling resolve_state_groups from compute_event_context") - if event.is_state(): - key = (event.type, event.state_key) - if key in prev_state_ids: - replaces = prev_state_ids[key] - if replaces != event.event_id: # Paranoia check - event.unsigned["replaces_state"] = replaces - current_state_ids = dict(prev_state_ids) - current_state_ids[key] = event.event_id - else: - current_state_ids = prev_state_ids + entry = yield self.resolve_state_groups_for_events( + event.room_id, event.prev_event_ids() + ) - state_group = yield self.state_store.store_state_group( + state_ids_before_event = entry.state + state_group_before_event = entry.state_group + state_group_before_event_prev_group = entry.prev_group + deltas_to_state_group_before_event = entry.delta_ids + + # + # make sure that we have a state group at that point. If it's not a state event, + # that will be the state group for the new event. If it *is* a state event, + # it might get rejected (in which case we'll need to persist it with the + # previous state group) + # + + if not state_group_before_event: + state_group_before_event = yield self.state_store.store_state_group( event.event_id, event.room_id, - prev_group=None, - delta_ids=None, - current_state_ids=current_state_ids, + prev_group=state_group_before_event_prev_group, + delta_ids=deltas_to_state_group_before_event, + current_state_ids=state_ids_before_event, ) - context = EventContext.with_state( - state_group=state_group, - current_state_ids=current_state_ids, - prev_state_ids=prev_state_ids, + # XXX: can we update the state cache entry for the new state group? or + # could we set a flag on resolve_state_groups_for_events to tell it to + # always make a state group? 
+ + # + # now if it's not a state event, we're done + # + + if not event.is_state(): + return EventContext.with_state( + state_group_before_event=state_group_before_event, + state_group=state_group_before_event, + current_state_ids=state_ids_before_event, + prev_state_ids=state_ids_before_event, + prev_group=state_group_before_event_prev_group, + delta_ids=deltas_to_state_group_before_event, ) - return context + # + # otherwise, we'll need to create a new state group for after the event + # - logger.debug("calling resolve_state_groups from compute_event_context") - - entry = yield self.resolve_state_groups_for_events( - event.room_id, event.prev_event_ids() - ) - - prev_state_ids = entry.state - prev_group = None - delta_ids = None - - if event.is_state(): - # If this is a state event then we need to create a new state - # group for the state after this event. - - key = (event.type, event.state_key) - if key in prev_state_ids: - replaces = prev_state_ids[key] + key = (event.type, event.state_key) + if key in state_ids_before_event: + replaces = state_ids_before_event[key] + if replaces != event.event_id: event.unsigned["replaces_state"] = replaces - current_state_ids = dict(prev_state_ids) - current_state_ids[key] = event.event_id + state_ids_after_event = dict(state_ids_before_event) + state_ids_after_event[key] = event.event_id + delta_ids = {key: event.event_id} - if entry.state_group: - # If the state at the event has a state group assigned then - # we can use that as the prev group - prev_group = entry.state_group - delta_ids = {key: event.event_id} - elif entry.prev_group: - # If the state at the event only has a prev group, then we can - # use that as a prev group too. - prev_group = entry.prev_group - delta_ids = dict(entry.delta_ids) - delta_ids[key] = event.event_id - - state_group = yield self.state_store.store_state_group( - event.event_id, - event.room_id, - prev_group=prev_group, - delta_ids=delta_ids, - current_state_ids=current_state_ids, - ) - else: - current_state_ids = prev_state_ids - prev_group = entry.prev_group - delta_ids = entry.delta_ids - - if entry.state_group is None: - entry.state_group = yield self.state_store.store_state_group( - event.event_id, - event.room_id, - prev_group=entry.prev_group, - delta_ids=entry.delta_ids, - current_state_ids=current_state_ids, - ) - entry.state_id = entry.state_group - - state_group = entry.state_group - - context = EventContext.with_state( - state_group=state_group, - current_state_ids=current_state_ids, - prev_state_ids=prev_state_ids, - prev_group=prev_group, + state_group_after_event = yield self.state_store.store_state_group( + event.event_id, + event.room_id, + prev_group=state_group_before_event, delta_ids=delta_ids, + current_state_ids=state_ids_after_event, ) - return context + return EventContext.with_state( + state_group=state_group_after_event, + state_group_before_event=state_group_before_event, + current_state_ids=state_ids_after_event, + prev_state_ids=state_ids_before_event, + prev_group=state_group_before_event, + delta_ids=delta_ids, + ) @measure_func() @defer.inlineCallbacks diff --git a/synapse/storage/data_stores/main/state.py b/synapse/storage/data_stores/main/state.py index 313284803..9e1541988 100644 --- a/synapse/storage/data_stores/main/state.py +++ b/synapse/storage/data_stores/main/state.py @@ -1231,7 +1231,7 @@ class StateStore(StateGroupWorkerStore, StateBackgroundUpdateStore): # if the event was rejected, just give it the same state as its # predecessor. 
if context.rejected: - state_groups[event.event_id] = context.prev_group + state_groups[event.event_id] = context.state_group_before_event continue state_groups[event.event_id] = context.state_group diff --git a/tests/handlers/test_federation.py b/tests/handlers/test_federation.py index d56220f40..b4d92cf73 100644 --- a/tests/handlers/test_federation.py +++ b/tests/handlers/test_federation.py @@ -12,13 +12,19 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +import logging + from synapse.api.constants import EventTypes from synapse.api.errors import AuthError, Codes +from synapse.federation.federation_base import event_from_pdu_json +from synapse.logging.context import LoggingContext, run_in_background from synapse.rest import admin from synapse.rest.client.v1 import login, room from tests import unittest +logger = logging.getLogger(__name__) + class FederationTestCase(unittest.HomeserverTestCase): servlets = [ @@ -79,3 +85,123 @@ class FederationTestCase(unittest.HomeserverTestCase): self.assertEqual(failure.code, 403, failure) self.assertEqual(failure.errcode, Codes.FORBIDDEN, failure) self.assertEqual(failure.msg, "You are not invited to this room.") + + def test_rejected_message_event_state(self): + """ + Check that we store the state group correctly for rejected non-state events. + + Regression test for #6289. + """ + OTHER_SERVER = "otherserver" + OTHER_USER = "@otheruser:" + OTHER_SERVER + + # create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + + # pretend that another server has joined + join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id) + + # check the state group + sg = self.successResultOf( + self.store._get_state_group_for_event(join_event.event_id) + ) + + # build and send an event which will be rejected + ev = event_from_pdu_json( + { + "type": EventTypes.Message, + "content": {}, + "room_id": room_id, + "sender": "@yetanotheruser:" + OTHER_SERVER, + "depth": join_event["depth"] + 1, + "prev_events": [join_event.event_id], + "auth_events": [], + "origin_server_ts": self.clock.time_msec(), + }, + join_event.format_version, + ) + + with LoggingContext(request="send_rejected"): + d = run_in_background(self.handler.on_receive_pdu, OTHER_SERVER, ev) + self.get_success(d) + + # that should have been rejected + e = self.get_success(self.store.get_event(ev.event_id, allow_rejected=True)) + self.assertIsNotNone(e.rejected_reason) + + # ... and the state group should be the same as before + sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id)) + + self.assertEqual(sg, sg2) + + def test_rejected_state_event_state(self): + """ + Check that we store the state group correctly for rejected state events. + + Regression test for #6289. 
+ """ + OTHER_SERVER = "otherserver" + OTHER_USER = "@otheruser:" + OTHER_SERVER + + # create the room + user_id = self.register_user("kermit", "test") + tok = self.login("kermit", "test") + room_id = self.helper.create_room_as(room_creator=user_id, tok=tok) + + # pretend that another server has joined + join_event = self._build_and_send_join_event(OTHER_SERVER, OTHER_USER, room_id) + + # check the state group + sg = self.successResultOf( + self.store._get_state_group_for_event(join_event.event_id) + ) + + # build and send an event which will be rejected + ev = event_from_pdu_json( + { + "type": "org.matrix.test", + "state_key": "test_key", + "content": {}, + "room_id": room_id, + "sender": "@yetanotheruser:" + OTHER_SERVER, + "depth": join_event["depth"] + 1, + "prev_events": [join_event.event_id], + "auth_events": [], + "origin_server_ts": self.clock.time_msec(), + }, + join_event.format_version, + ) + + with LoggingContext(request="send_rejected"): + d = run_in_background(self.handler.on_receive_pdu, OTHER_SERVER, ev) + self.get_success(d) + + # that should have been rejected + e = self.get_success(self.store.get_event(ev.event_id, allow_rejected=True)) + self.assertIsNotNone(e.rejected_reason) + + # ... and the state group should be the same as before + sg2 = self.successResultOf(self.store._get_state_group_for_event(ev.event_id)) + + self.assertEqual(sg, sg2) + + def _build_and_send_join_event(self, other_server, other_user, room_id): + join_event = self.get_success( + self.handler.on_make_join_request(other_server, room_id, other_user) + ) + # the auth code requires that a signature exists, but doesn't check that + # signature... go figure. + join_event.signatures[other_server] = {"x": "y"} + with LoggingContext(request="send_join"): + d = run_in_background( + self.handler.on_send_join_request, other_server, join_event + ) + self.get_success(d) + + # sanity-check: the room should show that the new user is a member + r = self.get_success(self.store.get_current_state_ids(room_id)) + self.assertEqual(r[(EventTypes.Member, other_user)], join_event.event_id) + + return join_event diff --git a/tests/test_state.py b/tests/test_state.py index 38246555b..176535947 100644 --- a/tests/test_state.py +++ b/tests/test_state.py @@ -21,6 +21,7 @@ from synapse.api.auth import Auth from synapse.api.constants import EventTypes, Membership from synapse.api.room_versions import RoomVersions from synapse.events import FrozenEvent +from synapse.events.snapshot import EventContext from synapse.state import StateHandler, StateResolutionHandler from tests import unittest @@ -198,16 +199,22 @@ class StateTestCase(unittest.TestCase): self.store.register_events(graph.walk()) - context_store = {} + context_store = {} # type: dict[str, EventContext] for event in graph.walk(): context = yield self.state.compute_event_context(event) self.store.register_event_context(event, context) context_store[event.event_id] = context - prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store) + ctx_c = context_store["C"] + ctx_d = context_store["D"] + + prev_state_ids = yield ctx_d.get_prev_state_ids(self.store) self.assertEqual(2, len(prev_state_ids)) + self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event) + self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) + @defer.inlineCallbacks def test_branch_basic_conflict(self): graph = Graph( @@ -241,12 +248,19 @@ class StateTestCase(unittest.TestCase): self.store.register_event_context(event, context) context_store[event.event_id] = 
context - prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store) + # C ends up winning the resolution between B and C + ctx_c = context_store["C"] + ctx_d = context_store["D"] + + prev_state_ids = yield ctx_d.get_prev_state_ids(self.store) self.assertSetEqual( {"START", "A", "C"}, {e_id for e_id in prev_state_ids.values()} ) + self.assertEqual(ctx_c.state_group, ctx_d.state_group_before_event) + self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) + @defer.inlineCallbacks def test_branch_have_banned_conflict(self): graph = Graph( @@ -292,11 +306,18 @@ class StateTestCase(unittest.TestCase): self.store.register_event_context(event, context) context_store[event.event_id] = context - prev_state_ids = yield context_store["E"].get_prev_state_ids(self.store) + # C ends up winning the resolution between C and D because bans win over other + # changes + ctx_c = context_store["C"] + ctx_e = context_store["E"] + + prev_state_ids = yield ctx_e.get_prev_state_ids(self.store) self.assertSetEqual( {"START", "A", "B", "C"}, {e for e in prev_state_ids.values()} ) + self.assertEqual(ctx_c.state_group, ctx_e.state_group_before_event) + self.assertEqual(ctx_e.state_group_before_event, ctx_e.state_group) @defer.inlineCallbacks def test_branch_have_perms_conflict(self): @@ -360,12 +381,20 @@ class StateTestCase(unittest.TestCase): self.store.register_event_context(event, context) context_store[event.event_id] = context - prev_state_ids = yield context_store["D"].get_prev_state_ids(self.store) + # B ends up winning the resolution between B and C because power levels + # win over other changes. + ctx_b = context_store["B"] + ctx_d = context_store["D"] + + prev_state_ids = yield ctx_d.get_prev_state_ids(self.store) self.assertSetEqual( {"A1", "A2", "A3", "A5", "B"}, {e for e in prev_state_ids.values()} ) + self.assertEqual(ctx_b.state_group, ctx_d.state_group_before_event) + self.assertEqual(ctx_d.state_group_before_event, ctx_d.state_group) + def _add_depths(self, nodes, edges): def _get_depth(ev): node = nodes[ev] @@ -390,13 +419,16 @@ class StateTestCase(unittest.TestCase): context = yield self.state.compute_event_context(event, old_state=old_state) - current_state_ids = yield context.get_current_state_ids(self.store) + prev_state_ids = yield context.get_prev_state_ids(self.store) + self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values()) - self.assertEqual( - set(e.event_id for e in old_state), set(current_state_ids.values()) + current_state_ids = yield context.get_current_state_ids(self.store) + self.assertCountEqual( + (e.event_id for e in old_state), current_state_ids.values() ) - self.assertIsNotNone(context.state_group) + self.assertIsNotNone(context.state_group_before_event) + self.assertEqual(context.state_group_before_event, context.state_group) @defer.inlineCallbacks def test_annotate_with_old_state(self): @@ -411,11 +443,18 @@ class StateTestCase(unittest.TestCase): context = yield self.state.compute_event_context(event, old_state=old_state) prev_state_ids = yield context.get_prev_state_ids(self.store) + self.assertCountEqual((e.event_id for e in old_state), prev_state_ids.values()) - self.assertEqual( - set(e.event_id for e in old_state), set(prev_state_ids.values()) + current_state_ids = yield context.get_current_state_ids(self.store) + self.assertCountEqual( + (e.event_id for e in old_state + [event]), current_state_ids.values() ) + self.assertIsNotNone(context.state_group_before_event) + self.assertNotEqual(context.state_group_before_event, 
context.state_group) + self.assertEqual(context.state_group_before_event, context.prev_group) + self.assertEqual({("state", ""): event.event_id}, context.delta_ids) + @defer.inlineCallbacks def test_trivial_annotate_message(self): prev_event_id = "prev_event_id" From feafd98aca3e72d27516c79f986a28ea39886ebc Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 6 Nov 2019 10:02:23 +0000 Subject: [PATCH 034/133] 1.5.1 --- CHANGES.md | 9 +++++++++ changelog.d/6331.feature | 1 - changelog.d/6334.feature | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 5 files changed, 16 insertions(+), 3 deletions(-) delete mode 100644 changelog.d/6331.feature delete mode 100644 changelog.d/6334.feature diff --git a/CHANGES.md b/CHANGES.md index 6faa4b8dc..9312dc294 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.5.1 (2019-11-06) +========================== + +Features +-------- + +- Limit the length of data returned by url previews, to prevent DoS attacks. ([\#6331](https://github.com/matrix-org/synapse/issues/6331), [\#6334](https://github.com/matrix-org/synapse/issues/6334)) + + Synapse 1.5.0 (2019-10-29) ========================== diff --git a/changelog.d/6331.feature b/changelog.d/6331.feature deleted file mode 100644 index eaf69ef3f..000000000 --- a/changelog.d/6331.feature +++ /dev/null @@ -1 +0,0 @@ -Limit the length of data returned by url previews, to prevent DoS attacks. diff --git a/changelog.d/6334.feature b/changelog.d/6334.feature deleted file mode 100644 index eaf69ef3f..000000000 --- a/changelog.d/6334.feature +++ /dev/null @@ -1 +0,0 @@ -Limit the length of data returned by url previews, to prevent DoS attacks. diff --git a/debian/changelog b/debian/changelog index acda7e5c6..c4415f460 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.5.1) stable; urgency=medium + + * New synapse release 1.5.1. + + -- Synapse Packaging team Wed, 06 Nov 2019 10:02:14 +0000 + matrix-synapse-py3 (1.5.0) stable; urgency=medium * New synapse release 1.5.0. 
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 8587ffa76..ec16f54a4 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass

-__version__ = "1.5.0"
+__version__ = "1.5.1"

 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From 70d93cafdb4a3055849e7b84881cfd6225f0b6e5 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 6 Nov 2019 10:59:03 +0000
Subject: [PATCH 035/133] Update insert

---
 synapse/storage/data_stores/main/events_bg_updates.py | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index a70392747..2fcb0c33d 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -537,7 +537,12 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                         txn=txn,
                         table="event_labels",
                         values=[
-                            {"event_id": event_id, "label": label}
+                            {
+                                "event_id": event_id,
+                                "label": label,
+                                "room_id": event_json["room_id"],
+                                "topological_ordering": event_json["depth"],
+                            }
                             for label in event_json["content"].get(
                                 EventContentFields.Labels, []
                             )

From 24a214bd1bc396cbcc41c4c913938299ca9ffcf5 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 6 Nov 2019 11:04:19 +0000
Subject: [PATCH 036/133] Fix field name

---
 synapse/storage/data_stores/main/events_bg_updates.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index 2fcb0c33d..309bfcafe 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -544,7 +544,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                                 "topological_ordering": event_json["depth"],
                             }
                             for label in event_json["content"].get(
-                                EventContentFields.Labels, []
+                                EventContentFields.LABELS, []
                             )
                         ],
                     )

From 541f1b92d946093fef17ea8b95a7cb595fc5ffc4 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Nov 2019 17:39:16 +0000
Subject: [PATCH 037/133] Only do `rc_login` ratelimiting on successful login.

We were doing this in a number of places which meant that some login
code paths incremented the counter multiple times.

It was also applying ratelimiting to UIA endpoints, which was probably
not intentional.

In particular, some custom auth modules were calling `check_user_exists`,
which incremented the counters, meaning that people would fail to log in
sometimes.
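Condensed, the intended flow looks like the sketch below. The Ratelimiter keyword
arguments mirror the calls in the diff that follows; the wrapper function, the
stand-in LoginError, and the single shared `cfg` are illustrative only (the real
code uses `rc_login_failed_attempts` and `rc_login_account` separately):

    class LoginError(Exception):  # stand-in for synapse.api.errors.LoginError
        pass

    def login_flow(failed_limiter, account_limiter, clock, cfg, user_id, validate):
        # Check the failed-attempt limit up front, without consuming a token.
        failed_limiter.ratelimit(
            user_id.lower(), time_now_s=clock.time(),
            rate_hz=cfg.per_second, burst_count=cfg.burst_count, update=False,
        )
        try:
            result = validate()
        except LoginError:
            # Only count a failure when validation actually fails; can_do_action
            # doesn't raise, so the original LoginError is not masked.
            failed_limiter.can_do_action(
                user_id.lower(), time_now_s=clock.time(),
                rate_hz=cfg.per_second, burst_count=cfg.burst_count, update=True,
            )
            raise
        # The per-account `rc_login` limiter now fires only on success.
        account_limiter.ratelimit(
            user_id.lower(), time_now_s=clock.time(),
            rate_hz=cfg.per_second, burst_count=cfg.burst_count, update=True,
        )
        return result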
--- synapse/handlers/auth.py | 55 +--------------- synapse/rest/client/v1/login.py | 111 ++++++++++++++++++++++++++------ 2 files changed, 94 insertions(+), 72 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7a0f54ca2..14c6387b6 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -35,7 +35,6 @@ from synapse.api.errors import ( SynapseError, UserDeactivatedError, ) -from synapse.api.ratelimiting import Ratelimiter from synapse.handlers.ui_auth import INTERACTIVE_AUTH_CHECKERS from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker from synapse.logging.context import defer_to_thread @@ -102,9 +101,6 @@ class AuthHandler(BaseHandler): login_types.append(t) self._supported_login_types = login_types - self._account_ratelimiter = Ratelimiter() - self._failed_attempts_ratelimiter = Ratelimiter() - self._clock = self.hs.get_clock() @defer.inlineCallbacks @@ -501,11 +497,8 @@ class AuthHandler(BaseHandler): multiple matches Raises: - LimitExceededError if the ratelimiter's login requests count for this - user is too high too proceed. UserDeactivatedError if a user is found but is deactivated. """ - self.ratelimit_login_per_account(user_id) res = yield self._find_user_id_and_pwd_hash(user_id) if res is not None: return res[0] @@ -572,8 +565,6 @@ class AuthHandler(BaseHandler): StoreError if there was a problem accessing the database SynapseError if there was a problem with the request LoginError if there was an authentication problem. - LimitExceededError if the ratelimiter's login requests count for this - user is too high too proceed. """ if username.startswith("@"): @@ -581,8 +572,6 @@ class AuthHandler(BaseHandler): else: qualified_user_id = UserID(username, self.hs.hostname).to_string() - self.ratelimit_login_per_account(qualified_user_id) - login_type = login_submission.get("type") known_login_type = False @@ -650,15 +639,6 @@ class AuthHandler(BaseHandler): if not known_login_type: raise SynapseError(400, "Unknown login type %s" % login_type) - # unknown username or invalid password. - self._failed_attempts_ratelimiter.ratelimit( - qualified_user_id.lower(), - time_now_s=self._clock.time(), - rate_hz=self.hs.config.rc_login_failed_attempts.per_second, - burst_count=self.hs.config.rc_login_failed_attempts.burst_count, - update=True, - ) - # We raise a 403 here, but note that if we're doing user-interactive # login, it turns all LoginErrors into a 401 anyway. raise LoginError(403, "Invalid password", errcode=Codes.FORBIDDEN) @@ -710,10 +690,6 @@ class AuthHandler(BaseHandler): Returns: Deferred[unicode] the canonical_user_id, or Deferred[None] if unknown user/bad password - - Raises: - LimitExceededError if the ratelimiter's login requests count for this - user is too high too proceed. """ lookupres = yield self._find_user_id_and_pwd_hash(user_id) if not lookupres: @@ -742,7 +718,7 @@ class AuthHandler(BaseHandler): auth_api.validate_macaroon(macaroon, "login", user_id) except Exception: raise AuthError(403, "Invalid token", errcode=Codes.FORBIDDEN) - self.ratelimit_login_per_account(user_id) + yield self.auth.check_auth_blocking(user_id) return user_id @@ -912,35 +888,6 @@ class AuthHandler(BaseHandler): else: return defer.succeed(False) - def ratelimit_login_per_account(self, user_id): - """Checks whether the process must be stopped because of ratelimiting. - - Checks against two ratelimiters: the generic one for login attempts per - account and the one specific to failed attempts. 
- - Args: - user_id (unicode): complete @user:id - - Raises: - LimitExceededError if one of the ratelimiters' login requests count - for this user is too high too proceed. - """ - self._failed_attempts_ratelimiter.ratelimit( - user_id.lower(), - time_now_s=self._clock.time(), - rate_hz=self.hs.config.rc_login_failed_attempts.per_second, - burst_count=self.hs.config.rc_login_failed_attempts.burst_count, - update=False, - ) - - self._account_ratelimiter.ratelimit( - user_id.lower(), - time_now_s=self._clock.time(), - rate_hz=self.hs.config.rc_login_account.per_second, - burst_count=self.hs.config.rc_login_account.burst_count, - update=True, - ) - @attr.s class MacaroonGenerator(object): diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index 24a0ce74f..abc210da5 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -92,8 +92,11 @@ class LoginRestServlet(RestServlet): self.auth_handler = self.hs.get_auth_handler() self.registration_handler = hs.get_registration_handler() self.handlers = hs.get_handlers() + self._clock = hs.get_clock() self._well_known_builder = WellKnownBuilder(hs) self._address_ratelimiter = Ratelimiter() + self._account_ratelimiter = Ratelimiter() + self._failed_attempts_ratelimiter = Ratelimiter() def on_GET(self, request): flows = [] @@ -202,6 +205,16 @@ class LoginRestServlet(RestServlet): # (See add_threepid in synapse/handlers/auth.py) address = address.lower() + # We also apply account rate limiting using the 3PID as a key, as + # otherwise using 3PID bypasses the ratelimiting based on user ID. + self._failed_attempts_ratelimiter.ratelimit( + (medium, address), + time_now_s=self._clock.time(), + rate_hz=self.hs.config.rc_login_failed_attempts.per_second, + burst_count=self.hs.config.rc_login_failed_attempts.burst_count, + update=False, + ) + # Check for login providers that support 3pid login types ( canonical_user_id, @@ -211,7 +224,8 @@ class LoginRestServlet(RestServlet): ) if canonical_user_id: # Authentication through password provider and 3pid succeeded - result = yield self._register_device_with_callback( + + result = yield self._complete_login( canonical_user_id, login_submission, callback_3pid ) return result @@ -225,6 +239,21 @@ class LoginRestServlet(RestServlet): logger.warning( "unknown 3pid identifier medium %s, address %r", medium, address ) + # We mark that we've failed to log in here, as + # `check_password_provider_3pid` might have returned `None` due + # to an incorrect password, rather than the account not + # existing. + # + # If it returned None but the 3PID was bound then we won't hit + # this code path, which is fine as then the per-user ratelimit + # will kick in below. 
+            self._failed_attempts_ratelimiter.can_do_action(
+                (medium, address),
+                time_now_s=self._clock.time(),
+                rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
+                burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+                update=True,
+            )
             raise LoginError(403, "", errcode=Codes.FORBIDDEN)

         identifier = {"type": "m.id.user", "user": user_id}
@@ -236,29 +265,84 @@ class LoginRestServlet(RestServlet):
         if "user" not in identifier:
             raise SynapseError(400, "User identifier is missing 'user' key")

-        canonical_user_id, callback = yield self.auth_handler.validate_login(
-            identifier["user"], login_submission
+        if identifier["user"].startswith("@"):
+            qualified_user_id = identifier["user"]
+        else:
+            qualified_user_id = UserID(identifier["user"], self.hs.hostname).to_string()
+
+        # Check if we've hit the failed ratelimit (but don't update it)
+        self._failed_attempts_ratelimiter.ratelimit(
+            qualified_user_id.lower(),
+            time_now_s=self._clock.time(),
+            rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
+            burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+            update=False,
         )

+        try:
+            canonical_user_id, callback = yield self.auth_handler.validate_login(
+                identifier["user"], login_submission
+            )
+        except LoginError:
+            # The user has failed to log in, so we need to update the rate
+            # limiter. Using `can_do_action` avoids us raising a ratelimit
+            # exception and masking the LoginError. The actual ratelimiting
+            # should have happened above.
+            self._failed_attempts_ratelimiter.can_do_action(
+                qualified_user_id.lower(),
+                time_now_s=self._clock.time(),
+                rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
+                burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+                update=True,
+            )
+            raise
+
+        result = yield self._complete_login(
             canonical_user_id, login_submission, callback
         )
         return result

     @defer.inlineCallbacks
-    def _register_device_with_callback(self, user_id, login_submission, callback=None):
-        """ Registers a device with a given user_id. Optionally run a callback
-        function after registration has completed.
+    def _complete_login(
+        self, user_id, login_submission, callback=None, create_non_existant_users=False
+    ):
+        """Called when we've successfully authed the user and now need to
+        actually log them in (e.g. create devices). This gets called on
+        all successful logins.
+
+        Applies the ratelimiting for successful login attempts against an
+        account.

         Args:
             user_id (str): ID of the user to register.
             login_submission (dict): Dictionary of login information.
             callback (func|None): Callback function to run after registration.
+            create_non_existant_users (bool): Whether to create the user if
+                they don't exist. Defaults to False.

         Returns:
             result (Dict[str,str]): Dictionary of account information after
                 successful registration.
         """
+
+        # Before we actually log them in we check if they've already logged in
+        # too often. This happens here rather than before as we don't
+        # necessarily know the user before now.
+ self._account_ratelimiter.ratelimit( + user_id.lower(), + time_now_s=self._clock.time(), + rate_hz=self.hs.config.rc_login_account.per_second, + burst_count=self.hs.config.rc_login_account.burst_count, + update=True, + ) + + if create_non_existant_users: + user_id = yield self.auth_handler.check_user_exists(user_id) + if not user_id: + user_id = yield self.registration_handler.register_user( + localpart=UserID.from_string(user_id).localpart + ) + device_id = login_submission.get("device_id") initial_display_name = login_submission.get("initial_device_display_name") device_id, access_token = yield self.registration_handler.register_device( @@ -285,7 +369,7 @@ class LoginRestServlet(RestServlet): token ) - result = yield self._register_device_with_callback(user_id, login_submission) + result = yield self._complete_login(user_id, login_submission) return result @defer.inlineCallbacks @@ -313,16 +397,7 @@ class LoginRestServlet(RestServlet): raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED) user_id = UserID(user, self.hs.hostname).to_string() - - registered_user_id = yield self.auth_handler.check_user_exists(user_id) - if not registered_user_id: - registered_user_id = yield self.registration_handler.register_user( - localpart=user - ) - - result = yield self._register_device_with_callback( - registered_user_id, login_submission - ) + result = yield self._complete_login(user_id, login_submission) return result From f697b4b4a2ca329a32105ddf83735737808306bf Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 6 Nov 2019 11:00:54 +0000 Subject: [PATCH 038/133] Add failed auth ratelimiting to UIA --- synapse/handlers/auth.py | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 14c6387b6..20c62bd78 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -35,6 +35,7 @@ from synapse.api.errors import ( SynapseError, UserDeactivatedError, ) +from synapse.api.ratelimiting import Ratelimiter from synapse.handlers.ui_auth import INTERACTIVE_AUTH_CHECKERS from synapse.handlers.ui_auth.checkers import UserInteractiveAuthChecker from synapse.logging.context import defer_to_thread @@ -101,6 +102,10 @@ class AuthHandler(BaseHandler): login_types.append(t) self._supported_login_types = login_types + # Ratelimiter for failed auth during UIA. Uses same ratelimit config + # as per `rc_login.failed_attempts`. 
+        self._failed_uia_attempts_ratelimiter = Ratelimiter()
+
         self._clock = self.hs.get_clock()

     @defer.inlineCallbacks
@@ -129,12 +134,38 @@ class AuthHandler(BaseHandler):

             AuthError if the client has completed a login flow, and it gives
                 a different user to `requester`
+
+            LimitExceededError if the ratelimiter's failed requests count for this
+                user is too high to proceed
+
         """
+        user_id = requester.user.to_string()
+
+        # Check if we should be ratelimited due to too many previous failed attempts
+        self._failed_uia_attempts_ratelimiter.ratelimit(
+            user_id,
+            time_now_s=self._clock.time(),
+            rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
+            burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+            update=False,
+        )
+
         # build a list of supported flows
         flows = [[login_type] for login_type in self._supported_login_types]

-        result, params, _ = yield self.check_auth(flows, request_body, clientip)
+        try:
+            result, params, _ = yield self.check_auth(flows, request_body, clientip)
+        except LoginError:
+            # Update the ratelimiter to say we failed (`can_do_action` doesn't raise).
+            self._failed_uia_attempts_ratelimiter.can_do_action(
+                user_id,
+                time_now_s=self._clock.time(),
+                rate_hz=self.hs.config.rc_login_failed_attempts.per_second,
+                burst_count=self.hs.config.rc_login_failed_attempts.burst_count,
+                update=True,
+            )
+            raise

         # find the completed login type
         for login_type in self._supported_login_types:

From 4fc53bf1fb97d52c19aa718e67f31d290218e3c1 Mon Sep 17 00:00:00 2001
From: Erik Johnston
Date: Tue, 5 Nov 2019 17:42:42 +0000
Subject: [PATCH 039/133] Newsfile

---
 changelog.d/6335.bugfix | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6335.bugfix

diff --git a/changelog.d/6335.bugfix b/changelog.d/6335.bugfix
new file mode 100644
index 000000000..a95f6b9ee
--- /dev/null
+++ b/changelog.d/6335.bugfix
@@ -0,0 +1 @@
+Fix bug where `rc_login` ratelimiting would prematurely kick in.

From b33c4f7a828e722d6115f73525e0456edb79a90f Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Wed, 6 Nov 2019 11:55:00 +0000
Subject: [PATCH 040/133] Numeric ID checker now checks @0, don't ratelimit on
 checking

---
 synapse/handlers/register.py                  | 41 +++++++++++--------
 .../storage/data_stores/main/registration.py  |  8 ++--
 2 files changed, 29 insertions(+), 20 deletions(-)

diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py
index cff6b0d37..3c142a439 100644
--- a/synapse/handlers/register.py
+++ b/synapse/handlers/register.py
@@ -168,6 +168,7 @@ class RegistrationHandler(BaseHandler):
         Raises:
             RegistrationError if there was a problem registering.
         """
+        yield self._check_registration_ratelimit(address)
         yield self.auth.check_auth_blocking(threepid=threepid)

         password_hash = None
@@ -414,6 +415,30 @@ class RegistrationHandler(BaseHandler):
             ratelimit=False,
         )

+    def _check_registration_ratelimit(self, address):
+        """A simple helper method to check whether the registration rate limit has been hit
+        for a given IP address
+
+        Args:
+            address (str): the IP address used to perform the registration.
+
+        Raises:
+            LimitExceededError: If the rate limit has been exceeded.
+ """ + time_now = self.clock.time() + + allowed, time_allowed = self.ratelimiter.can_do_action( + address, + time_now_s=time_now, + rate_hz=self.hs.config.rc_registration.per_second, + burst_count=self.hs.config.rc_registration.burst_count, + ) + + if not allowed: + raise LimitExceededError( + retry_after_ms=int(1000 * (time_allowed - time_now)) + ) + def register_with_store( self, user_id, @@ -446,22 +471,6 @@ class RegistrationHandler(BaseHandler): Returns: Deferred """ - # Don't rate limit for app services - if appservice_id is None and address is not None: - time_now = self.clock.time() - - allowed, time_allowed = self.ratelimiter.can_do_action( - address, - time_now_s=time_now, - rate_hz=self.hs.config.rc_registration.per_second, - burst_count=self.hs.config.rc_registration.burst_count, - ) - - if not allowed: - raise LimitExceededError( - retry_after_ms=int(1000 * (time_allowed - time_now)) - ) - if self.hs.config.worker_app: return self._register_client( user_id=user_id, diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index f70d41eca..ee1b2b2bb 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -488,14 +488,14 @@ class RegistrationWorkerStore(SQLBaseStore): we can. Unfortunately, it's possible some of them are already taken by existing users, and there may be gaps in the already taken range. This function returns the start of the first allocatable gap. This is to - avoid the case of ID 10000000 being pre-allocated, so us wasting the - first (and shortest) many generated user IDs. + avoid the case of ID 1000 being pre-allocated and starting at 1001 while + 0-999 are available. """ def _find_next_generated_user_id(txn): - # We bound between '@1' and '@a' to avoid pulling the entire table + # We bound between '@0' and '@a' to avoid pulling the entire table # out. - txn.execute("SELECT name FROM users WHERE '@1' <= name AND name < '@a'") + txn.execute("SELECT name FROM users WHERE '@0' <= name AND name < '@a'") regex = re.compile(r"^@(\d+):") From 4059d61e2608ac823ef04fe37f23fcac2387a37b Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 6 Nov 2019 12:01:54 +0000 Subject: [PATCH 041/133] Don't forget to ratelimit calls outside of RegistrationHandler --- synapse/handlers/register.py | 4 ++-- synapse/replication/http/register.py | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 3c142a439..8be82e375 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -168,7 +168,7 @@ class RegistrationHandler(BaseHandler): Raises: RegistrationError if there was a problem registering. 
""" - yield self._check_registration_ratelimit(address) + yield self.check_registration_ratelimit(address) yield self.auth.check_auth_blocking(threepid=threepid) password_hash = None @@ -415,7 +415,7 @@ class RegistrationHandler(BaseHandler): ratelimit=False, ) - def _check_registration_ratelimit(self, address): + def check_registration_ratelimit(self, address): """A simple helper method to check whether the registration rate limit has been hit for a given IP address diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 915cfb943..6f4bba7aa 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -75,6 +75,8 @@ class ReplicationRegisterServlet(ReplicationEndpoint): async def _handle_request(self, request, user_id): content = parse_json_object_from_request(request) + await self.registration_handler.check_registration_ratelimit(content["address"]) + await self.registration_handler.register_with_store( user_id=user_id, password_hash=content["password_hash"], From d2f6a67cb4c8f1ea1a4ae563dd53139838b019c7 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 6 Nov 2019 12:03:12 +0000 Subject: [PATCH 042/133] Add changelog --- changelog.d/6338.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6338.bugfix diff --git a/changelog.d/6338.bugfix b/changelog.d/6338.bugfix new file mode 100644 index 000000000..8e469f0fb --- /dev/null +++ b/changelog.d/6338.bugfix @@ -0,0 +1 @@ +Prevent the server taking a long time to start up when guest registration is enabled. \ No newline at end of file From 4257feb20f328c83ac7cb27113f779e844623e30 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Wed, 6 Nov 2019 13:35:56 +0000 Subject: [PATCH 043/133] build debs for eoan and bullseye --- scripts-dev/build_debian_packages | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts-dev/build_debian_packages b/scripts-dev/build_debian_packages index 93305ee9b..84eaec6a9 100755 --- a/scripts-dev/build_debian_packages +++ b/scripts-dev/build_debian_packages @@ -20,11 +20,13 @@ from concurrent.futures import ThreadPoolExecutor DISTS = ( "debian:stretch", "debian:buster", + "debian:bullseye", "debian:sid", "ubuntu:xenial", "ubuntu:bionic", "ubuntu:cosmic", "ubuntu:disco", + "ubuntu:eoan", ) DESC = '''\ From 1fe3cc2c9c59001a6d3f7b28f81bd6681c3c03ac Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 6 Nov 2019 14:54:24 +0000 Subject: [PATCH 044/133] Address review comments --- synapse/handlers/register.py | 24 ++++++++++++------------ synapse/replication/http/register.py | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 8be82e375..47b9ae8d7 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -24,7 +24,6 @@ from synapse.api.errors import ( AuthError, Codes, ConsentNotGivenError, - LimitExceededError, RegistrationError, SynapseError, ) @@ -218,8 +217,8 @@ class RegistrationHandler(BaseHandler): else: # autogen a sequential user ID - user = None - while not user: + # Fail after being unable to find a suitable ID a few times + for x in range(10): localpart = yield self._generate_user_id() user = UserID(localpart, self.hs.hostname) user_id = user.to_string() @@ -234,10 +233,12 @@ class RegistrationHandler(BaseHandler): create_profile_with_displayname=default_display_name, address=address, ) + + # Successfully registered + break except SynapseError: # if user id is taken, just generate another - 
user = None - user_id = None + pass if not self.hs.config.user_consent_at_registration: yield self._auto_join_rooms(user_id) @@ -420,25 +421,24 @@ class RegistrationHandler(BaseHandler): for a given IP address Args: - address (str): the IP address used to perform the registration. + address (str|None): the IP address used to perform the registration. If this is + None, no ratelimiting will be performed. Raises: LimitExceededError: If the rate limit has been exceeded. """ + if not address: + return + time_now = self.clock.time() - allowed, time_allowed = self.ratelimiter.can_do_action( + self.ratelimiter.ratelimit( address, time_now_s=time_now, rate_hz=self.hs.config.rc_registration.per_second, burst_count=self.hs.config.rc_registration.burst_count, ) - if not allowed: - raise LimitExceededError( - retry_after_ms=int(1000 * (time_allowed - time_now)) - ) - def register_with_store( self, user_id, diff --git a/synapse/replication/http/register.py b/synapse/replication/http/register.py index 6f4bba7aa..0c4aca129 100644 --- a/synapse/replication/http/register.py +++ b/synapse/replication/http/register.py @@ -75,7 +75,7 @@ class ReplicationRegisterServlet(ReplicationEndpoint): async def _handle_request(self, request, user_id): content = parse_json_object_from_request(request) - await self.registration_handler.check_registration_ratelimit(content["address"]) + self.registration_handler.check_registration_ratelimit(content["address"]) await self.registration_handler.register_with_store( user_id=user_id, From 55bc8d531e0dfe6623d98a9e81ee9a63d1c2799a Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 6 Nov 2019 16:52:54 +0000 Subject: [PATCH 045/133] raise exception after multiple failures --- synapse/handlers/register.py | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 47b9ae8d7..235f11c32 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -217,8 +217,13 @@ class RegistrationHandler(BaseHandler): else: # autogen a sequential user ID - # Fail after being unable to find a suitable ID a few times - for x in range(10): + fail_count = 0 + user = None + while not user: + # Fail after being unable to find a suitable ID a few times + if fail_count > 10: + raise SynapseError(500, "Unable to find a suitable guest user ID") + localpart = yield self._generate_user_id() user = UserID(localpart, self.hs.hostname) user_id = user.to_string() @@ -238,7 +243,9 @@ class RegistrationHandler(BaseHandler): break except SynapseError: # if user id is taken, just generate another - pass + user = None + user_id = None + fail_count += 1 if not self.hs.config.user_consent_at_registration: yield self._auto_join_rooms(user_id) From 71f3bd734fe9f8f903c5a496720e7dcf926f2f4a Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 6 Nov 2019 17:00:18 +0000 Subject: [PATCH 046/133] Use correct type annotation --- synapse/storage/data_stores/main/events.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 3049a21dc..d69c59f5a 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -19,7 +19,7 @@ import itertools import logging from collections import Counter as c_counter, OrderedDict, namedtuple from functools import wraps -from typing import Set +from typing import Collection from six import iteritems, text_type from six.moves import range 
@@ -1736,7 +1736,7 @@ class EventsStore( return state_groups def purge_unreferenced_state_groups( - self, room_id: str, state_groups_to_delete: Set[int] + self, room_id: str, state_groups_to_delete: Collection[int] ) -> defer.Deferred: """Deletes no longer referenced state groups and de-deltas any state groups that reference them. From 5c3363233cca7044a333b7e19ba239eaf5587ff8 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 6 Nov 2019 17:02:05 +0000 Subject: [PATCH 047/133] Fix deleting state groups during room purge. And fix the tests to actually test that things got deleted. --- synapse/storage/data_stores/main/events.py | 27 +++++++++++----------- tests/rest/admin/test_admin.py | 4 +++- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index d69c59f5a..946823876 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -1633,7 +1633,20 @@ class EventsStore( return self.runInteraction("purge_room", self._purge_room_txn, room_id) def _purge_room_txn(self, txn, room_id): - # First delete tables which lack an index on room_id but have one on event_id + # First we fetch all the state groups that should be deleted, before + # we delete that information. + txn.execute( + """ + SELECT DISTINCT state_group FROM events + INNER JOIN event_to_state_groups USING(event_id) + WHERE events.room_id = ? + """, + (room_id,), + ) + + state_groups = [row[0] for row in txn] + + # Now we delete tables which lack an index on room_id but have one on event_id for table in ( "event_auth", "event_edges", @@ -1717,18 +1730,6 @@ class EventsStore( # index on them. In any case we should be clearing out 'stream' tables # periodically anyway (#5888) - # Now we fetch all the state groups that should be deleted. - txn.execute( - """ - SELECT DISTINCT state_group FROM events - INNER JOIN event_to_state_groups USING(event_id) - WHERE events.room_id = ? - """, - (room_id,), - ) - - state_groups = [row[0] for row in txn] - # TODO: we could probably usefully do a bunch of cache invalidation here logger.info("[purge] done") diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index 8e1ca8b73..d9f1b95cb 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -628,10 +628,12 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase): "local_invites", "room_account_data", "room_tags", + "state_groups", + "state_groups_state", ): count = self.get_success( self.store._simple_select_one_onecol( - table="events", + table=table, keyvalues={"room_id": room_id}, retcol="COUNT(*)", desc="test_purge_room", From f03c9d34442368c8f2c26d2ac16b770bc451c76d Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 6 Nov 2019 15:47:40 +0000 Subject: [PATCH 048/133] Don't apply retention policy based filtering on state events As per MSC1763, 'Retention is only considered for non-state events.', so don't filter out state events based on the room's retention policy. 
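For reference, the retention policy consulted by this code comes from the room's
`m.room.retention` state event (MSC1763). A minimal illustrative policy content,
expressed as the Python dict the filtering code would see:

    # Illustrative m.room.retention content; max_lifetime is in milliseconds.
    # Non-state events older than this must no longer be served to clients.
    retention_policy = {"max_lifetime": 24 * 60 * 60 * 1000}  # one day

With this change a state event older than `max_lifetime`, such as the room's
`m.room.create` event, is still returned, while an ordinary message of the same
age is filtered out.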
--- synapse/visibility.py | 15 +++++++++------ tests/rest/client/test_retention.py | 10 ++++++++++ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/synapse/visibility.py b/synapse/visibility.py index 4498c156b..4d4141dac 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -111,14 +111,17 @@ def filter_events_for_client( if not event.is_state() and event.sender in ignore_list: return None - retention_policy = retention_policies[event.room_id] - max_lifetime = retention_policy.get("max_lifetime") + # Don't try to apply the room's retention policy if the event is a state event, as + # MSC1763 states that retention is only considered for non-state events. + if not event.is_state(): + retention_policy = retention_policies[event.room_id] + max_lifetime = retention_policy.get("max_lifetime") - if max_lifetime is not None: - oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime + if max_lifetime is not None: + oldest_allowed_ts = storage.main.clock.time_msec() - max_lifetime - if event.origin_server_ts < oldest_allowed_ts: - return None + if event.origin_server_ts < oldest_allowed_ts: + return None if event.event_id in always_include_ids: return event diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 41ea9db68..7b6f25a83 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -164,6 +164,12 @@ class RetentionTestCase(unittest.HomeserverTestCase): self.assertEqual(filtered_events[0].event_id, valid_event_id, filtered_events) def _test_retention_event_purged(self, room_id, increment): + # Get the create event to, later, check that we can still access it. + message_handler = self.hs.get_message_handler() + create_event = self.get_success( + message_handler.get_room_data(self.user_id, room_id, EventTypes.Create) + ) + # Send a first event to the room. This is the event we'll want to be purged at the # end of the test. resp = self.helper.send( @@ -202,6 +208,10 @@ class RetentionTestCase(unittest.HomeserverTestCase): valid_event = self.get_event(room_id, valid_event_id) self.assertEqual(valid_event.get("content", {}).get("body"), "2", valid_event) + # Check that we can still access state events that were sent before the event that + # has been purged. + self.get_event(room_id, create_event.event_id) + def get_event(self, room_id, event_id, expected_code=200): url = "/_matrix/client/r0/rooms/%s/event/%s" % (room_id, event_id) From affcc2cc3655531351048a4ad8ac67e22d1e398d Mon Sep 17 00:00:00 2001 From: V02460 Date: Thu, 7 Nov 2019 10:43:51 +0100 Subject: [PATCH 049/133] Fix LruCache callback deduplication (#6213) --- changelog.d/6213.bugfix | 1 + synapse/util/caches/descriptors.py | 48 +++++++++++++++++++++++------- 2 files changed, 38 insertions(+), 11 deletions(-) create mode 100644 changelog.d/6213.bugfix diff --git a/changelog.d/6213.bugfix b/changelog.d/6213.bugfix new file mode 100644 index 000000000..072264fba --- /dev/null +++ b/changelog.d/6213.bugfix @@ -0,0 +1 @@ +Fix LruCache callback deduplication. 
diff --git a/synapse/util/caches/descriptors.py b/synapse/util/caches/descriptors.py index 0e8da27f5..84f5ae22c 100644 --- a/synapse/util/caches/descriptors.py +++ b/synapse/util/caches/descriptors.py @@ -17,8 +17,8 @@ import functools import inspect import logging import threading -from collections import namedtuple -from typing import Any, cast +from typing import Any, Tuple, Union, cast +from weakref import WeakValueDictionary from six import itervalues @@ -38,6 +38,8 @@ from . import register_cache logger = logging.getLogger(__name__) +CacheKey = Union[Tuple, Any] + class _CachedFunction(Protocol): invalidate = None # type: Any @@ -430,7 +432,7 @@ class CacheDescriptor(_CacheDescriptorBase): # Add our own `cache_context` to argument list if the wrapped function # has asked for one if self.add_cache_context: - kwargs["cache_context"] = _CacheContext(cache, cache_key) + kwargs["cache_context"] = _CacheContext.get_instance(cache, cache_key) try: cached_result_d = cache.get(cache_key, callback=invalidate_callback) @@ -624,14 +626,38 @@ class CacheListDescriptor(_CacheDescriptorBase): return wrapped -class _CacheContext(namedtuple("_CacheContext", ("cache", "key"))): - # We rely on _CacheContext implementing __eq__ and __hash__ sensibly, - # which namedtuple does for us (i.e. two _CacheContext are the same if - # their caches and keys match). This is important in particular to - # dedupe when we add callbacks to lru cache nodes, otherwise the number - # of callbacks would grow. - def invalidate(self): - self.cache.invalidate(self.key) +class _CacheContext: + """Holds cache information from the cached function higher in the calling order. + + Can be used to invalidate the higher level cache entry if something changes + on a lower level. + """ + + _cache_context_objects = ( + WeakValueDictionary() + ) # type: WeakValueDictionary[Tuple[Cache, CacheKey], _CacheContext] + + def __init__(self, cache, cache_key): # type: (Cache, CacheKey) -> None + self._cache = cache + self._cache_key = cache_key + + def invalidate(self): # type: () -> None + """Invalidates the cache entry referred to by the context.""" + self._cache.invalidate(self._cache_key) + + @classmethod + def get_instance(cls, cache, cache_key): # type: (Cache, CacheKey) -> _CacheContext + """Returns an instance constructed with the given arguments. + + A new instance is only created if none already exists. + """ + + # We make sure there are no identical _CacheContext instances. This is + # important in particular to dedupe when we add callbacks to lru cache + # nodes, otherwise the number of callbacks would grow. + return cls._cache_context_objects.setdefault( + (cache, cache_key), cls(cache, cache_key) + ) def cached( From b03cddaeb99437311094fbe617ae2a6bde4c5615 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 7 Nov 2019 09:46:25 +0000 Subject: [PATCH 050/133] tweak changelog --- changelog.d/6213.bugfix | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog.d/6213.bugfix b/changelog.d/6213.bugfix index 072264fba..2bb2d0885 100644 --- a/changelog.d/6213.bugfix +++ b/changelog.d/6213.bugfix @@ -1 +1 @@ -Fix LruCache callback deduplication. +Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460. 
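A note on the deduplication pattern used in the two patches above: keeping a
WeakValueDictionary registry keyed by the constructor arguments guarantees at
most one live instance per (cache, key) pair, without pinning instances in
memory after every caller has dropped them. A minimal, self-contained sketch
of the idea (a toy class, not the Synapse one):

    from weakref import WeakValueDictionary

    class Interned:
        # Registry of live instances; entries vanish automatically once the
        # last strong reference to an instance goes away.
        _instances = WeakValueDictionary()

        def __init__(self, key):
            self.key = key

        @classmethod
        def get_instance(cls, key):
            # setdefault only keeps the freshly built instance if no live
            # instance is already registered under this key.
            return cls._instances.setdefault(key, cls(key))

    a = Interned.get_instance(("cache", "key"))
    b = Interned.get_instance(("cache", "key"))
    assert a is b

Because callers now receive the identical object, the invalidation callbacks
registered against LruCache nodes compare equal by identity and can be
deduplicated instead of accumulating.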
From 3f9b61ff9504dd88cac17fb3cb097e319babd2a3 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 11:49:37 +0000 Subject: [PATCH 051/133] Fix the SQL SELECT query in _paginate_room_events_txn Doing a SELECT DISTINCT when paginating is quite expensive, because it requires the engine to do sorting on the entire events table. However, we only need to run it if we're filtering on 2+ labels, so this PR is changing the request so that DISTINCT is only used then. --- synapse/storage/data_stores/main/stream.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 616ef91d4..ef0b1426d 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -871,14 +871,25 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): args.append(int(limit)) + # Using DISTINCT in this SELECT query is quite expensive, because it requires the + # engine to sort on the entire (not limited) result set, i.e. the entire events + # table. We only need to use it when we're filtering on more than two labels, + # because that's the only scenario in which we can possibly to get multiple times + # the same event ID in the results. + if event_filter.labels and len(event_filter.labels) > 1: + select_keywords = "SELECT DISTINCT" + + else: + select_keywords = "SELECT" + sql = ( - "SELECT DISTINCT event_id, topological_ordering, stream_ordering" + "%(select_keywords)s event_id, topological_ordering, stream_ordering" " FROM events" " LEFT JOIN event_labels USING (event_id, room_id, topological_ordering)" " WHERE outlier = ? AND room_id = ? AND %(bounds)s" " ORDER BY topological_ordering %(order)s," " stream_ordering %(order)s LIMIT ?" - ) % {"bounds": bounds, "order": order} + ) % {"select_keywords": select_keywords, "bounds": bounds, "order": order} txn.execute(sql, args) From 4f519d556e32ac29a960977df4ed14c42290af5e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 11:51:54 +0000 Subject: [PATCH 052/133] Changelog --- changelog.d/6340.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6340.feature diff --git a/changelog.d/6340.feature b/changelog.d/6340.feature new file mode 100644 index 000000000..78a187a1d --- /dev/null +++ b/changelog.d/6340.feature @@ -0,0 +1 @@ +Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). From 15a1a02e70ca18d8af9a4faaf1bb40427ea6a643 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 12:04:37 +0000 Subject: [PATCH 053/133] Handle lack of filter --- synapse/storage/data_stores/main/stream.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index ef0b1426d..bb70a0f38 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -876,11 +876,9 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # table. We only need to use it when we're filtering on more than two labels, # because that's the only scenario in which we can possibly to get multiple times # the same event ID in the results. 
-        if event_filter.labels and len(event_filter.labels) > 1:
-            select_keywords = "SELECT DISTINCT"
-
-        else:
-            select_keywords = "SELECT"
+        select_keywords = "SELECT"
+        if event_filter and event_filter.labels and len(event_filter.labels) > 1:
+            select_keywords += " DISTINCT"
 
         sql = (
             "%(select_keywords)s event_id, topological_ordering, stream_ordering"

From 70804392ae42949109e55e4e51566e2545e1df63 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier <babolivier@matrix.org>
Date: Thu, 7 Nov 2019 14:55:10 +0000
Subject: [PATCH 054/133] Only join on event_labels if we're filtering on
 labels

---
 synapse/storage/data_stores/main/stream.py | 33 ++++++++++++++++------
 1 file changed, 24 insertions(+), 9 deletions(-)

diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index bb70a0f38..064f602a6 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -871,23 +871,38 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
 
         args.append(int(limit))
 
-        # Using DISTINCT in this SELECT query is quite expensive, because it requires the
-        # engine to sort on the entire (not limited) result set, i.e. the entire events
-        # table. We only need to use it when we're filtering on more than two labels,
-        # because that's the only scenario in which we can possibly to get multiple times
-        # the same event ID in the results.
         select_keywords = "SELECT"
-        if event_filter and event_filter.labels and len(event_filter.labels) > 1:
-            select_keywords += " DISTINCT"
+        join_clause = ""
+        if event_filter and event_filter.labels:
+            # If we're not filtering on a label, then joining on event_labels will
+            # return as many rows for a single event as the number of labels it has. To
+            # avoid this, only join if we're filtering on at least one label.
+            join_clause = (
+                "LEFT JOIN event_labels"
+                " USING (event_id, room_id, topological_ordering)"
+            )
+            if len(event_filter.labels) > 1:
+                # Using DISTINCT in this SELECT query is quite expensive, because it
+                # requires the engine to sort on the entire (not limited) result set,
+                # i.e. the entire events table. We only need to use it when we're
+                # filtering on more than one label, because that's the only scenario
+                # in which we could get the same event ID multiple times in the
+                # results.
+                select_keywords += " DISTINCT"
 
         sql = (
             "%(select_keywords)s event_id, topological_ordering, stream_ordering"
             " FROM events"
-            " LEFT JOIN event_labels USING (event_id, room_id, topological_ordering)"
+            " %(join_clause)s"
             " WHERE outlier = ? AND room_id = ? AND %(bounds)s"
             " ORDER BY topological_ordering %(order)s,"
             " stream_ordering %(order)s LIMIT ?"
-        ) % {"select_keywords": select_keywords, "bounds": bounds, "order": order}
+        ) % {
+            "select_keywords": select_keywords,
+            "join_clause": join_clause,
+            "bounds": bounds,
+            "order": order
+        }
 
         txn.execute(sql, args)

From b9cba079628db11951fc5ed30b03e8825eb954d7 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier <babolivier@matrix.org>
Date: Thu, 7 Nov 2019 14:57:15 +0000
Subject: [PATCH 055/133] Lint

---
 synapse/storage/data_stores/main/stream.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py
index 064f602a6..90bb2d8e8 100644
--- a/synapse/storage/data_stores/main/stream.py
+++ b/synapse/storage/data_stores/main/stream.py
@@ -876,7 +876,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
         if event_filter and event_filter.labels:
             # If we're not filtering on a label, then joining on event_labels will
             # return as many rows for a single event as the number of labels it has. To
-            # avoid this, only join if we're filtering on at least one label. 
+            # avoid this, only join if we're filtering on at least one label.
@@ -901,7 +901,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore):
             "select_keywords": select_keywords,
             "join_clause": join_clause,
             "bounds": bounds,
-            "order": order
+            "order": order,
         }
 
         txn.execute(sql, args)

From bb78276bdc77c0462696529c27fd8a6062b37afc Mon Sep 17 00:00:00 2001
From: Brendan Abolivier <babolivier@matrix.org>
Date: Thu, 7 Nov 2019 15:25:27 +0000
Subject: [PATCH 056/133] Incorporate review

---
 .../data_stores/main/events_bg_updates.py     | 23 ++++++++-----------
 1 file changed, 9 insertions(+), 14 deletions(-)

diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index 309bfcafe..9714662c1 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -525,43 +525,38 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
                 (last_event_id, batch_size),
             )
 
-            rows = self.cursor_to_dict(txn)
-            if not rows:
-                return True, 0
-
-            for row in rows:
-                event_id = row["event_id"]
-                event_json = json.loads(row["json"])
-
+            nbrows = 0
+            for (event_id, event_json) in txn:
                 self._simple_insert_many_txn(
                     txn=txn,
                     table="event_labels",
                     values=[
                         {
                             "event_id": event_id,
-                            "label": label,
+                            "label": str(label),
                             "room_id": event_json["room_id"],
                             "topological_ordering": event_json["depth"],
                         }
                         for label in event_json["content"].get(
                             EventContentFields.LABELS, []
                         )
+                        if label is not None
                     ],
                 )
 
+                nbrows += 1
+
             self._background_update_progress_txn(
                 txn, "event_store_labels", {"last_event_id": event_id}
             )
 
-            # We want to return true (to end the background update) only when
-            # the query returned with less rows than we asked for.
- return len(rows) != batch_size, len(rows) + return nbrows - end, num_rows = yield self.runInteraction( + num_rows = yield self.runInteraction( desc="event_store_labels", func=_event_store_labels_txn ) - if end: + if not num_rows: yield self._end_background_update("event_store_labels") return num_rows From ec2cb9f29829cf81f843124ef4289bc0eb88413e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 16:18:40 +0000 Subject: [PATCH 057/133] Initialise value before looping --- .../storage/data_stores/main/events_bg_updates.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index 9714662c1..bee8c9cfa 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -526,28 +526,32 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ) nbrows = 0 - for (event_id, event_json) in txn: + last_row_event_id = "" + for (event_id, event_json_raw) in txn: + event_json = json.loads(event_json_raw) + self._simple_insert_many_txn( txn=txn, table="event_labels", values=[ { "event_id": event_id, - "label": str(label), + "label": label, "room_id": event_json["room_id"], "topological_ordering": event_json["depth"], } for label in event_json["content"].get( EventContentFields.LABELS, [] ) - if label is not None + if label is not None and isinstance(label, str) ], ) nbrows += 1 + last_row_event_id = event_id self._background_update_progress_txn( - txn, "event_store_labels", {"last_event_id": event_id} + txn, "event_store_labels", {"last_event_id": last_row_event_id} ) return nbrows From 1186612d6cd728e6b7ed7806579db3cea7410b54 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 16:46:41 +0000 Subject: [PATCH 058/133] Back to using cursor_to_dict --- .../data_stores/main/events_bg_updates.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index bee8c9cfa..b858e3ac1 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -525,10 +525,13 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): (last_event_id, batch_size), ) - nbrows = 0 - last_row_event_id = "" - for (event_id, event_json_raw) in txn: - event_json = json.loads(event_json_raw) + rows = self.cursor_to_dict(txn) + if not len(rows): + return 0 + + for row in rows: + event_id = row["event_id"] + event_json = json.loads(row["event_json"]) self._simple_insert_many_txn( txn=txn, @@ -547,14 +550,11 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ], ) - nbrows += 1 - last_row_event_id = event_id - self._background_update_progress_txn( - txn, "event_store_labels", {"last_event_id": last_row_event_id} + txn, "event_store_labels", {"last_event_id": event_id} ) - return nbrows + return len(rows) num_rows = yield self.runInteraction( desc="event_store_labels", func=_event_store_labels_txn From cd312012676d39da8d0383cba167d1211378fece Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 16:47:15 +0000 Subject: [PATCH 059/133] Revert "Back to using cursor_to_dict" This reverts commit 1186612d6cd728e6b7ed7806579db3cea7410b54. 
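The underlying issue in this back-and-forth: the loop iterated the cursor
lazily while also issuing further statements on it, and with the database
drivers in play here a new execute() on the same cursor invalidates any rows
the previous SELECT had not yet yielded. The next commit sidesteps this by
copying the results out first; a sketch of both variants, using a simplified
query and schema rather than the real ones:

    # Hazard: the INSERT reuses `txn` and discards the SELECT's unread rows.
    txn.execute("SELECT event_id, json FROM event_json LIMIT ?", (batch_size,))
    for event_id, _event_json_raw in txn:
        txn.execute("INSERT INTO event_labels (event_id) VALUES (?)", (event_id,))

    # Safe: materialise the result set before reusing the cursor.
    txn.execute("SELECT event_id, json FROM event_json LIMIT ?", (batch_size,))
    results = list(txn)
    for event_id, _event_json_raw in results:
        txn.execute("INSERT INTO event_labels (event_id) VALUES (?)", (event_id,))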
--- .../data_stores/main/events_bg_updates.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index b858e3ac1..bee8c9cfa 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -525,13 +525,10 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): (last_event_id, batch_size), ) - rows = self.cursor_to_dict(txn) - if not len(rows): - return 0 - - for row in rows: - event_id = row["event_id"] - event_json = json.loads(row["event_json"]) + nbrows = 0 + last_row_event_id = "" + for (event_id, event_json_raw) in txn: + event_json = json.loads(event_json_raw) self._simple_insert_many_txn( txn=txn, @@ -550,11 +547,14 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): ], ) + nbrows += 1 + last_row_event_id = event_id + self._background_update_progress_txn( - txn, "event_store_labels", {"last_event_id": event_id} + txn, "event_store_labels", {"last_event_id": last_row_event_id} ) - return len(rows) + return nbrows num_rows = yield self.runInteraction( desc="event_store_labels", func=_event_store_labels_txn From c9b27d00445f84ebfbd4115e3177a10513aadcfc Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 16:47:45 +0000 Subject: [PATCH 060/133] Copy results --- synapse/storage/data_stores/main/events_bg_updates.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index bee8c9cfa..a4cb64f47 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -525,9 +525,11 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): (last_event_id, batch_size), ) + results = list(txn) + nbrows = 0 last_row_event_id = "" - for (event_id, event_json_raw) in txn: + for (event_id, event_json_raw) in results: event_json = json.loads(event_json_raw) self._simple_insert_many_txn( From 6d360f099f3ea09c90795e5a6c4fc07dbffd874e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 17:01:43 +0000 Subject: [PATCH 061/133] Update synapse/storage/data_stores/main/events_bg_updates.py Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/data_stores/main/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py index a4cb64f47..18862adb9 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -545,7 +545,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): for label in event_json["content"].get( EventContentFields.LABELS, [] ) - if label is not None and isinstance(label, str) + if isinstance(label, str) ], ) From dad8d68c9938e7d09eefd6aa1b633494ab89f719 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 7 Nov 2019 17:01:53 +0000 Subject: [PATCH 062/133] Update synapse/storage/data_stores/main/events_bg_updates.py Co-Authored-By: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> --- synapse/storage/data_stores/main/events_bg_updates.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/storage/data_stores/main/events_bg_updates.py 
b/synapse/storage/data_stores/main/events_bg_updates.py index 18862adb9..0ed59ef48 100644 --- a/synapse/storage/data_stores/main/events_bg_updates.py +++ b/synapse/storage/data_stores/main/events_bg_updates.py @@ -511,7 +511,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore): @defer.inlineCallbacks def _event_store_labels(self, progress, batch_size): - """Stores labels for events.""" + """Background update handler which will store labels for existing events.""" last_event_id = progress.get("last_event_id", "") def _event_store_labels_txn(txn): From c5abb67e432b9279c838f7b9318144ca8f2b7c0d Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 7 Nov 2019 17:14:13 +0000 Subject: [PATCH 063/133] Python 3.8 for tox (#6341) ... and update INSTALL.md to include py3.8. We'll also have to update the buildkite pipeline to run it --- INSTALL.md | 2 +- changelog.d/6341.misc | 1 + tox.ini | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6341.misc diff --git a/INSTALL.md b/INSTALL.md index e7b429c05..29e0abafd 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -36,7 +36,7 @@ that your email address is probably `user@example.com` rather than System requirements: - POSIX-compliant system (tested on Linux & OS X) -- Python 3.5, 3.6, or 3.7 +- Python 3.5, 3.6, 3.7 or 3.8. - At least 1GB of free RAM if you want to join large public rooms like #matrix:matrix.org Synapse is written in Python but some of the libraries it uses are written in diff --git a/changelog.d/6341.misc b/changelog.d/6341.misc new file mode 100644 index 000000000..359b9bf1d --- /dev/null +++ b/changelog.d/6341.misc @@ -0,0 +1 @@ +Add continuous integration for python 3.8. \ No newline at end of file diff --git a/tox.ini b/tox.ini index afe9bc909..62b350ea6 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = packaging, py35, py36, py37, check_codestyle, check_isort +envlist = packaging, py35, py36, py37, py38, check_codestyle, check_isort [base] basepython = python3.7 From e4ec82ce0fdd17f912fad76defd53ec99f782528 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 8 Nov 2019 09:50:48 +0000 Subject: [PATCH 064/133] Move type annotation into docstring --- synapse/storage/data_stores/main/events.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 946823876..878f7568a 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -19,7 +19,6 @@ import itertools import logging from collections import Counter as c_counter, OrderedDict, namedtuple from functools import wraps -from typing import Collection from six import iteritems, text_type from six.moves import range @@ -1737,7 +1736,7 @@ class EventsStore( return state_groups def purge_unreferenced_state_groups( - self, room_id: str, state_groups_to_delete: Collection[int] + self, room_id: str, state_groups_to_delete ) -> defer.Deferred: """Deletes no longer referenced state groups and de-deltas any state groups that reference them. @@ -1745,7 +1744,8 @@ class EventsStore( Args: room_id: The room the state groups belong to (must all be in the same room). - state_groups_to_delete: Set of all state groups to delete. + state_groups_to_delete (Collection[int]): Set of all state groups + to delete. 
""" return self.runInteraction( From b16fa433860824560c64acf594aa6c891e5f1a0a Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Fri, 8 Nov 2019 10:34:09 +0000 Subject: [PATCH 065/133] Incorporate review --- synapse/storage/data_stores/main/stream.py | 24 +++++++++++----------- 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 90bb2d8e8..8780fdd98 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -877,10 +877,10 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # If we're not filtering on a label, then joining on event_labels will # return as many row for a single event as the number of labels it has. To # avoid this, only join if we're filtering on at least one label. - join_clause = ( - "LEFT JOIN event_labels" - " USING (event_id, room_id, topological_ordering)" - ) + join_clause = """ + LEFT JOIN event_labels + USING (event_id, room_id, topological_ordering) + """ if len(event_filter.labels) > 1: # Using DISTINCT in this SELECT query is quite expensive, because it # requires the engine to sort on the entire (not limited) result set, @@ -890,14 +890,14 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): # the results. select_keywords += "DISTINCT" - sql = ( - "%(select_keywords)s event_id, topological_ordering, stream_ordering" - " FROM events" - " %(join_clause)s" - " WHERE outlier = ? AND room_id = ? AND %(bounds)s" - " ORDER BY topological_ordering %(order)s," - " stream_ordering %(order)s LIMIT ?" - ) % { + sql = """ + %(select_keywords)s event_id, topological_ordering, stream_ordering + FROM events + %(join_clause)s + WHERE outlier = ? AND room_id = ? AND %(bounds)s + ORDER BY topological_ordering %(order)s, + stream_ordering %(order)s LIMIT ? + """ % { "select_keywords": select_keywords, "join_clause": join_clause, "bounds": bounds, From 772d414975608c03d5690e2d8f65c7f382403a99 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 11 Oct 2019 16:05:21 +0100 Subject: [PATCH 066/133] Simplify _update_auth_events_and_context_for_auth move event_key calculation into _update_context_for_auth_events, since it's only used there. --- synapse/handlers/federation.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 05dd8d267..ab152e8dc 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2098,11 +2098,6 @@ class FederationHandler(BaseHandler): """ event_auth_events = set(event.auth_event_ids()) - if event.is_state(): - event_key = (event.type, event.state_key) - else: - event_key = None - # if the event's auth_events refers to events which are not in our # calculated auth_events, we need to fetch those events from somewhere. # @@ -2231,13 +2226,13 @@ class FederationHandler(BaseHandler): auth_events.update(new_state) context = yield self._update_context_for_auth_events( - event, context, auth_events, event_key + event, context, auth_events ) return context @defer.inlineCallbacks - def _update_context_for_auth_events(self, event, context, auth_events, event_key): + def _update_context_for_auth_events(self, event, context, auth_events): """Update the state_ids in an event context after auth event resolution, storing the changes as a new state group. 
@@ -2246,18 +2241,21 @@ class FederationHandler(BaseHandler):
             context (synapse.events.snapshot.EventContext): initial event context
 
-            auth_events (dict[(str, str)->str]): Events to update in the event
+            auth_events (dict[(str, str)->EventBase]): Events to update in the event
                 context.
 
-            event_key ((str, str)): (type, state_key) for the current event.
-                this will not be included in the current_state in the context.
-
         Returns:
             Deferred[EventContext]: new event context
         """
+        # exclude the state key of the new event from the current_state in the context.
+        if event.is_state():
+            event_key = (event.type, event.state_key)
+        else:
+            event_key = None
         state_updates = {
             k: a.event_id for k, a in iteritems(auth_events) if k != event_key
         }
+
         current_state_ids = yield context.get_current_state_ids(self.store)
         current_state_ids = dict(current_state_ids)
 
From f8407975e7ad080d04a028771ae5a84590a19da1 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <richard@matrix.org>
Date: Fri, 8 Nov 2019 12:18:20 +0000
Subject: [PATCH 067/133] Update some docstrings and comments

---
 synapse/handlers/federation.py | 39 +++++++++++++++++++++++++++-------
 1 file changed, 31 insertions(+), 8 deletions(-)

diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index ab152e8dc..4bc4d57ef 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -2040,8 +2040,10 @@ class FederationHandler(BaseHandler):
         auth_events (dict[(str, str)->synapse.events.EventBase]):
             Map from (event_type, state_key) to event
 
-            What we expect the event's auth_events to be, based on the event's
-            position in the dag. I think? maybe??
+            Normally these are our calculated auth_events based on the state
+            of the room at the event's position in the DAG, though
+            occasionally (eg if the event is an outlier) they may be the auth
+            events claimed by the remote server.
 
             Also NB that this function adds entries to it.
 
         Returns:
@@ -2091,25 +2093,35 @@ class FederationHandler(BaseHandler):
             origin (str):
             event (synapse.events.EventBase):
             context (synapse.events.snapshot.EventContext):
+            auth_events (dict[(str, str)->synapse.events.EventBase]):
+                Map from (event_type, state_key) to event
+
+                Normally these are our calculated auth_events based on the
+                state of the room at the event's position in the DAG, though
+                occasionally (eg if the event is an outlier) they may be the
+                auth events claimed by the remote server.
+
+                Also NB that this function adds entries to it.
 
         Returns:
             defer.Deferred[EventContext]: updated context
         """
         event_auth_events = set(event.auth_event_ids())
 
-        # if the event's auth_events refers to events which are not in our
-        # calculated auth_events, we need to fetch those events from somewhere.
-        #
-        # we start by fetching them from the store, and then try calling /event_auth/.
+        # missing_auth is the set of the event's auth_events which we don't yet have
+        # in auth_events.
         missing_auth = event_auth_events.difference(
             e.event_id for e in auth_events.values()
         )
 
+        # if we have missing events, we need to fetch those events from somewhere.
+        #
+        # we start by checking if they are in the store, and then try calling /event_auth/.
         if missing_auth:
             # TODO: can we use store.have_seen_events here instead?
have_events = yield self.store.get_seen_events_with_rejections(missing_auth) - logger.debug("Got events %s from store", have_events) + logger.debug("Found events %s in the store", have_events) missing_auth.difference_update(have_events.keys()) else: have_events = {} @@ -2164,15 +2176,23 @@ class FederationHandler(BaseHandler): event.auth_event_ids() ) except Exception: - # FIXME: logger.exception("Failed to get auth chain") if event.internal_metadata.is_outlier(): + # XXX: given that, for an outlier, we'll be working with the + # event's *claimed* auth events rather than those we calculated: + # (a) is there any point in this test, since different_auth below will + # obviously be empty + # (b) alternatively, why don't we do it earlier? logger.info("Skipping auth_event fetch for outlier") return context # FIXME: Assumes we have and stored all the state for all the # prev_events + # + # FIXME: what does the fixme above mean? where do prev_events come into + # it, why do we care about the state for those events, and what does "have and + # stored" mean? Seems erik wrote it in c1d860870b different_auth = event_auth_events.difference( e.event_id for e in auth_events.values() ) @@ -2186,6 +2206,9 @@ class FederationHandler(BaseHandler): different_auth, ) + # now we state-resolve between our own idea of the auth events, and the remote's + # idea of them. + room_version = yield self.store.get_room_version(event.room_id) different_events = yield make_deferred_yieldable( From f41027f74678f35ad9e9eb2531c416dd58a65127 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 8 Nov 2019 12:21:28 +0000 Subject: [PATCH 068/133] Use get_events_as_list rather than lots of calls to get_event It's more efficient and clearer. --- synapse/handlers/federation.py | 24 ++++++++---------------- 1 file changed, 8 insertions(+), 16 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 4bc4d57ef..3d4197ed6 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2210,26 +2210,18 @@ class FederationHandler(BaseHandler): # idea of them. room_version = yield self.store.get_room_version(event.room_id) + different_event_ids = [ + d for d in different_auth if d in have_events and not have_events[d] + ] - different_events = yield make_deferred_yieldable( - defer.gatherResults( - [ - run_in_background( - self.store.get_event, d, allow_none=True, allow_rejected=False - ) - for d in different_auth - if d in have_events and not have_events[d] - ], - consumeErrors=True, - ) - ).addErrback(unwrapFirstError) + if different_event_ids: + # XXX: currently this checks for redactions but I'm not convinced that is + # necessary? 
+ different_events = yield self.store.get_events_as_list(different_event_ids) - if different_events: local_view = dict(auth_events) remote_view = dict(auth_events) - remote_view.update( - {(d.type, d.state_key): d for d in different_events if d} - ) + remote_view.update({(d.type, d.state_key): d for d in different_events}) new_state = yield self.state_handler.resolve_events( room_version, From c4bdf2d7855dcddb7e294e9dba458884db2be7e0 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 8 Nov 2019 11:42:55 +0000 Subject: [PATCH 069/133] Remove content from being sent for account data rdata stream --- synapse/replication/tcp/streams/_base.py | 6 +++--- synapse/storage/data_stores/main/account_data.py | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index 9e45429d4..dd8773384 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -89,7 +89,7 @@ TagAccountDataStreamRow = namedtuple( ) AccountDataStreamRow = namedtuple( "AccountDataStream", - ("user_id", "room_id", "data_type", "data"), # str # str # str # dict + ("user_id", "room_id", "data_type"), # str # str # str ) GroupsStreamRow = namedtuple( "GroupsStreamRow", @@ -421,8 +421,8 @@ class AccountDataStream(Stream): results = list(room_results) results.extend( - (stream_id, user_id, None, account_data_type, content) - for stream_id, user_id, account_data_type, content in global_results + (stream_id, user_id, None, account_data_type) + for stream_id, user_id, account_data_type in global_results ) return results diff --git a/synapse/storage/data_stores/main/account_data.py b/synapse/storage/data_stores/main/account_data.py index 6afbfc0d7..22093484e 100644 --- a/synapse/storage/data_stores/main/account_data.py +++ b/synapse/storage/data_stores/main/account_data.py @@ -184,14 +184,14 @@ class AccountDataWorkerStore(SQLBaseStore): current_id(int): The position to fetch up to. Returns: A deferred pair of lists of tuples of stream_id int, user_id string, - room_id string, type string, and content string. + room_id string, and type string. """ if last_room_id == current_id and last_global_id == current_id: return defer.succeed(([], [])) def get_updated_account_data_txn(txn): sql = ( - "SELECT stream_id, user_id, account_data_type, content" + "SELECT stream_id, user_id, account_data_type" " FROM account_data WHERE ? < stream_id AND stream_id <= ?" " ORDER BY stream_id ASC LIMIT ?" ) @@ -199,7 +199,7 @@ class AccountDataWorkerStore(SQLBaseStore): global_results = txn.fetchall() sql = ( - "SELECT stream_id, user_id, room_id, account_data_type, content" + "SELECT stream_id, user_id, room_id, account_data_type" " FROM room_account_data WHERE ? < stream_id AND stream_id <= ?" " ORDER BY stream_id ASC LIMIT ?" ) From 318dd21b4767039014f73917a4dddb9fc885bc56 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 8 Nov 2019 15:45:08 +0000 Subject: [PATCH 070/133] Add changelog --- changelog.d/6333.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6333.bugfix diff --git a/changelog.d/6333.bugfix b/changelog.d/6333.bugfix new file mode 100644 index 000000000..a25d6ef3c --- /dev/null +++ b/changelog.d/6333.bugfix @@ -0,0 +1 @@ +Prevent account data syncs getting lost across TCP replication. 
\ No newline at end of file From cd96b4586f8dfa8a6857e1bee7de2bd9660238d5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Fri, 8 Nov 2019 15:45:45 +0000 Subject: [PATCH 071/133] lint --- synapse/replication/tcp/streams/_base.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/synapse/replication/tcp/streams/_base.py b/synapse/replication/tcp/streams/_base.py index dd8773384..8512923ea 100644 --- a/synapse/replication/tcp/streams/_base.py +++ b/synapse/replication/tcp/streams/_base.py @@ -88,8 +88,7 @@ TagAccountDataStreamRow = namedtuple( "TagAccountDataStreamRow", ("user_id", "room_id", "data") # str # str # dict ) AccountDataStreamRow = namedtuple( - "AccountDataStream", - ("user_id", "room_id", "data_type"), # str # str # str + "AccountDataStream", ("user_id", "room_id", "data_type") # str # str # str ) GroupsStreamRow = namedtuple( "GroupsStreamRow", From 20d687516fdc34a4be19dcbbadd8a9a9726203e4 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Fri, 8 Nov 2019 16:17:02 +0000 Subject: [PATCH 072/133] newsfile --- changelog.d/6343.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6343.misc diff --git a/changelog.d/6343.misc b/changelog.d/6343.misc new file mode 100644 index 000000000..d9a44389b --- /dev/null +++ b/changelog.d/6343.misc @@ -0,0 +1 @@ +Refactor some code in the event authentication path for clarity. From bc29a19731c518dbd70f3adefc66061fb4629cee Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 12 Nov 2019 13:08:12 +0000 Subject: [PATCH 073/133] Replace instance variations of homeserver with correct case/spacing --- synapse/__init__.py | 2 +- synapse/_scripts/register_new_matrix_user.py | 6 +++--- synapse/api/errors.py | 2 +- synapse/config/captcha.py | 4 ++-- synapse/config/emailconfig.py | 2 +- synapse/config/server.py | 2 +- synapse/federation/federation_client.py | 6 +++--- synapse/federation/transport/__init__.py | 4 ++-- synapse/federation/transport/client.py | 6 +++--- synapse/federation/transport/server.py | 2 +- synapse/handlers/auth.py | 4 ++-- synapse/handlers/directory.py | 2 +- synapse/handlers/federation.py | 4 ++-- synapse/handlers/profile.py | 6 +++--- synapse/handlers/register.py | 2 +- synapse/handlers/typing.py | 4 ++-- synapse/http/matrixfederationclient.py | 2 +- synapse/util/httpresourcetree.py | 2 +- 18 files changed, 31 insertions(+), 31 deletions(-) diff --git a/synapse/__init__.py b/synapse/__init__.py index ec16f54a4..1c27d6800 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -14,7 +14,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" This is a reference implementation of a Matrix home server. +""" This is a reference implementation of a Matrix homeserver. """ import os diff --git a/synapse/_scripts/register_new_matrix_user.py b/synapse/_scripts/register_new_matrix_user.py index bdcd915bb..d528450c7 100644 --- a/synapse/_scripts/register_new_matrix_user.py +++ b/synapse/_scripts/register_new_matrix_user.py @@ -144,8 +144,8 @@ def main(): logging.captureWarnings(True) parser = argparse.ArgumentParser( - description="Used to register new users with a given home server when" - " registration has been disabled. The home server must be" + description="Used to register new users with a given homeserver when" + " registration has been disabled. The homeserver must be" " configured with the 'registration_shared_secret' option" " set." 
) @@ -202,7 +202,7 @@ def main(): "server_url", default="https://localhost:8448", nargs="?", - help="URL to use to talk to the home server. Defaults to " + help="URL to use to talk to the homeserver. Defaults to " " 'https://localhost:8448'.", ) diff --git a/synapse/api/errors.py b/synapse/api/errors.py index cca92c34b..5853a54c9 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -457,7 +457,7 @@ def cs_error(msg, code=Codes.UNKNOWN, **kwargs): class FederationError(RuntimeError): - """ This class is used to inform remote home servers about erroneous + """ This class is used to inform remote homeservers about erroneous PDUs they sent us. FATAL: The remote server could not interpret the source event. diff --git a/synapse/config/captcha.py b/synapse/config/captcha.py index 44bd5c679..f0171bb5b 100644 --- a/synapse/config/captcha.py +++ b/synapse/config/captcha.py @@ -35,11 +35,11 @@ class CaptchaConfig(Config): ## Captcha ## # See docs/CAPTCHA_SETUP for full details of configuring this. - # This Home Server's ReCAPTCHA public key. + # This homeserver's ReCAPTCHA public key. # #recaptcha_public_key: "YOUR_PUBLIC_KEY" - # This Home Server's ReCAPTCHA private key. + # This homeserver's ReCAPTCHA private key. # #recaptcha_private_key: "YOUR_PRIVATE_KEY" diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 39e7a1ddd..43fad0bf8 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -305,7 +305,7 @@ class EmailConfig(Config): # smtp_user: "exampleusername" # smtp_pass: "examplepassword" # require_transport_security: false - # notif_from: "Your Friendly %(app)s Home Server " + # notif_from: "Your Friendly %(app)s homeserver " # app_name: Matrix # # # Enable email notifications by default diff --git a/synapse/config/server.py b/synapse/config/server.py index d556df308..a04e600fd 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -781,7 +781,7 @@ class ServerConfig(Config): "--daemonize", action="store_true", default=None, - help="Daemonize the home server", + help="Daemonize the homeserver", ) server_group.add_argument( "--print-pidfile", diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 545d71965..27f6aff00 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -177,7 +177,7 @@ class FederationClient(FederationBase): given destination server. Args: - dest (str): The remote home server to ask. + dest (str): The remote homeserver to ask. room_id (str): The room_id to backfill. limit (int): The maximum number of PDUs to return. extremities (list): List of PDU id and origins of the first pdus @@ -227,7 +227,7 @@ class FederationClient(FederationBase): one succeeds. Args: - destinations (list): Which home servers to query + destinations (list): Which homeservers to query event_id (str): event to fetch room_version (str): version of the room outlier (bool): Indicates whether the PDU is an `outlier`, i.e. if @@ -312,7 +312,7 @@ class FederationClient(FederationBase): @defer.inlineCallbacks @log_function def get_state_for_room(self, destination, room_id, event_id): - """Requests all of the room state at a given event from a remote home server. + """Requests all of the room state at a given event from a remote homeserver. Args: destination (str): The remote homeserver to query for the state. 
diff --git a/synapse/federation/transport/__init__.py b/synapse/federation/transport/__init__.py index d9fcc520a..5db733af9 100644 --- a/synapse/federation/transport/__init__.py +++ b/synapse/federation/transport/__init__.py @@ -14,9 +14,9 @@ # limitations under the License. """The transport layer is responsible for both sending transactions to remote -home servers and receiving a variety of requests from other home servers. +homeservers and receiving a variety of requests from other homeservers. -By default this is done over HTTPS (and all home servers are required to +By default this is done over HTTPS (and all homeservers are required to support HTTPS), however individual pairings of servers may decide to communicate over a different (albeit still reliable) protocol. """ diff --git a/synapse/federation/transport/client.py b/synapse/federation/transport/client.py index 920fa8685..dc95ab211 100644 --- a/synapse/federation/transport/client.py +++ b/synapse/federation/transport/client.py @@ -44,7 +44,7 @@ class TransportLayerClient(object): given event. Args: - destination (str): The host name of the remote home server we want + destination (str): The host name of the remote homeserver we want to get the state from. context (str): The name of the context we want the state of event_id (str): The event we want the context at. @@ -68,7 +68,7 @@ class TransportLayerClient(object): given event. Returns the state's event_id's Args: - destination (str): The host name of the remote home server we want + destination (str): The host name of the remote homeserver we want to get the state from. context (str): The name of the context we want the state of event_id (str): The event we want the context at. @@ -91,7 +91,7 @@ class TransportLayerClient(object): """ Requests the pdu with give id and origin from the given server. Args: - destination (str): The host name of the remote home server we want + destination (str): The host name of the remote homeserver we want to get the state from. event_id (str): The id of the event being requested. timeout (int): How long to try (in ms) the destination for before diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py index d6c23f22b..09baa9c57 100644 --- a/synapse/federation/transport/server.py +++ b/synapse/federation/transport/server.py @@ -714,7 +714,7 @@ class PublicRoomList(BaseFederationServlet): This API returns information in the same format as /publicRooms on the client API, but will only ever include local public rooms and hence is - intended for consumption by other home servers. + intended for consumption by other homeservers. GET /publicRooms HTTP/1.1 diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 7a0f54ca2..c9d0db482 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -223,7 +223,7 @@ class AuthHandler(BaseHandler): # could continue registration from your phone having clicked the # email auth link on there). It's probably too open to abuse # because it lets unauthenticated clients store arbitrary objects - # on a home server. + # on a homeserver. # Revisit: Assumimg the REST APIs do sensible validation, the data # isn't arbintrary. session["clientdict"] = clientdict @@ -810,7 +810,7 @@ class AuthHandler(BaseHandler): @defer.inlineCallbacks def add_threepid(self, user_id, medium, address, validated_at): # 'Canonicalise' email addresses down to lower case. 
-        # We've now moving towards the Home Server being the entity that
+        # We're now moving towards the homeserver being the entity that
         # is responsible for validating threepids used for resetting passwords
         # on accounts, so in future Synapse will gain knowledge of specific
         # types (mediums) of threepid. For now, we still use the existing
diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py
index c4632f898..69051101a 100644
--- a/synapse/handlers/directory.py
+++ b/synapse/handlers/directory.py
@@ -283,7 +283,7 @@ class DirectoryHandler(BaseHandler):
     def on_directory_query(self, args):
         room_alias = RoomAlias.from_string(args["room_alias"])
         if not self.hs.is_mine(room_alias):
-            raise SynapseError(400, "Room Alias is not hosted on this Home Server")
+            raise SynapseError(400, "Room Alias is not hosted on this homeserver")
 
         result = yield self.get_association_from_room_alias(room_alias)
 
diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py
index 05dd8d267..0e904f2da 100644
--- a/synapse/handlers/federation.py
+++ b/synapse/handlers/federation.py
@@ -97,9 +97,9 @@ class FederationHandler(BaseHandler):
     """Handles events that originated from federation.
        Responsible for:
        a) handling received Pdus before handing them on as Events to the rest
-          of the home server (including auth and state conflict resoultion)
+          of the homeserver (including auth and state conflict resolution)
        b) converting events that were produced by local clients that may need
-          to be sent to remote home servers.
+          to be sent to remote homeservers.
        c) doing the necessary dances to invite remote users and join remote
           rooms.
     """
diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py
index 22e0a04da..1e5a4613c 100644
--- a/synapse/handlers/profile.py
+++ b/synapse/handlers/profile.py
@@ -152,7 +152,7 @@ class BaseProfileHandler(BaseHandler):
             by_admin (bool): Whether this change was made by an administrator.
""" if not self.hs.is_mine(target_user): - raise SynapseError(400, "User is not hosted on this Home Server") + raise SynapseError(400, "User is not hosted on this homeserver") if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's displayname") @@ -207,7 +207,7 @@ class BaseProfileHandler(BaseHandler): """target_user is the user whose avatar_url is to be changed; auth_user is the user attempting to make this change.""" if not self.hs.is_mine(target_user): - raise SynapseError(400, "User is not hosted on this Home Server") + raise SynapseError(400, "User is not hosted on this homeserver") if not by_admin and target_user != requester.user: raise AuthError(400, "Cannot set another user's avatar_url") @@ -231,7 +231,7 @@ class BaseProfileHandler(BaseHandler): def on_profile_query(self, args): user = UserID.from_string(args["user_id"]) if not self.hs.is_mine(user): - raise SynapseError(400, "User is not hosted on this Home Server") + raise SynapseError(400, "User is not hosted on this homeserver") just_field = args.get("field", None) diff --git a/synapse/handlers/register.py b/synapse/handlers/register.py index 235f11c32..95806af41 100644 --- a/synapse/handlers/register.py +++ b/synapse/handlers/register.py @@ -630,7 +630,7 @@ class RegistrationHandler(BaseHandler): # And we add an email pusher for them by default, but only # if email notifications are enabled (so people don't start # getting mail spam where they weren't before if email - # notifs are set up on a home server) + # notifs are set up on a homeserver) if ( self.hs.config.email_enable_notifs and self.hs.config.email_notif_for_new_users diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index ca8ae9fb5..856337b7e 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -120,7 +120,7 @@ class TypingHandler(object): auth_user_id = auth_user.to_string() if not self.is_mine_id(target_user_id): - raise SynapseError(400, "User is not hosted on this Home Server") + raise SynapseError(400, "User is not hosted on this homeserver") if target_user_id != auth_user_id: raise AuthError(400, "Cannot set another user's typing state") @@ -150,7 +150,7 @@ class TypingHandler(object): auth_user_id = auth_user.to_string() if not self.is_mine_id(target_user_id): - raise SynapseError(400, "User is not hosted on this Home Server") + raise SynapseError(400, "User is not hosted on this homeserver") if target_user_id != auth_user_id: raise AuthError(400, "Cannot set another user's typing state") diff --git a/synapse/http/matrixfederationclient.py b/synapse/http/matrixfederationclient.py index 691380abd..16765d54e 100644 --- a/synapse/http/matrixfederationclient.py +++ b/synapse/http/matrixfederationclient.py @@ -530,7 +530,7 @@ class MatrixFederationHttpClient(object): """ Builds the Authorization headers for a federation request Args: - destination (bytes|None): The desination home server of the request. + destination (bytes|None): The desination homeserver of the request. May be None if the destination is an identity server, in which case destination_is must be non-None. method (bytes): The HTTP method of the request diff --git a/synapse/util/httpresourcetree.py b/synapse/util/httpresourcetree.py index 1a20c596b..3c0e8469f 100644 --- a/synapse/util/httpresourcetree.py +++ b/synapse/util/httpresourcetree.py @@ -20,7 +20,7 @@ logger = logging.getLogger(__name__) def create_resource_tree(desired_tree, root_resource): - """Create the resource tree for this Home Server. 
+ """Create the resource tree for this homeserver. This in unduly complicated because Twisted does not support putting child resources more than 1 level deep at a time. From 73d091be48c6bfa1c41a8d31068d21fc28c26f6e Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 12 Nov 2019 13:12:25 +0000 Subject: [PATCH 074/133] A couple more instances --- synapse/config/server.py | 2 +- synapse/logging/_terse_json.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index a04e600fd..a1bd3c0ae 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -721,7 +721,7 @@ class ServerConfig(Config): # Used by phonehome stats to group together related servers. #server_context: context - # Resource-constrained Homeserver Settings + # Resource-constrained homeserver Settings # # If limit_remote_rooms.enabled is True, the room complexity will be # checked before a user joins a new remote room. If it is above diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 0ebbde06f..76ce7d880 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -153,7 +153,7 @@ class TerseJSONToTCPLogObserver(object): An IObserver that writes JSON logs to a TCP target. Args: - hs (HomeServer): The Homeserver that is being logged for. + hs (HomeServer): The homeserver that is being logged for. host: The host of the logging target. port: The logging target's port. metadata: Metadata to be added to each log entry. From 85f172ef968a9726cd62bc61fe98e39cdf017e15 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 12 Nov 2019 13:13:19 +0000 Subject: [PATCH 075/133] Add changelog --- changelog.d/6357.misc | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6357.misc diff --git a/changelog.d/6357.misc b/changelog.d/6357.misc new file mode 100644 index 000000000..a68df0f38 --- /dev/null +++ b/changelog.d/6357.misc @@ -0,0 +1 @@ +Correct spacing/case of various instances of the word "homeserver". \ No newline at end of file From e1648dc5763bda2cf10daafa5beebb4fbdfd2cb5 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 12 Nov 2019 13:15:59 +0000 Subject: [PATCH 076/133] sample config --- docs/sample_config.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml index d2f4aff82..da7e5f2e2 100644 --- a/docs/sample_config.yaml +++ b/docs/sample_config.yaml @@ -287,7 +287,7 @@ listeners: # Used by phonehome stats to group together related servers. #server_context: context -# Resource-constrained Homeserver Settings +# Resource-constrained homeserver Settings # # If limit_remote_rooms.enabled is True, the room complexity will be # checked before a user joins a new remote room. If it is above @@ -743,11 +743,11 @@ uploads_path: "DATADIR/uploads" ## Captcha ## # See docs/CAPTCHA_SETUP for full details of configuring this. -# This Home Server's ReCAPTCHA public key. +# This homeserver's ReCAPTCHA public key. # #recaptcha_public_key: "YOUR_PUBLIC_KEY" -# This Home Server's ReCAPTCHA private key. +# This homeserver's ReCAPTCHA private key. 
# #recaptcha_private_key: "YOUR_PRIVATE_KEY" @@ -1270,7 +1270,7 @@ password_config: # smtp_user: "exampleusername" # smtp_pass: "examplepassword" # require_transport_security: false -# notif_from: "Your Friendly %(app)s Home Server " +# notif_from: "Your Friendly %(app)s homeserver " # app_name: Matrix # # # Enable email notifications by default From c350bc2f92d87e46a40f917f65c9e10e0f4999fc Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 13 Nov 2019 19:09:20 +0000 Subject: [PATCH 077/133] Blacklist PurgeRoomTestCase (#6361) --- changelog.d/6361.misc | 1 + tests/rest/admin/test_admin.py | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/6361.misc diff --git a/changelog.d/6361.misc b/changelog.d/6361.misc new file mode 100644 index 000000000..324d74ebf --- /dev/null +++ b/changelog.d/6361.misc @@ -0,0 +1 @@ +Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room. diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py index d9f1b95cb..957505825 100644 --- a/tests/rest/admin/test_admin.py +++ b/tests/rest/admin/test_admin.py @@ -641,3 +641,5 @@ class PurgeRoomTestCase(unittest.HomeserverTestCase): ) self.assertEqual(count, 0, msg="Rows not purged in {}".format(table)) + + test_purge_room.skip = "Disabled because it's currently broken" From 745a48625d9760374a7d683441185fa8bd2a2aac Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 14 Nov 2019 12:02:05 +0000 Subject: [PATCH 078/133] Fix guest -> real account upgrade with account validity enabled (#6359) --- changelog.d/6359.bugfix | 1 + synapse/storage/_base.py | 9 +++------ 2 files changed, 4 insertions(+), 6 deletions(-) create mode 100644 changelog.d/6359.bugfix diff --git a/changelog.d/6359.bugfix b/changelog.d/6359.bugfix new file mode 100644 index 000000000..22bf5f642 --- /dev/null +++ b/changelog.d/6359.bugfix @@ -0,0 +1 @@ +Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. \ No newline at end of file diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 1a2b7ebe2..ab596fa68 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -361,14 +361,11 @@ class SQLBaseStore(object): expiration_ts, ) - self._simple_insert_txn( + self._simple_upsert_txn( txn, "account_validity", - values={ - "user_id": user_id, - "expiration_ts_ms": expiration_ts, - "email_sent": False, - }, + keyvalues={"user_id": user_id}, + values={"expiration_ts_ms": expiration_ts, "email_sent": False}, ) def start_profiling(self): From 53b6559a8906ba56f9f50469e0c2bec430614a4e Mon Sep 17 00:00:00 2001 From: James Date: Fri, 15 Nov 2019 05:42:46 +1100 Subject: [PATCH 079/133] Add optional python dependencies to snap packaging (#6317) Signed-off-by: James Hebden --- changelog.d/6317.misc | 1 + snap/snapcraft.yaml | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) create mode 100644 changelog.d/6317.misc diff --git a/changelog.d/6317.misc b/changelog.d/6317.misc new file mode 100644 index 000000000..a67d13fa7 --- /dev/null +++ b/changelog.d/6317.misc @@ -0,0 +1 @@ +Add optional python dependencies and dependant binary libraries to snapcraft packaging. diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index 1f7df71db..9e644e856 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -20,3 +20,23 @@ parts: source: . 
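
A note on the account-validity fix above (PATCH 078, #6359), before the snap packaging diff resumes: upgrading a guest account re-runs the registration path, so a second plain INSERT into account_validity collides with the row written when the guest first registered, whereas an upsert keyed on user_id makes the write idempotent. Below is a self-contained sketch of that distinction; only the table and column names are taken from the diff, and sqlite3 stands in for Synapse's own storage helpers.

    # Illustrative only: sqlite3 in place of _simple_upsert_txn, with the
    # table shape from the diff. ON CONFLICT needs SQLite 3.24 or newer.
    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.execute(
        "CREATE TABLE account_validity ("
        " user_id TEXT PRIMARY KEY,"
        " expiration_ts_ms BIGINT NOT NULL,"
        " email_sent BOOLEAN NOT NULL)"
    )

    def renew(user_id, expiration_ts):
        # Insert when the row is missing, otherwise overwrite it in place.
        conn.execute(
            "INSERT INTO account_validity (user_id, expiration_ts_ms, email_sent)"
            " VALUES (?, ?, 0)"
            " ON CONFLICT (user_id) DO UPDATE SET"
            " expiration_ts_ms = excluded.expiration_ts_ms, email_sent = 0",
            (user_id, expiration_ts),
        )

    renew("@guest:example.com", 1000000)  # guest registration
    renew("@guest:example.com", 2000000)  # upgrade: a plain INSERT would
                                          # raise IntegrityError here
    print(conn.execute("SELECT * FROM account_validity").fetchall())
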
plugin: python python-version: python3 + python-packages: + - '.[all]' + build-packages: + - libffi-dev + - libturbojpeg0-dev + - libssl-dev + - libxslt1-dev + - libpq-dev + - zlib1g-dev + stage-packages: + - libasn1-8-heimdal + - libgssapi3-heimdal + - libhcrypto4-heimdal + - libheimbase1-heimdal + - libheimntlm0-heimdal + - libhx509-5-heimdal + - libkrb5-26-heimdal + - libldap-2.4-2 + - libpq5 + - libsasl2-2 From 657d614f6a53f3dbfd2858bd85d0f81563db0041 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 15 Nov 2019 14:02:34 +0000 Subject: [PATCH 080/133] Replace UPDATE with UPSERT on device_max_stream_id table (#6363) --- changelog.d/6363.bugfix | 1 + synapse/storage/data_stores/main/deviceinbox.py | 17 +++++++++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6363.bugfix diff --git a/changelog.d/6363.bugfix b/changelog.d/6363.bugfix new file mode 100644 index 000000000..d023b4918 --- /dev/null +++ b/changelog.d/6363.bugfix @@ -0,0 +1 @@ +Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause unable to decrypt errors. \ No newline at end of file diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py index f04aad074..96cd0fb77 100644 --- a/synapse/storage/data_stores/main/deviceinbox.py +++ b/synapse/storage/data_stores/main/deviceinbox.py @@ -358,8 +358,21 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore) def _add_messages_to_local_device_inbox_txn( self, txn, stream_id, messages_by_user_then_device ): - sql = "UPDATE device_max_stream_id" " SET stream_id = ?" " WHERE stream_id < ?" - txn.execute(sql, (stream_id, stream_id)) + # Compatible method of performing an upsert + sql = "SELECT stream_id FROM device_max_stream_id" + + txn.execute(sql) + rows = txn.fetchone() + if rows: + db_stream_id = rows[0] + if db_stream_id < stream_id: + # Insert the new stream_id + sql = "UPDATE device_max_stream_id SET stream_id = ?" 
+ else: + # No rows, perform an insert + sql = "INSERT INTO device_max_stream_id (stream_id) VALUES (?)" + + txn.execute(sql, (stream_id,)) local_by_user_then_device = {} for user_id, messages_by_device in messages_by_user_then_device.items(): From c7376cdfe3efe05942964efcdf8886d66342383c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 18 Nov 2019 17:10:16 +0000 Subject: [PATCH 081/133] Apply suggestions from code review Co-Authored-By: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Co-Authored-By: Brendan Abolivier --- synapse/handlers/auth.py | 4 ++-- synapse/rest/client/v1/login.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index 20c62bd78..0955cf9db 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -135,8 +135,8 @@ class AuthHandler(BaseHandler): AuthError if the client has completed a login flow, and it gives a different user to `requester` - LimitExceededError if the ratelimiter's failed requests count for this - user is too high too proceed + LimitExceededError if the ratelimiter's failed request count for this + user is too high to proceed """ diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index abc210da5..f8d58afb2 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -397,7 +397,7 @@ class LoginRestServlet(RestServlet): raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED) user_id = UserID(user, self.hs.hostname).to_string() - result = yield self._complete_login(user_id, login_submission) + result = yield self._complete_login(user_id, login_submission, create_non_existant_users=True) return result From 7c24d0f443724082376c89f9f75954d81f524a8e Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 19 Nov 2019 13:22:37 +0000 Subject: [PATCH 082/133] Lint --- synapse/config/server.py | 39 ++++++++----- synapse/handlers/pagination.py | 17 ++---- synapse/storage/data_stores/main/room.py | 49 ++++++++-------- tests/rest/client/test_retention.py | 73 ++++++++---------------- 4 files changed, 77 insertions(+), 101 deletions(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index aa93a416f..8a55ffac4 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,7 +19,7 @@ import logging import os.path import re from textwrap import indent -from typing import List +from typing import List, Dict, Optional import attr import yaml @@ -287,13 +287,17 @@ class ServerConfig(Config): self.retention_default_min_lifetime = None self.retention_default_max_lifetime = None - self.retention_allowed_lifetime_min = retention_config.get("allowed_lifetime_min") + self.retention_allowed_lifetime_min = retention_config.get( + "allowed_lifetime_min" + ) if self.retention_allowed_lifetime_min is not None: self.retention_allowed_lifetime_min = self.parse_duration( self.retention_allowed_lifetime_min ) - self.retention_allowed_lifetime_max = retention_config.get("allowed_lifetime_max") + self.retention_allowed_lifetime_max = retention_config.get( + "allowed_lifetime_max" + ) if self.retention_allowed_lifetime_max is not None: self.retention_allowed_lifetime_max = self.parse_duration( self.retention_allowed_lifetime_max @@ -302,14 +306,15 @@ class ServerConfig(Config): if ( self.retention_allowed_lifetime_min is not None and self.retention_allowed_lifetime_max is not None - and self.retention_allowed_lifetime_min > self.retention_allowed_lifetime_max + and 
self.retention_allowed_lifetime_min + > self.retention_allowed_lifetime_max ): raise ConfigError( "Invalid retention policy limits: 'allowed_lifetime_min' can not be" " greater than 'allowed_lifetime_max'" ) - self.retention_purge_jobs = [] + self.retention_purge_jobs = [] # type: List[Dict[str, Optional[int]]] for purge_job_config in retention_config.get("purge_jobs", []): interval_config = purge_job_config.get("interval") @@ -342,18 +347,22 @@ class ServerConfig(Config): " 'longest_max_lifetime' value." ) - self.retention_purge_jobs.append({ - "interval": interval, - "shortest_max_lifetime": shortest_max_lifetime, - "longest_max_lifetime": longest_max_lifetime, - }) + self.retention_purge_jobs.append( + { + "interval": interval, + "shortest_max_lifetime": shortest_max_lifetime, + "longest_max_lifetime": longest_max_lifetime, + } + ) if not self.retention_purge_jobs: - self.retention_purge_jobs = [{ - "interval": self.parse_duration("1d"), - "shortest_max_lifetime": None, - "longest_max_lifetime": None, - }] + self.retention_purge_jobs = [ + { + "interval": self.parse_duration("1d"), + "shortest_max_lifetime": None, + "longest_max_lifetime": None, + } + ] self.listeners = [] # type: List[dict] for listener in config.get("listeners", []): diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index e1800177f..d122c11a4 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -154,20 +154,17 @@ class PaginationHandler(object): # Figure out what token we should start purging at. ts = self.clock.time_msec() - max_lifetime - stream_ordering = ( - yield self.store.find_first_stream_ordering_after_ts(ts) - ) + stream_ordering = yield self.store.find_first_stream_ordering_after_ts(ts) - r = ( - yield self.store.get_room_event_after_stream_ordering( - room_id, stream_ordering, - ) + r = yield self.store.get_room_event_after_stream_ordering( + room_id, stream_ordering, ) if not r: logger.warning( "[purge] purging events not possible: No event found " "(ts %i => stream_ordering %i)", - ts, stream_ordering, + ts, + stream_ordering, ) continue @@ -186,9 +183,7 @@ class PaginationHandler(object): # the background so that it's not blocking any other operation apart from # other purges in the same room. run_as_background_process( - "_purge_history", - self._purge_history, - purge_id, room_id, token, True, + "_purge_history", self._purge_history, purge_id, room_id, token, True, ) def start_purge_history(self, room_id, token, delete_local_events=False): diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py index 54a7d24c7..7fceae59c 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py @@ -334,8 +334,9 @@ class RoomStore(RoomWorkerStore, SearchStore): WHERE state.room_id > ? 
AND state.type = '%s' ORDER BY state.room_id ASC LIMIT ?; - """ % EventTypes.Retention, - (last_room, batch_size) + """ + % EventTypes.Retention, + (last_room, batch_size), ) rows = self.cursor_to_dict(txn) @@ -358,15 +359,13 @@ class RoomStore(RoomWorkerStore, SearchStore): "event_id": row["event_id"], "min_lifetime": retention_policy.get("min_lifetime"), "max_lifetime": retention_policy.get("max_lifetime"), - } + }, ) logger.info("Inserted %d rows into room_retention", len(rows)) self._background_update_progress_txn( - txn, "insert_room_retention", { - "room_id": rows[-1]["room_id"], - } + txn, "insert_room_retention", {"room_id": rows[-1]["room_id"]} ) if batch_size > len(rows): @@ -375,8 +374,7 @@ class RoomStore(RoomWorkerStore, SearchStore): return False end = yield self.runInteraction( - "insert_room_retention", - _background_insert_retention_txn, + "insert_room_retention", _background_insert_retention_txn, ) if end: @@ -585,17 +583,15 @@ class RoomStore(RoomWorkerStore, SearchStore): ) def _store_retention_policy_for_room_txn(self, txn, event): - if ( - hasattr(event, "content") - and ("min_lifetime" in event.content or "max_lifetime" in event.content) + if hasattr(event, "content") and ( + "min_lifetime" in event.content or "max_lifetime" in event.content ): if ( - ("min_lifetime" in event.content and not isinstance( - event.content.get("min_lifetime"), integer_types - )) - or ("max_lifetime" in event.content and not isinstance( - event.content.get("max_lifetime"), integer_types - )) + "min_lifetime" in event.content + and not isinstance(event.content.get("min_lifetime"), integer_types) + ) or ( + "max_lifetime" in event.content + and not isinstance(event.content.get("max_lifetime"), integer_types) ): # Ignore the event if one of the value isn't an integer. return @@ -798,7 +794,9 @@ class RoomStore(RoomWorkerStore, SearchStore): return local_media_mxcs, remote_media_mxcs @defer.inlineCallbacks - def get_rooms_for_retention_period_in_range(self, min_ms, max_ms, include_null=False): + def get_rooms_for_retention_period_in_range( + self, min_ms, max_ms, include_null=False + ): """Retrieves all of the rooms within the given retention range. Optionally includes the rooms which don't have a retention policy. @@ -904,23 +902,24 @@ class RoomStore(RoomWorkerStore, SearchStore): INNER JOIN current_state_events USING (event_id, room_id) WHERE room_id = ?; """, - (room_id,) + (room_id,), ) return self.cursor_to_dict(txn) ret = yield self.runInteraction( - "get_retention_policy_for_room", - get_retention_policy_for_room_txn, + "get_retention_policy_for_room", get_retention_policy_for_room_txn, ) # If we don't know this room ID, ret will be None, in this case return the default # policy. 
if not ret: - defer.returnValue({ - "min_lifetime": self.config.retention_default_min_lifetime, - "max_lifetime": self.config.retention_default_max_lifetime, - }) + defer.returnValue( + { + "min_lifetime": self.config.retention_default_min_lifetime, + "max_lifetime": self.config.retention_default_max_lifetime, + } + ) row = ret[0] diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 7b6f25a83..6bf485c23 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -61,9 +61,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, - body={ - "max_lifetime": one_day_ms * 4, - }, + body={"max_lifetime": one_day_ms * 4}, tok=self.token, expect_code=400, ) @@ -71,9 +69,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, - body={ - "max_lifetime": one_hour_ms, - }, + body={"max_lifetime": one_hour_ms}, tok=self.token, expect_code=400, ) @@ -89,9 +85,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, - body={ - "max_lifetime": lifetime, - }, + body={"max_lifetime": lifetime}, tok=self.token, ) @@ -115,20 +109,12 @@ class RetentionTestCase(unittest.HomeserverTestCase): events = [] # Send a first event, which should be filtered out at the end of the test. - resp = self.helper.send( - room_id=room_id, - body="1", - tok=self.token, - ) + resp = self.helper.send(room_id=room_id, body="1", tok=self.token) # Get the event from the store so that we end up with a FrozenEvent that we can # give to filter_events_for_client. We need to do this now because the event won't # be in the database anymore after it has expired. - events.append(self.get_success( - store.get_event( - resp.get("event_id") - ) - )) + events.append(self.get_success(store.get_event(resp.get("event_id")))) # Advance the time by 2 days. We're using the default retention policy, therefore # after this the first event will still be valid. @@ -143,20 +129,16 @@ class RetentionTestCase(unittest.HomeserverTestCase): valid_event_id = resp.get("event_id") - events.append(self.get_success( - store.get_event( - valid_event_id - ) - )) + events.append(self.get_success(store.get_event(valid_event_id))) # Advance the time by anothe 2 days. After this, the first event should be # outdated but not the second one. self.reactor.advance(one_day_ms * 2 / 1000) # Run filter_events_for_client with our list of FrozenEvents. - filtered_events = self.get_success(filter_events_for_client( - storage, self.user_id, events - )) + filtered_events = self.get_success( + filter_events_for_client(storage, self.user_id, events) + ) # We should only get one event back. self.assertEqual(len(filtered_events), 1, filtered_events) @@ -172,28 +154,22 @@ class RetentionTestCase(unittest.HomeserverTestCase): # Send a first event to the room. This is the event we'll want to be purged at the # end of the test. - resp = self.helper.send( - room_id=room_id, - body="1", - tok=self.token, - ) + resp = self.helper.send(room_id=room_id, body="1", tok=self.token) expired_event_id = resp.get("event_id") # Check that we can retrieve the event. 
expired_event = self.get_event(room_id, expired_event_id) - self.assertEqual(expired_event.get("content", {}).get("body"), "1", expired_event) + self.assertEqual( + expired_event.get("content", {}).get("body"), "1", expired_event + ) # Advance the time. self.reactor.advance(increment / 1000) # Send another event. We need this because the purge job won't purge the most # recent event in the room. - resp = self.helper.send( - room_id=room_id, - body="2", - tok=self.token, - ) + resp = self.helper.send(room_id=room_id, body="2", tok=self.token) valid_event_id = resp.get("event_id") @@ -240,8 +216,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): mock_federation_client = Mock(spec=["backfill"]) self.hs = self.setup_test_homeserver( - config=config, - federation_client=mock_federation_client, + config=config, federation_client=mock_federation_client, ) return self.hs @@ -268,9 +243,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, - body={ - "max_lifetime": one_day_ms * 35, - }, + body={"max_lifetime": one_day_ms * 35}, tok=self.token, ) @@ -289,18 +262,16 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): # Check that we can retrieve the event. expired_event = self.get_event(room_id, first_event_id) - self.assertEqual(expired_event.get("content", {}).get("body"), "1", expired_event) + self.assertEqual( + expired_event.get("content", {}).get("body"), "1", expired_event + ) # Advance the time by a month. self.reactor.advance(one_day_ms * 30 / 1000) # Send another event. We need this because the purge job won't purge the most # recent event in the room. - resp = self.helper.send( - room_id=room_id, - body="2", - tok=self.token, - ) + resp = self.helper.send(room_id=room_id, body="2", tok=self.token) second_event_id = resp.get("event_id") @@ -313,7 +284,9 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): ) if expected_code_for_first_event == 200: - self.assertEqual(first_event.get("content", {}).get("body"), "1", first_event) + self.assertEqual( + first_event.get("content", {}).get("body"), "1", first_event + ) # Check that the event that hasn't been purged can still be retrieved. second_event = self.get_event(room_id, second_event_id) From bf9a11c54d3c9ca1e4fa9420b567efceb1e46d5b Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 19 Nov 2019 13:30:04 +0000 Subject: [PATCH 083/133] Lint again --- synapse/config/server.py | 2 +- tests/rest/client/test_retention.py | 12 ++---------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index 8a55ffac4..ed916e140 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -19,7 +19,7 @@ import logging import os.path import re from textwrap import indent -from typing import List, Dict, Optional +from typing import Dict, List, Optional import attr import yaml diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 6bf485c23..9e549d8a9 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -121,11 +121,7 @@ class RetentionTestCase(unittest.HomeserverTestCase): self.reactor.advance(one_day_ms * 2 / 1000) # Send another event, which shouldn't get filtered out. 
- resp = self.helper.send( - room_id=room_id, - body="2", - tok=self.token, - ) + resp = self.helper.send(room_id=room_id, body="2", tok=self.token) valid_event_id = resp.get("event_id") @@ -252,11 +248,7 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): def _test_retention(self, room_id, expected_code_for_first_event=200): # Send a first event to the room. This is the event we'll want to be purged at the # end of the test. - resp = self.helper.send( - room_id=room_id, - body="1", - tok=self.token, - ) + resp = self.helper.send(room_id=room_id, body="1", tok=self.token) first_event_id = resp.get("event_id") From 97b863fe32c03ce30c4cd5bda1479c1f9a03da83 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 19 Nov 2019 13:33:58 +0000 Subject: [PATCH 084/133] Lint again --- synapse/config/server.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/config/server.py b/synapse/config/server.py index ed916e140..4404953e2 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -879,7 +879,7 @@ class ServerConfig(Config): # Defaults to `28d`. Set to `null` to disable clearing out of old rows. # #user_ips_max_age: 14d - + # Message retention policy at the server level. # # Room admins and mods can define a retention period for their rooms using the From a6fc6754f8a680aa31e2be2b10d02e953d4ff368 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 19 Nov 2019 14:07:39 +0000 Subject: [PATCH 085/133] Fix 3PID invite exchange --- synapse/handlers/federation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 3994137d1..ab82f8362 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2569,7 +2569,7 @@ class FederationHandler(BaseHandler): event, context = yield self.event_creation_handler.create_new_client_event( builder=builder ) - EventValidator().validate_new(event) + EventValidator().validate_new(event, self.config) return (event, context) @defer.inlineCallbacks From cdd3cb870d2e81fc28073abade2f6eaf4ec54fdd Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 19 Nov 2019 14:40:21 +0000 Subject: [PATCH 086/133] Fix worker mode --- synapse/storage/data_stores/main/room.py | 112 +++++++++++------------ 1 file changed, 56 insertions(+), 56 deletions(-) diff --git a/synapse/storage/data_stores/main/room.py b/synapse/storage/data_stores/main/room.py index 7fceae59c..b7f902481 100644 --- a/synapse/storage/data_stores/main/room.py +++ b/synapse/storage/data_stores/main/room.py @@ -303,6 +303,62 @@ class RoomWorkerStore(SQLBaseStore): else: return None + @cachedInlineCallbacks() + def get_retention_policy_for_room(self, room_id): + """Get the retention policy for a given room. + + If no retention policy has been found for this room, returns a policy defined + by the configured default policy (which has None as both the 'min_lifetime' and + the 'max_lifetime' if no default policy has been defined in the server's + configuration). + + Args: + room_id (str): The ID of the room to get the retention policy of. + + Returns: + dict[int, int]: "min_lifetime" and "max_lifetime" for this room. 
+ """ + + def get_retention_policy_for_room_txn(txn): + txn.execute( + """ + SELECT min_lifetime, max_lifetime FROM room_retention + INNER JOIN current_state_events USING (event_id, room_id) + WHERE room_id = ?; + """, + (room_id,), + ) + + return self.cursor_to_dict(txn) + + ret = yield self.runInteraction( + "get_retention_policy_for_room", get_retention_policy_for_room_txn, + ) + + # If we don't know this room ID, ret will be None, in this case return the default + # policy. + if not ret: + defer.returnValue( + { + "min_lifetime": self.config.retention_default_min_lifetime, + "max_lifetime": self.config.retention_default_max_lifetime, + } + ) + + row = ret[0] + + # If one of the room's policy's attributes isn't defined, use the matching + # attribute from the default policy. + # The default values will be None if no default policy has been defined, or if one + # of the attributes is missing from the default policy. + if row["min_lifetime"] is None: + row["min_lifetime"] = self.config.retention_default_min_lifetime + + if row["max_lifetime"] is None: + row["max_lifetime"] = self.config.retention_default_max_lifetime + + defer.returnValue(row) + class RoomStore(RoomWorkerStore, SearchStore): def __init__(self, db_conn, hs): @@ -878,59 +934,3 @@ class RoomStore(RoomWorkerStore, SearchStore): ) defer.returnValue(rooms) - - @cachedInlineCallbacks() - def get_retention_policy_for_room(self, room_id): - """Get the retention policy for a given room. - - If no retention policy has been found for this room, returns a policy defined - by the configured default policy (which has None as both the 'min_lifetime' and - the 'max_lifetime' if no default policy has been defined in the server's - configuration). - - Args: - room_id (str): The ID of the room to get the retention policy of. - - Returns: - dict[int, int]: "min_lifetime" and "max_lifetime" for this room. - """ - - def get_retention_policy_for_room_txn(txn): - txn.execute( - """ - SELECT min_lifetime, max_lifetime FROM room_retention - INNER JOIN current_state_events USING (event_id, room_id) - WHERE room_id = ?; - """, - (room_id,), - ) - - return self.cursor_to_dict(txn) - - ret = yield self.runInteraction( - "get_retention_policy_for_room", get_retention_policy_for_room_txn, - ) - - # If we don't know this room ID, ret will be None, in this case return the default - # policy. - if not ret: - defer.returnValue( - { - "min_lifetime": self.config.retention_default_min_lifetime, - "max_lifetime": self.config.retention_default_max_lifetime, - } - ) - - row = ret[0] - - # If one of the room's policy's attributes isn't defined, use the matching - # attribute from the default policy. - # The default values will be None if no default policy has been defined, or if one - # of the attributes is missing from the default policy. 
- if row["min_lifetime"] is None: - row["min_lifetime"] = self.config.retention_default_min_lifetime - - if row["max_lifetime"] is None: - row["max_lifetime"] = self.config.retention_default_max_lifetime - - defer.returnValue(row) From 271c322d08a3d13c986d97cbc40e72eef50e92ba Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 20 Nov 2019 09:29:48 +0000 Subject: [PATCH 087/133] Lint --- synapse/rest/client/v1/login.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/login.py b/synapse/rest/client/v1/login.py index f8d58afb2..19eb15003 100644 --- a/synapse/rest/client/v1/login.py +++ b/synapse/rest/client/v1/login.py @@ -397,7 +397,9 @@ class LoginRestServlet(RestServlet): raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED) user_id = UserID(user, self.hs.hostname).to_string() - result = yield self._complete_login(user_id, login_submission, create_non_existant_users=True) + result = yield self._complete_login( + user_id, login_submission, create_non_existant_users=True + ) return result From 4f5ca455bf17b52d70ab08be043178b4678cc4b8 Mon Sep 17 00:00:00 2001 From: Manuel Stahl <37705355+awesome-manuel@users.noreply.github.com> Date: Wed, 20 Nov 2019 12:49:11 +0100 Subject: [PATCH 088/133] Move admin endpoints into separate files (#6308) --- changelog.d/6308.misc | 1 + synapse/rest/admin/__init__.py | 567 +-------------------------------- synapse/rest/admin/groups.py | 46 +++ synapse/rest/admin/rooms.py | 157 +++++++++ synapse/rest/admin/users.py | 406 ++++++++++++++++++++++- 5 files changed, 622 insertions(+), 555 deletions(-) create mode 100644 changelog.d/6308.misc create mode 100644 synapse/rest/admin/groups.py create mode 100644 synapse/rest/admin/rooms.py diff --git a/changelog.d/6308.misc b/changelog.d/6308.misc new file mode 100644 index 000000000..72be63ba4 --- /dev/null +++ b/changelog.d/6308.misc @@ -0,0 +1 @@ +Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. \ No newline at end of file diff --git a/synapse/rest/admin/__init__.py b/synapse/rest/admin/__init__.py index 5c2a2eb59..68a59a342 100644 --- a/synapse/rest/admin/__init__.py +++ b/synapse/rest/admin/__init__.py @@ -14,62 +14,39 @@ # See the License for the specific language governing permissions and # limitations under the License. 
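
PATCH 088 below is almost entirely a move: the hunks that follow prune the relocated servlets and their now-unused imports out of synapse/rest/admin/__init__.py, and the same class bodies reappear in the new groups.py, rooms.py and users.py further down. For orientation, here is a sketch of the wiring that keeps the endpoints reachable after the split. The import block mirrors the added lines in the next hunk; the register_servlets body is an assumption about its post-split shape, not text from this patch.

    from synapse.rest.admin.groups import DeleteGroupAdminRestServlet
    from synapse.rest.admin.rooms import ShutdownRoomRestServlet
    from synapse.rest.admin.users import (
        AccountValidityRenewServlet,
        DeactivateAccountRestServlet,
        GetUsersPaginatedRestServlet,
        ResetPasswordRestServlet,
        SearchUsersRestServlet,
        UserAdminServlet,
        UserRegisterServlet,
        UsersRestServlet,
        WhoisRestServlet,
    )

    def register_servlets(hs, http_server):
        # Each servlet carries its own PATTERNS, so callers never notice
        # which module a class lives in.
        for servlet in (
            UsersRestServlet,
            UserRegisterServlet,
            WhoisRestServlet,
            DeactivateAccountRestServlet,
            ResetPasswordRestServlet,
            GetUsersPaginatedRestServlet,
            SearchUsersRestServlet,
            UserAdminServlet,
            AccountValidityRenewServlet,
            DeleteGroupAdminRestServlet,
            ShutdownRoomRestServlet,
        ):
            servlet(hs).register(http_server)
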
-import hashlib -import hmac import logging import platform import re -from six import text_type -from six.moves import http_client - import synapse -from synapse.api.constants import Membership, UserTypes from synapse.api.errors import Codes, NotFoundError, SynapseError from synapse.http.server import JsonResource -from synapse.http.servlet import ( - RestServlet, - assert_params_in_dict, - parse_integer, - parse_json_object_from_request, - parse_string, -) +from synapse.http.servlet import RestServlet, parse_json_object_from_request from synapse.rest.admin._base import ( assert_requester_is_admin, - assert_user_is_admin, historical_admin_path_patterns, ) +from synapse.rest.admin.groups import DeleteGroupAdminRestServlet from synapse.rest.admin.media import ListMediaInRoom, register_servlets_for_media_repo from synapse.rest.admin.purge_room_servlet import PurgeRoomServlet +from synapse.rest.admin.rooms import ShutdownRoomRestServlet from synapse.rest.admin.server_notice_servlet import SendServerNoticeServlet -from synapse.rest.admin.users import UserAdminServlet -from synapse.types import UserID, create_requester -from synapse.util.async_helpers import maybe_awaitable +from synapse.rest.admin.users import ( + AccountValidityRenewServlet, + DeactivateAccountRestServlet, + GetUsersPaginatedRestServlet, + ResetPasswordRestServlet, + SearchUsersRestServlet, + UserAdminServlet, + UserRegisterServlet, + UsersRestServlet, + WhoisRestServlet, +) from synapse.util.versionstring import get_version_string logger = logging.getLogger(__name__) -class UsersRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/users/(?P[^/]*)$") - - def __init__(self, hs): - self.hs = hs - self.auth = hs.get_auth() - self.handlers = hs.get_handlers() - - async def on_GET(self, request, user_id): - target_user = UserID.from_string(user_id) - await assert_requester_is_admin(self.auth, request) - - if not self.hs.is_mine(target_user): - raise SynapseError(400, "Can only users a local user") - - ret = await self.handlers.admin_handler.get_users() - - return 200, ret - - class VersionServlet(RestServlet): PATTERNS = (re.compile("^/_synapse/admin/v1/server_version$"),) @@ -83,159 +60,6 @@ class VersionServlet(RestServlet): return 200, self.res -class UserRegisterServlet(RestServlet): - """ - Attributes: - NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted - nonces (dict[str, int]): The nonces that we will accept. A dict of - nonce to the time it was generated, in int seconds. - """ - - PATTERNS = historical_admin_path_patterns("/register") - NONCE_TIMEOUT = 60 - - def __init__(self, hs): - self.handlers = hs.get_handlers() - self.reactor = hs.get_reactor() - self.nonces = {} - self.hs = hs - - def _clear_old_nonces(self): - """ - Clear out old nonces that are older than NONCE_TIMEOUT. - """ - now = int(self.reactor.seconds()) - - for k, v in list(self.nonces.items()): - if now - v > self.NONCE_TIMEOUT: - del self.nonces[k] - - def on_GET(self, request): - """ - Generate a new nonce. 
- """ - self._clear_old_nonces() - - nonce = self.hs.get_secrets().token_hex(64) - self.nonces[nonce] = int(self.reactor.seconds()) - return 200, {"nonce": nonce} - - async def on_POST(self, request): - self._clear_old_nonces() - - if not self.hs.config.registration_shared_secret: - raise SynapseError(400, "Shared secret registration is not enabled") - - body = parse_json_object_from_request(request) - - if "nonce" not in body: - raise SynapseError(400, "nonce must be specified", errcode=Codes.BAD_JSON) - - nonce = body["nonce"] - - if nonce not in self.nonces: - raise SynapseError(400, "unrecognised nonce") - - # Delete the nonce, so it can't be reused, even if it's invalid - del self.nonces[nonce] - - if "username" not in body: - raise SynapseError( - 400, "username must be specified", errcode=Codes.BAD_JSON - ) - else: - if ( - not isinstance(body["username"], text_type) - or len(body["username"]) > 512 - ): - raise SynapseError(400, "Invalid username") - - username = body["username"].encode("utf-8") - if b"\x00" in username: - raise SynapseError(400, "Invalid username") - - if "password" not in body: - raise SynapseError( - 400, "password must be specified", errcode=Codes.BAD_JSON - ) - else: - if ( - not isinstance(body["password"], text_type) - or len(body["password"]) > 512 - ): - raise SynapseError(400, "Invalid password") - - password = body["password"].encode("utf-8") - if b"\x00" in password: - raise SynapseError(400, "Invalid password") - - admin = body.get("admin", None) - user_type = body.get("user_type", None) - - if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: - raise SynapseError(400, "Invalid user type") - - got_mac = body["mac"] - - want_mac = hmac.new( - key=self.hs.config.registration_shared_secret.encode(), - digestmod=hashlib.sha1, - ) - want_mac.update(nonce.encode("utf8")) - want_mac.update(b"\x00") - want_mac.update(username) - want_mac.update(b"\x00") - want_mac.update(password) - want_mac.update(b"\x00") - want_mac.update(b"admin" if admin else b"notadmin") - if user_type: - want_mac.update(b"\x00") - want_mac.update(user_type.encode("utf8")) - want_mac = want_mac.hexdigest() - - if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")): - raise SynapseError(403, "HMAC incorrect") - - # Reuse the parts of RegisterRestServlet to reduce code duplication - from synapse.rest.client.v2_alpha.register import RegisterRestServlet - - register = RegisterRestServlet(self.hs) - - user_id = await register.registration_handler.register_user( - localpart=body["username"].lower(), - password=body["password"], - admin=bool(admin), - user_type=user_type, - ) - - result = await register._create_registration_details(user_id, body) - return 200, result - - -class WhoisRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/whois/(?P[^/]*)") - - def __init__(self, hs): - self.hs = hs - self.auth = hs.get_auth() - self.handlers = hs.get_handlers() - - async def on_GET(self, request, user_id): - target_user = UserID.from_string(user_id) - requester = await self.auth.get_user_by_req(request) - auth_user = requester.user - - if target_user != auth_user: - await assert_user_is_admin(self.auth, auth_user) - - if not self.hs.is_mine(target_user): - raise SynapseError(400, "Can only whois a local user") - - ret = await self.handlers.admin_handler.get_whois(target_user) - - return 200, ret - - class PurgeHistoryRestServlet(RestServlet): PATTERNS = historical_admin_path_patterns( "/purge_history/(?P[^/]*)(/(?P[^/]+))?" 
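
Between these removal hunks it is worth spelling out what UserRegisterServlet, which moves to users.py below rather than disappearing, actually checks: an HMAC-SHA1 keyed on registration_shared_secret over the nonce, username, password, admin flag and optional user type, with a NUL byte between fields. A client-side sketch that produces a matching MAC follows; the field order is read straight from the want_mac construction in the diff, while the example values are placeholders.

    import hashlib
    import hmac

    def registration_mac(shared_secret, nonce, username, password,
                         admin=False, user_type=None):
        # Mirrors the servlet's want_mac chain: NUL-separated fields,
        # HMAC-SHA1, hex digest.
        mac = hmac.new(shared_secret.encode("utf8"), digestmod=hashlib.sha1)
        mac.update(nonce.encode("utf8"))
        mac.update(b"\x00")
        mac.update(username.encode("utf8"))
        mac.update(b"\x00")
        mac.update(password.encode("utf8"))
        mac.update(b"\x00")
        mac.update(b"admin" if admin else b"notadmin")
        if user_type:
            mac.update(b"\x00")
            mac.update(user_type.encode("utf8"))
        return mac.hexdigest()

    # Placeholder values; the nonce comes from a prior GET to /register.
    print(registration_mac("shared_secret", "nonce", "alice", "s3cret"))
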
@@ -342,369 +166,6 @@ class PurgeHistoryStatusRestServlet(RestServlet): return 200, purge_status.asdict() -class DeactivateAccountRestServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/deactivate/(?P[^/]*)") - - def __init__(self, hs): - self._deactivate_account_handler = hs.get_deactivate_account_handler() - self.auth = hs.get_auth() - - async def on_POST(self, request, target_user_id): - await assert_requester_is_admin(self.auth, request) - body = parse_json_object_from_request(request, allow_empty_body=True) - erase = body.get("erase", False) - if not isinstance(erase, bool): - raise SynapseError( - http_client.BAD_REQUEST, - "Param 'erase' must be a boolean, if given", - Codes.BAD_JSON, - ) - - UserID.from_string(target_user_id) - - result = await self._deactivate_account_handler.deactivate_account( - target_user_id, erase - ) - if result: - id_server_unbind_result = "success" - else: - id_server_unbind_result = "no-support" - - return 200, {"id_server_unbind_result": id_server_unbind_result} - - -class ShutdownRoomRestServlet(RestServlet): - """Shuts down a room by removing all local users from the room and blocking - all future invites and joins to the room. Any local aliases will be repointed - to a new room created by `new_room_user_id` and kicked users will be auto - joined to the new room. - """ - - PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P[^/]+)") - - DEFAULT_MESSAGE = ( - "Sharing illegal content on this server is not permitted and rooms in" - " violation will be blocked." - ) - - def __init__(self, hs): - self.hs = hs - self.store = hs.get_datastore() - self.state = hs.get_state_handler() - self._room_creation_handler = hs.get_room_creation_handler() - self.event_creation_handler = hs.get_event_creation_handler() - self.room_member_handler = hs.get_room_member_handler() - self.auth = hs.get_auth() - - async def on_POST(self, request, room_id): - requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) - - content = parse_json_object_from_request(request) - assert_params_in_dict(content, ["new_room_user_id"]) - new_room_user_id = content["new_room_user_id"] - - room_creator_requester = create_requester(new_room_user_id) - - message = content.get("message", self.DEFAULT_MESSAGE) - room_name = content.get("room_name", "Content Violation Notification") - - info = await self._room_creation_handler.create_room( - room_creator_requester, - config={ - "preset": "public_chat", - "name": room_name, - "power_level_content_override": {"users_default": -10}, - }, - ratelimit=False, - ) - new_room_id = info["room_id"] - - requester_user_id = requester.user.to_string() - - logger.info( - "Shutting down room %r, joining to new room: %r", room_id, new_room_id - ) - - # This will work even if the room is already blocked, but that is - # desirable in case the first attempt at blocking the room failed below. 
- await self.store.block_room(room_id, requester_user_id) - - users = await self.state.get_current_users_in_room(room_id) - kicked_users = [] - failed_to_kick_users = [] - for user_id in users: - if not self.hs.is_mine_id(user_id): - continue - - logger.info("Kicking %r from %r...", user_id, room_id) - - try: - target_requester = create_requester(user_id) - await self.room_member_handler.update_membership( - requester=target_requester, - target=target_requester.user, - room_id=room_id, - action=Membership.LEAVE, - content={}, - ratelimit=False, - require_consent=False, - ) - - await self.room_member_handler.forget(target_requester.user, room_id) - - await self.room_member_handler.update_membership( - requester=target_requester, - target=target_requester.user, - room_id=new_room_id, - action=Membership.JOIN, - content={}, - ratelimit=False, - require_consent=False, - ) - - kicked_users.append(user_id) - except Exception: - logger.exception( - "Failed to leave old room and join new room for %r", user_id - ) - failed_to_kick_users.append(user_id) - - await self.event_creation_handler.create_and_send_nonmember_event( - room_creator_requester, - { - "type": "m.room.message", - "content": {"body": message, "msgtype": "m.text"}, - "room_id": new_room_id, - "sender": new_room_user_id, - }, - ratelimit=False, - ) - - aliases_for_room = await maybe_awaitable( - self.store.get_aliases_for_room(room_id) - ) - - await self.store.update_aliases_for_room( - room_id, new_room_id, requester_user_id - ) - - return ( - 200, - { - "kicked_users": kicked_users, - "failed_to_kick_users": failed_to_kick_users, - "local_aliases": aliases_for_room, - "new_room_id": new_room_id, - }, - ) - - -class ResetPasswordRestServlet(RestServlet): - """Post request to allow an administrator reset password for a user. - This needs user to have administrator access in Synapse. - Example: - http://localhost:8008/_synapse/admin/v1/reset_password/ - @user:to_reset_password?access_token=admin_access_token - JsonBodyToSend: - { - "new_password": "secret" - } - Returns: - 200 OK with empty object if success otherwise an error. - """ - - PATTERNS = historical_admin_path_patterns( - "/reset_password/(?P[^/]*)" - ) - - def __init__(self, hs): - self.store = hs.get_datastore() - self.hs = hs - self.auth = hs.get_auth() - self._set_password_handler = hs.get_set_password_handler() - - async def on_POST(self, request, target_user_id): - """Post request to allow an administrator reset password for a user. - This needs user to have administrator access in Synapse. - """ - requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) - - UserID.from_string(target_user_id) - - params = parse_json_object_from_request(request) - assert_params_in_dict(params, ["new_password"]) - new_password = params["new_password"] - - await self._set_password_handler.set_password( - target_user_id, new_password, requester - ) - return 200, {} - - -class GetUsersPaginatedRestServlet(RestServlet): - """Get request to get specific number of users from Synapse. - This needs user to have administrator access in Synapse. - Example: - http://localhost:8008/_synapse/admin/v1/users_paginate/ - @admin:user?access_token=admin_access_token&start=0&limit=10 - Returns: - 200 OK with json object {list[dict[str, Any]], count} or empty object. 
- """ - - PATTERNS = historical_admin_path_patterns( - "/users_paginate/(?P[^/]*)" - ) - - def __init__(self, hs): - self.store = hs.get_datastore() - self.hs = hs - self.auth = hs.get_auth() - self.handlers = hs.get_handlers() - - async def on_GET(self, request, target_user_id): - """Get request to get specific number of users from Synapse. - This needs user to have administrator access in Synapse. - """ - await assert_requester_is_admin(self.auth, request) - - target_user = UserID.from_string(target_user_id) - - if not self.hs.is_mine(target_user): - raise SynapseError(400, "Can only users a local user") - - order = "name" # order by name in user table - start = parse_integer(request, "start", required=True) - limit = parse_integer(request, "limit", required=True) - - logger.info("limit: %s, start: %s", limit, start) - - ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit) - return 200, ret - - async def on_POST(self, request, target_user_id): - """Post request to get specific number of users from Synapse.. - This needs user to have administrator access in Synapse. - Example: - http://localhost:8008/_synapse/admin/v1/users_paginate/ - @admin:user?access_token=admin_access_token - JsonBodyToSend: - { - "start": "0", - "limit": "10 - } - Returns: - 200 OK with json object {list[dict[str, Any]], count} or empty object. - """ - await assert_requester_is_admin(self.auth, request) - UserID.from_string(target_user_id) - - order = "name" # order by name in user table - params = parse_json_object_from_request(request) - assert_params_in_dict(params, ["limit", "start"]) - limit = params["limit"] - start = params["start"] - logger.info("limit: %s, start: %s", limit, start) - - ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit) - return 200, ret - - -class SearchUsersRestServlet(RestServlet): - """Get request to search user table for specific users according to - search term. - This needs user to have administrator access in Synapse. - Example: - http://localhost:8008/_synapse/admin/v1/search_users/ - @admin:user?access_token=admin_access_token&term=alice - Returns: - 200 OK with json object {list[dict[str, Any]], count} or empty object. - """ - - PATTERNS = historical_admin_path_patterns("/search_users/(?P[^/]*)") - - def __init__(self, hs): - self.store = hs.get_datastore() - self.hs = hs - self.auth = hs.get_auth() - self.handlers = hs.get_handlers() - - async def on_GET(self, request, target_user_id): - """Get request to search user table for specific users according to - search term. - This needs user to have a administrator access in Synapse. 
- """ - await assert_requester_is_admin(self.auth, request) - - target_user = UserID.from_string(target_user_id) - - # To allow all users to get the users list - # if not is_admin and target_user != auth_user: - # raise AuthError(403, "You are not a server admin") - - if not self.hs.is_mine(target_user): - raise SynapseError(400, "Can only users a local user") - - term = parse_string(request, "term", required=True) - logger.info("term: %s ", term) - - ret = await self.handlers.admin_handler.search_users(term) - return 200, ret - - -class DeleteGroupAdminRestServlet(RestServlet): - """Allows deleting of local groups - """ - - PATTERNS = historical_admin_path_patterns("/delete_group/(?P[^/]*)") - - def __init__(self, hs): - self.group_server = hs.get_groups_server_handler() - self.is_mine_id = hs.is_mine_id - self.auth = hs.get_auth() - - async def on_POST(self, request, group_id): - requester = await self.auth.get_user_by_req(request) - await assert_user_is_admin(self.auth, requester.user) - - if not self.is_mine_id(group_id): - raise SynapseError(400, "Can only delete local groups") - - await self.group_server.delete_group(group_id, requester.user.to_string()) - return 200, {} - - -class AccountValidityRenewServlet(RestServlet): - PATTERNS = historical_admin_path_patterns("/account_validity/validity$") - - def __init__(self, hs): - """ - Args: - hs (synapse.server.HomeServer): server - """ - self.hs = hs - self.account_activity_handler = hs.get_account_validity_handler() - self.auth = hs.get_auth() - - async def on_POST(self, request): - await assert_requester_is_admin(self.auth, request) - - body = parse_json_object_from_request(request) - - if "user_id" not in body: - raise SynapseError(400, "Missing property 'user_id' in the request body") - - expiration_ts = await self.account_activity_handler.renew_account_for_user( - body["user_id"], - body.get("expiration_ts"), - not body.get("enable_renewal_emails", True), - ) - - res = {"expiration_ts": expiration_ts} - return 200, res - - ######################################################################################## # # please don't add more servlets here: this file is already long and unwieldy. Put diff --git a/synapse/rest/admin/groups.py b/synapse/rest/admin/groups.py new file mode 100644 index 000000000..0b54ca09f --- /dev/null +++ b/synapse/rest/admin/groups.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
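
The new groups.py, whose body follows, carries DeleteGroupAdminRestServlet over unchanged. historical_admin_path_patterns exposes it under /_synapse/admin/v1, the same URL convention the docstrings elsewhere in this patch use, so calling the endpoint looks roughly like the sketch below; host, group ID and access token are placeholders, and the requests package is assumed to be available.

    import requests

    resp = requests.post(
        "http://localhost:8008/_synapse/admin/v1"
        "/delete_group/+badgroup:example.com",
        params={"access_token": "admin_access_token"},
    )
    resp.raise_for_status()
    print(resp.json())  # {} on success, per `return 200, {}` in the servlet
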
+import logging
+
+from synapse.api.errors import SynapseError
+from synapse.http.servlet import RestServlet
+from synapse.rest.admin._base import (
+    assert_user_is_admin,
+    historical_admin_path_patterns,
+)
+
+logger = logging.getLogger(__name__)
+
+
+class DeleteGroupAdminRestServlet(RestServlet):
+    """Allows deleting of local groups
+    """
+
+    PATTERNS = historical_admin_path_patterns("/delete_group/(?P<group_id>[^/]*)")
+
+    def __init__(self, hs):
+        self.group_server = hs.get_groups_server_handler()
+        self.is_mine_id = hs.is_mine_id
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, group_id):
+        requester = await self.auth.get_user_by_req(request)
+        await assert_user_is_admin(self.auth, requester.user)
+
+        if not self.is_mine_id(group_id):
+            raise SynapseError(400, "Can only delete local groups")
+
+        await self.group_server.delete_group(group_id, requester.user.to_string())
+        return 200, {}
diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py
new file mode 100644
index 000000000..f7cc5e9be
--- /dev/null
+++ b/synapse/rest/admin/rooms.py
@@ -0,0 +1,157 @@
+# -*- coding: utf-8 -*-
+# Copyright 2019 The Matrix.org Foundation C.I.C.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import logging
+
+from synapse.api.constants import Membership
+from synapse.http.servlet import (
+    RestServlet,
+    assert_params_in_dict,
+    parse_json_object_from_request,
+)
+from synapse.rest.admin._base import (
+    assert_user_is_admin,
+    historical_admin_path_patterns,
+)
+from synapse.types import create_requester
+from synapse.util.async_helpers import maybe_awaitable
+
+logger = logging.getLogger(__name__)
+
+
+class ShutdownRoomRestServlet(RestServlet):
+    """Shuts down a room by removing all local users from the room and blocking
+    all future invites and joins to the room. Any local aliases will be repointed
+    to a new room created by `new_room_user_id` and kicked users will be auto
+    joined to the new room.
+    """
+
+    PATTERNS = historical_admin_path_patterns("/shutdown_room/(?P<room_id>[^/]+)")
+
+    DEFAULT_MESSAGE = (
+        "Sharing illegal content on this server is not permitted and rooms in"
+        " violation will be blocked."
+ ) + + def __init__(self, hs): + self.hs = hs + self.store = hs.get_datastore() + self.state = hs.get_state_handler() + self._room_creation_handler = hs.get_room_creation_handler() + self.event_creation_handler = hs.get_event_creation_handler() + self.room_member_handler = hs.get_room_member_handler() + self.auth = hs.get_auth() + + async def on_POST(self, request, room_id): + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) + + content = parse_json_object_from_request(request) + assert_params_in_dict(content, ["new_room_user_id"]) + new_room_user_id = content["new_room_user_id"] + + room_creator_requester = create_requester(new_room_user_id) + + message = content.get("message", self.DEFAULT_MESSAGE) + room_name = content.get("room_name", "Content Violation Notification") + + info = await self._room_creation_handler.create_room( + room_creator_requester, + config={ + "preset": "public_chat", + "name": room_name, + "power_level_content_override": {"users_default": -10}, + }, + ratelimit=False, + ) + new_room_id = info["room_id"] + + requester_user_id = requester.user.to_string() + + logger.info( + "Shutting down room %r, joining to new room: %r", room_id, new_room_id + ) + + # This will work even if the room is already blocked, but that is + # desirable in case the first attempt at blocking the room failed below. + await self.store.block_room(room_id, requester_user_id) + + users = await self.state.get_current_users_in_room(room_id) + kicked_users = [] + failed_to_kick_users = [] + for user_id in users: + if not self.hs.is_mine_id(user_id): + continue + + logger.info("Kicking %r from %r...", user_id, room_id) + + try: + target_requester = create_requester(user_id) + await self.room_member_handler.update_membership( + requester=target_requester, + target=target_requester.user, + room_id=room_id, + action=Membership.LEAVE, + content={}, + ratelimit=False, + require_consent=False, + ) + + await self.room_member_handler.forget(target_requester.user, room_id) + + await self.room_member_handler.update_membership( + requester=target_requester, + target=target_requester.user, + room_id=new_room_id, + action=Membership.JOIN, + content={}, + ratelimit=False, + require_consent=False, + ) + + kicked_users.append(user_id) + except Exception: + logger.exception( + "Failed to leave old room and join new room for %r", user_id + ) + failed_to_kick_users.append(user_id) + + await self.event_creation_handler.create_and_send_nonmember_event( + room_creator_requester, + { + "type": "m.room.message", + "content": {"body": message, "msgtype": "m.text"}, + "room_id": new_room_id, + "sender": new_room_user_id, + }, + ratelimit=False, + ) + + aliases_for_room = await maybe_awaitable( + self.store.get_aliases_for_room(room_id) + ) + + await self.store.update_aliases_for_room( + room_id, new_room_id, requester_user_id + ) + + return ( + 200, + { + "kicked_users": kicked_users, + "failed_to_kick_users": failed_to_kick_users, + "local_aliases": aliases_for_room, + "new_room_id": new_room_id, + }, + ) diff --git a/synapse/rest/admin/users.py b/synapse/rest/admin/users.py index d5d124a0d..58a83f93a 100644 --- a/synapse/rest/admin/users.py +++ b/synapse/rest/admin/users.py @@ -12,17 +12,419 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
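
The users.py additions that follow restore the servlets removed above largely verbatim; the one small cleanup is in UsersRestServlet, which now binds hs.get_handlers().admin_handler directly instead of holding the whole handlers object. A pattern worth noticing while reading them is how uniformly input is validated. The condensed handler below shows that idiom; ExampleAdminServlet is invented for illustration, while the helpers are the real ones imported in the diff.

    from synapse.http.servlet import (
        RestServlet,
        assert_params_in_dict,
        parse_json_object_from_request,
    )
    from synapse.rest.admin._base import assert_requester_is_admin
    from synapse.types import UserID

    class ExampleAdminServlet(RestServlet):  # hypothetical
        def __init__(self, hs):
            self.auth = hs.get_auth()

        async def on_POST(self, request, target_user_id):
            await assert_requester_is_admin(self.auth, request)  # 403 otherwise
            body = parse_json_object_from_request(request)  # 400 on bad JSON
            assert_params_in_dict(body, ["new_password"])  # 400 on missing keys
            UserID.from_string(target_user_id)  # 400 on a malformed user ID
            return 200, {}
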
+import hashlib
+import hmac
+import logging
 import re
 
-from synapse.api.errors import SynapseError
+from six import text_type
+from six.moves import http_client
+
+from synapse.api.constants import UserTypes
+from synapse.api.errors import Codes, SynapseError
 from synapse.http.servlet import (
     RestServlet,
     assert_params_in_dict,
+    parse_integer,
     parse_json_object_from_request,
+    parse_string,
+)
+from synapse.rest.admin._base import (
+    assert_requester_is_admin,
+    assert_user_is_admin,
+    historical_admin_path_patterns,
 )
-from synapse.rest.admin import assert_requester_is_admin, assert_user_is_admin
 from synapse.types import UserID
 
+logger = logging.getLogger(__name__)
+
+
+class UsersRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.admin_handler = hs.get_handlers().admin_handler
+
+    async def on_GET(self, request, user_id):
+        target_user = UserID.from_string(user_id)
+        await assert_requester_is_admin(self.auth, request)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only users a local user")
+
+        ret = await self.admin_handler.get_users()
+
+        return 200, ret
+
+
+class GetUsersPaginatedRestServlet(RestServlet):
+    """Get request to get specific number of users from Synapse.
+    This needs user to have administrator access in Synapse.
+    Example:
+        http://localhost:8008/_synapse/admin/v1/users_paginate/
+        @admin:user?access_token=admin_access_token&start=0&limit=10
+    Returns:
+        200 OK with json object {list[dict[str, Any]], count} or empty object.
+    """
+
+    PATTERNS = historical_admin_path_patterns(
+        "/users_paginate/(?P<target_user_id>[^/]*)"
+    )
+
+    def __init__(self, hs):
+        self.store = hs.get_datastore()
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.handlers = hs.get_handlers()
+
+    async def on_GET(self, request, target_user_id):
+        """Get request to get specific number of users from Synapse.
+        This needs user to have administrator access in Synapse.
+        """
+        await assert_requester_is_admin(self.auth, request)
+
+        target_user = UserID.from_string(target_user_id)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only users a local user")
+
+        order = "name"  # order by name in user table
+        start = parse_integer(request, "start", required=True)
+        limit = parse_integer(request, "limit", required=True)
+
+        logger.info("limit: %s, start: %s", limit, start)
+
+        ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit)
+        return 200, ret
+
+    async def on_POST(self, request, target_user_id):
+        """Post request to get specific number of users from Synapse..
+        This needs user to have administrator access in Synapse.
+        Example:
+            http://localhost:8008/_synapse/admin/v1/users_paginate/
+            @admin:user?access_token=admin_access_token
+        JsonBodyToSend:
+            {
+                "start": "0",
+                "limit": "10
+            }
+        Returns:
+            200 OK with json object {list[dict[str, Any]], count} or empty object.
+ """ + await assert_requester_is_admin(self.auth, request) + UserID.from_string(target_user_id) + + order = "name" # order by name in user table + params = parse_json_object_from_request(request) + assert_params_in_dict(params, ["limit", "start"]) + limit = params["limit"] + start = params["start"] + logger.info("limit: %s, start: %s", limit, start) + + ret = await self.handlers.admin_handler.get_users_paginate(order, start, limit) + return 200, ret + + +class UserRegisterServlet(RestServlet): + """ + Attributes: + NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted + nonces (dict[str, int]): The nonces that we will accept. A dict of + nonce to the time it was generated, in int seconds. + """ + + PATTERNS = historical_admin_path_patterns("/register") + NONCE_TIMEOUT = 60 + + def __init__(self, hs): + self.handlers = hs.get_handlers() + self.reactor = hs.get_reactor() + self.nonces = {} + self.hs = hs + + def _clear_old_nonces(self): + """ + Clear out old nonces that are older than NONCE_TIMEOUT. + """ + now = int(self.reactor.seconds()) + + for k, v in list(self.nonces.items()): + if now - v > self.NONCE_TIMEOUT: + del self.nonces[k] + + def on_GET(self, request): + """ + Generate a new nonce. + """ + self._clear_old_nonces() + + nonce = self.hs.get_secrets().token_hex(64) + self.nonces[nonce] = int(self.reactor.seconds()) + return 200, {"nonce": nonce} + + async def on_POST(self, request): + self._clear_old_nonces() + + if not self.hs.config.registration_shared_secret: + raise SynapseError(400, "Shared secret registration is not enabled") + + body = parse_json_object_from_request(request) + + if "nonce" not in body: + raise SynapseError(400, "nonce must be specified", errcode=Codes.BAD_JSON) + + nonce = body["nonce"] + + if nonce not in self.nonces: + raise SynapseError(400, "unrecognised nonce") + + # Delete the nonce, so it can't be reused, even if it's invalid + del self.nonces[nonce] + + if "username" not in body: + raise SynapseError( + 400, "username must be specified", errcode=Codes.BAD_JSON + ) + else: + if ( + not isinstance(body["username"], text_type) + or len(body["username"]) > 512 + ): + raise SynapseError(400, "Invalid username") + + username = body["username"].encode("utf-8") + if b"\x00" in username: + raise SynapseError(400, "Invalid username") + + if "password" not in body: + raise SynapseError( + 400, "password must be specified", errcode=Codes.BAD_JSON + ) + else: + if ( + not isinstance(body["password"], text_type) + or len(body["password"]) > 512 + ): + raise SynapseError(400, "Invalid password") + + password = body["password"].encode("utf-8") + if b"\x00" in password: + raise SynapseError(400, "Invalid password") + + admin = body.get("admin", None) + user_type = body.get("user_type", None) + + if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES: + raise SynapseError(400, "Invalid user type") + + got_mac = body["mac"] + + want_mac = hmac.new( + key=self.hs.config.registration_shared_secret.encode(), + digestmod=hashlib.sha1, + ) + want_mac.update(nonce.encode("utf8")) + want_mac.update(b"\x00") + want_mac.update(username) + want_mac.update(b"\x00") + want_mac.update(password) + want_mac.update(b"\x00") + want_mac.update(b"admin" if admin else b"notadmin") + if user_type: + want_mac.update(b"\x00") + want_mac.update(user_type.encode("utf8")) + want_mac = want_mac.hexdigest() + + if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")): + raise SynapseError(403, "HMAC incorrect") + + # Reuse the parts of 
RegisterRestServlet to reduce code duplication
+        from synapse.rest.client.v2_alpha.register import RegisterRestServlet
+
+        register = RegisterRestServlet(self.hs)
+
+        user_id = await register.registration_handler.register_user(
+            localpart=body["username"].lower(),
+            password=body["password"],
+            admin=bool(admin),
+            user_type=user_type,
+        )
+
+        result = await register._create_registration_details(user_id, body)
+        return 200, result
+
+
+class WhoisRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/whois/(?P<user_id>[^/]*)")
+
+    def __init__(self, hs):
+        self.hs = hs
+        self.auth = hs.get_auth()
+        self.handlers = hs.get_handlers()
+
+    async def on_GET(self, request, user_id):
+        target_user = UserID.from_string(user_id)
+        requester = await self.auth.get_user_by_req(request)
+        auth_user = requester.user
+
+        if target_user != auth_user:
+            await assert_user_is_admin(self.auth, auth_user)
+
+        if not self.hs.is_mine(target_user):
+            raise SynapseError(400, "Can only whois a local user")
+
+        ret = await self.handlers.admin_handler.get_whois(target_user)
+
+        return 200, ret
+
+
+class DeactivateAccountRestServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/deactivate/(?P<target_user_id>[^/]*)")
+
+    def __init__(self, hs):
+        self._deactivate_account_handler = hs.get_deactivate_account_handler()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request, target_user_id):
+        await assert_requester_is_admin(self.auth, request)
+        body = parse_json_object_from_request(request, allow_empty_body=True)
+        erase = body.get("erase", False)
+        if not isinstance(erase, bool):
+            raise SynapseError(
+                http_client.BAD_REQUEST,
+                "Param 'erase' must be a boolean, if given",
+                Codes.BAD_JSON,
+            )
+
+        UserID.from_string(target_user_id)
+
+        result = await self._deactivate_account_handler.deactivate_account(
+            target_user_id, erase
+        )
+        if result:
+            id_server_unbind_result = "success"
+        else:
+            id_server_unbind_result = "no-support"
+
+        return 200, {"id_server_unbind_result": id_server_unbind_result}
+
+
+class AccountValidityRenewServlet(RestServlet):
+    PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
+
+    def __init__(self, hs):
+        """
+        Args:
+            hs (synapse.server.HomeServer): server
+        """
+        self.hs = hs
+        self.account_activity_handler = hs.get_account_validity_handler()
+        self.auth = hs.get_auth()
+
+    async def on_POST(self, request):
+        await assert_requester_is_admin(self.auth, request)
+
+        body = parse_json_object_from_request(request)
+
+        if "user_id" not in body:
+            raise SynapseError(400, "Missing property 'user_id' in the request body")
+
+        expiration_ts = await self.account_activity_handler.renew_account_for_user(
+            body["user_id"],
+            body.get("expiration_ts"),
+            not body.get("enable_renewal_emails", True),
+        )
+
+        res = {"expiration_ts": expiration_ts}
+        return 200, res
+
+
+class ResetPasswordRestServlet(RestServlet):
+    """Post request to allow an administrator to reset the password for a user.
+    This requires the requester to have administrator access in Synapse.
+    Example:
+        http://localhost:8008/_synapse/admin/v1/reset_password/
+        @user:to_reset_password?access_token=admin_access_token
+    JsonBodyToSend:
+        {
+            "new_password": "secret"
+        }
+    Returns:
+        200 OK with an empty object on success, otherwise an error.
+ """ + + PATTERNS = historical_admin_path_patterns( + "/reset_password/(?P[^/]*)" + ) + + def __init__(self, hs): + self.store = hs.get_datastore() + self.hs = hs + self.auth = hs.get_auth() + self._set_password_handler = hs.get_set_password_handler() + + async def on_POST(self, request, target_user_id): + """Post request to allow an administrator reset password for a user. + This needs user to have administrator access in Synapse. + """ + requester = await self.auth.get_user_by_req(request) + await assert_user_is_admin(self.auth, requester.user) + + UserID.from_string(target_user_id) + + params = parse_json_object_from_request(request) + assert_params_in_dict(params, ["new_password"]) + new_password = params["new_password"] + + await self._set_password_handler.set_password( + target_user_id, new_password, requester + ) + return 200, {} + + +class SearchUsersRestServlet(RestServlet): + """Get request to search user table for specific users according to + search term. + This needs user to have administrator access in Synapse. + Example: + http://localhost:8008/_synapse/admin/v1/search_users/ + @admin:user?access_token=admin_access_token&term=alice + Returns: + 200 OK with json object {list[dict[str, Any]], count} or empty object. + """ + + PATTERNS = historical_admin_path_patterns("/search_users/(?P[^/]*)") + + def __init__(self, hs): + self.store = hs.get_datastore() + self.hs = hs + self.auth = hs.get_auth() + self.handlers = hs.get_handlers() + + async def on_GET(self, request, target_user_id): + """Get request to search user table for specific users according to + search term. + This needs user to have a administrator access in Synapse. + """ + await assert_requester_is_admin(self.auth, request) + + target_user = UserID.from_string(target_user_id) + + # To allow all users to get the users list + # if not is_admin and target_user != auth_user: + # raise AuthError(403, "You are not a server admin") + + if not self.hs.is_mine(target_user): + raise SynapseError(400, "Can only users a local user") + + term = parse_string(request, "term", required=True) + logger.info("term: %s ", term) + + ret = await self.handlers.admin_handler.search_users(term) + return 200, ret + class UserAdminServlet(RestServlet): """ From 6356f2088f0adb681fe24a8435955b19883fa3b4 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Wed, 20 Nov 2019 12:09:06 +0000 Subject: [PATCH 089/133] Test if a purge can make /messages return 500 responses --- tests/rest/client/v1/test_rooms.py | 72 ++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index 5e38fd6ce..ebaa67e89 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -25,7 +25,9 @@ from twisted.internet import defer import synapse.rest.admin from synapse.api.constants import EventContentFields, EventTypes, Membership +from synapse.handlers.pagination import PurgeStatus from synapse.rest.client.v1 import login, profile, room +from synapse.util.stringutils import random_string from tests import unittest @@ -910,6 +912,76 @@ class RoomMessageListTestCase(RoomBase): return channel.json_body["chunk"] + def test_room_messages_purge(self): + store = self.hs.get_datastore() + pagination_handler = self.hs.get_pagination_handler() + + # Send a first message in the room, which will be removed by the purge. 
+        first_event_id = self.helper.send(self.room_id, "message 1")["event_id"]
+        first_token = self.get_success(
+            store.get_topological_token_for_event(first_event_id)
+        )
+
+        # Send a second message in the room, which won't be removed, and which we'll
+        # use as the marker to purge events before.
+        second_event_id = self.helper.send(self.room_id, "message 2")["event_id"]
+        second_token = self.get_success(
+            store.get_topological_token_for_event(second_event_id)
+        )
+
+        # Send a third event in the room to ensure we don't fall under any edge case
+        # due to our marker being the latest forward extremity in the room.
+        self.helper.send(self.room_id, "message 3")
+
+        # Check that we get the first and second message when querying /messages.
+        request, channel = self.make_request(
+            "GET",
+            "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s"
+            % (self.room_id, second_token, json.dumps({"types": [EventTypes.Message]})),
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        chunk = channel.json_body["chunk"]
+        self.assertEqual(len(chunk), 2, [event["content"] for event in chunk])
+
+        # Purge every event before the second event.
+        purge_id = random_string(16)
+        pagination_handler._purges_by_id[purge_id] = PurgeStatus()
+        self.get_success(pagination_handler._purge_history(
+            purge_id=purge_id,
+            room_id=self.room_id,
+            token=second_token,
+            delete_local_events=True,
+        ))
+
+        # Check that we only get the second message through /messages now that the first
+        # has been purged.
+        request, channel = self.make_request(
+            "GET",
+            "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s"
+            % (self.room_id, second_token, json.dumps({"types": [EventTypes.Message]})),
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        chunk = channel.json_body["chunk"]
+        self.assertEqual(len(chunk), 1, [event["content"] for event in chunk])
+
+        # Check that we get no event, but also no error, when querying /messages with
+        # the token that was pointing at the first event, because we don't have it
+        # anymore.
+        request, channel = self.make_request(
+            "GET",
+            "/rooms/%s/messages?access_token=x&from=%s&dir=b&filter=%s"
+            % (self.room_id, first_token, json.dumps({"types": [EventTypes.Message]})),
+        )
+        self.render(request)
+        self.assertEqual(channel.code, 200, channel.json_body)
+
+        chunk = channel.json_body["chunk"]
+        self.assertEqual(len(chunk), 0, [event["content"] for event in chunk])
+
 
 class RoomSearchTestCase(unittest.HomeserverTestCase):
     servlets = [

From 234f55f3c4295f08399b725cda8a8aa4b559f1f5 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Wed, 20 Nov 2019 13:32:31 +0000
Subject: [PATCH 090/133] Docker: Change permissions for data dir before
 attempting to write to it (#6389)

---
 changelog.d/6389.bugfix | 1 +
 docker/start.py         | 6 +++---
 2 files changed, 4 insertions(+), 3 deletions(-)
 create mode 100644 changelog.d/6389.bugfix

diff --git a/changelog.d/6389.bugfix b/changelog.d/6389.bugfix
new file mode 100644
index 000000000..c553622b0
--- /dev/null
+++ b/changelog.d/6389.bugfix
@@ -0,0 +1 @@
+Fix permission denied error when trying to generate a config file with the docker image.
\ No newline at end of file diff --git a/docker/start.py b/docker/start.py index 6e1cb807a..97fd247f8 100755 --- a/docker/start.py +++ b/docker/start.py @@ -169,11 +169,11 @@ def run_generate_config(environ, ownership): # log("running %s" % (args, )) if ownership is not None: - args = ["su-exec", ownership] + args - os.execv("/sbin/su-exec", args) - # make sure that synapse has perms to write to the data dir. subprocess.check_output(["chown", ownership, data_dir]) + + args = ["su-exec", ownership] + args + os.execv("/sbin/su-exec", args) else: os.execv("/usr/local/bin/python", args) From 41e4566682946fca8600214969c4e9562b5a9315 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Wed, 20 Nov 2019 14:12:42 +0000 Subject: [PATCH 091/133] 1.6.0rc1 --- CHANGES.md | 74 ++++++++++++++++++++++++++++++++++++++++ changelog.d/5727.feature | 1 - changelog.d/6140.misc | 1 - changelog.d/6164.doc | 1 - changelog.d/6213.bugfix | 1 - changelog.d/6218.misc | 1 - changelog.d/6220.feature | 1 - changelog.d/6232.bugfix | 1 - changelog.d/6235.bugfix | 1 - changelog.d/6238.feature | 1 - changelog.d/6240.misc | 1 - changelog.d/6250.misc | 1 - changelog.d/6251.misc | 1 - changelog.d/6253.bugfix | 1 - changelog.d/6254.bugfix | 1 - changelog.d/6257.doc | 1 - changelog.d/6259.misc | 1 - changelog.d/6263.misc | 1 - changelog.d/6269.misc | 1 - changelog.d/6270.misc | 1 - changelog.d/6271.misc | 1 - changelog.d/6272.doc | 1 - changelog.d/6273.doc | 1 - changelog.d/6274.misc | 1 - changelog.d/6275.misc | 1 - changelog.d/6276.misc | 1 - changelog.d/6277.misc | 1 - changelog.d/6278.bugfix | 1 - changelog.d/6279.misc | 1 - changelog.d/6280.misc | 1 - changelog.d/6284.bugfix | 1 - changelog.d/6291.misc | 1 - changelog.d/6294.misc | 1 - changelog.d/6295.misc | 1 - changelog.d/6298.misc | 1 - changelog.d/6300.misc | 1 - changelog.d/6301.feature | 1 - changelog.d/6304.misc | 1 - changelog.d/6305.misc | 1 - changelog.d/6306.bugfix | 1 - changelog.d/6307.bugfix | 1 - changelog.d/6308.misc | 1 - changelog.d/6310.feature | 1 - changelog.d/6312.misc | 1 - changelog.d/6313.bugfix | 1 - changelog.d/6314.misc | 1 - changelog.d/6317.misc | 1 - changelog.d/6318.misc | 1 - changelog.d/6319.misc | 1 - changelog.d/6320.bugfix | 1 - changelog.d/6330.misc | 1 - changelog.d/6335.bugfix | 1 - changelog.d/6336.misc | 1 - changelog.d/6338.bugfix | 1 - changelog.d/6340.feature | 1 - changelog.d/6341.misc | 1 - changelog.d/6357.misc | 1 - changelog.d/6359.bugfix | 1 - changelog.d/6361.misc | 1 - changelog.d/6363.bugfix | 1 - changelog.d/6389.bugfix | 1 - synapse/__init__.py | 2 +- 62 files changed, 75 insertions(+), 61 deletions(-) delete mode 100644 changelog.d/5727.feature delete mode 100644 changelog.d/6140.misc delete mode 100644 changelog.d/6164.doc delete mode 100644 changelog.d/6213.bugfix delete mode 100644 changelog.d/6218.misc delete mode 100644 changelog.d/6220.feature delete mode 100644 changelog.d/6232.bugfix delete mode 100644 changelog.d/6235.bugfix delete mode 100644 changelog.d/6238.feature delete mode 100644 changelog.d/6240.misc delete mode 100644 changelog.d/6250.misc delete mode 100644 changelog.d/6251.misc delete mode 100644 changelog.d/6253.bugfix delete mode 100644 changelog.d/6254.bugfix delete mode 100644 changelog.d/6257.doc delete mode 100644 changelog.d/6259.misc delete mode 100644 changelog.d/6263.misc delete mode 100644 changelog.d/6269.misc delete mode 100644 changelog.d/6270.misc delete mode 100644 changelog.d/6271.misc delete mode 100644 changelog.d/6272.doc delete mode 100644 changelog.d/6273.doc delete mode 100644 
changelog.d/6274.misc delete mode 100644 changelog.d/6275.misc delete mode 100644 changelog.d/6276.misc delete mode 100644 changelog.d/6277.misc delete mode 100644 changelog.d/6278.bugfix delete mode 100644 changelog.d/6279.misc delete mode 100644 changelog.d/6280.misc delete mode 100644 changelog.d/6284.bugfix delete mode 100644 changelog.d/6291.misc delete mode 100644 changelog.d/6294.misc delete mode 100644 changelog.d/6295.misc delete mode 100644 changelog.d/6298.misc delete mode 100644 changelog.d/6300.misc delete mode 100644 changelog.d/6301.feature delete mode 100644 changelog.d/6304.misc delete mode 100644 changelog.d/6305.misc delete mode 100644 changelog.d/6306.bugfix delete mode 100644 changelog.d/6307.bugfix delete mode 100644 changelog.d/6308.misc delete mode 100644 changelog.d/6310.feature delete mode 100644 changelog.d/6312.misc delete mode 100644 changelog.d/6313.bugfix delete mode 100644 changelog.d/6314.misc delete mode 100644 changelog.d/6317.misc delete mode 100644 changelog.d/6318.misc delete mode 100644 changelog.d/6319.misc delete mode 100644 changelog.d/6320.bugfix delete mode 100644 changelog.d/6330.misc delete mode 100644 changelog.d/6335.bugfix delete mode 100644 changelog.d/6336.misc delete mode 100644 changelog.d/6338.bugfix delete mode 100644 changelog.d/6340.feature delete mode 100644 changelog.d/6341.misc delete mode 100644 changelog.d/6357.misc delete mode 100644 changelog.d/6359.bugfix delete mode 100644 changelog.d/6361.misc delete mode 100644 changelog.d/6363.bugfix delete mode 100644 changelog.d/6389.bugfix diff --git a/CHANGES.md b/CHANGES.md index 9312dc294..f4f61db5d 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,77 @@ +Synapse 1.6.0rc1 (2019-11-20) +============================= + +Features +-------- + +- Add federation support for cross-signing. ([\#5727](https://github.com/matrix-org/synapse/issues/5727)) +- Increase default room version from 4 to 5, thereby enforcing server key validity period checks. ([\#6220](https://github.com/matrix-org/synapse/issues/6220)) +- Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars. ([\#6238](https://github.com/matrix-org/synapse/issues/6238)) +- Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). ([\#6301](https://github.com/matrix-org/synapse/issues/6301), [\#6310](https://github.com/matrix-org/synapse/issues/6310), [\#6340](https://github.com/matrix-org/synapse/issues/6340)) + + +Bugfixes +-------- + +- Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460. ([\#6213](https://github.com/matrix-org/synapse/issues/6213)) +- Remove a room from a server's public rooms list on room upgrade. ([\#6232](https://github.com/matrix-org/synapse/issues/6232), [\#6235](https://github.com/matrix-org/synapse/issues/6235)) +- Delete keys from key backup when deleting backup versions. ([\#6253](https://github.com/matrix-org/synapse/issues/6253)) +- Make notification of cross-signing signatures work with workers. ([\#6254](https://github.com/matrix-org/synapse/issues/6254)) +- Fix exception when remote servers attempt to join a room that they're not allowed to join. ([\#6278](https://github.com/matrix-org/synapse/issues/6278)) +- Prevent errors from appearing on Synapse startup if `git` is not installed. ([\#6284](https://github.com/matrix-org/synapse/issues/6284)) +- Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. 
([\#6306](https://github.com/matrix-org/synapse/issues/6306))
+- Fix `/purge_room` admin API. ([\#6307](https://github.com/matrix-org/synapse/issues/6307))
+- Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. ([\#6313](https://github.com/matrix-org/synapse/issues/6313))
+- Fix bug which caused rejected events to be persisted with the wrong room state. ([\#6320](https://github.com/matrix-org/synapse/issues/6320))
+- Fix bug where `rc_login` ratelimiting would prematurely kick in. ([\#6335](https://github.com/matrix-org/synapse/issues/6335))
+- Prevent the server taking a long time to start up when guest registration is enabled. ([\#6338](https://github.com/matrix-org/synapse/issues/6338))
+- Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. ([\#6359](https://github.com/matrix-org/synapse/issues/6359))
+- Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause "unable to decrypt" errors. ([\#6363](https://github.com/matrix-org/synapse/issues/6363))
+- Fix permission denied error when trying to generate a config file with the docker image. ([\#6389](https://github.com/matrix-org/synapse/issues/6389))
+
+
+Improved Documentation
+----------------------
+
+- Contributor documentation now mentions script to run linters. ([\#6164](https://github.com/matrix-org/synapse/issues/6164))
+- Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate. ([\#6257](https://github.com/matrix-org/synapse/issues/6257))
+- Update `INSTALL.md` Email section to talk about `account_threepid_delegates`. ([\#6272](https://github.com/matrix-org/synapse/issues/6272))
+- Fix a small typo in `account_threepid_delegates` configuration option. ([\#6273](https://github.com/matrix-org/synapse/issues/6273))
+
+
+Internal Changes
+----------------
+
+- Add a CI job to test the `synapse_port_db` script. ([\#6140](https://github.com/matrix-org/synapse/issues/6140), [\#6276](https://github.com/matrix-org/synapse/issues/6276))
+- Convert EventContext to an attrs. ([\#6218](https://github.com/matrix-org/synapse/issues/6218))
+- Move `persist_events` out from main data store. ([\#6240](https://github.com/matrix-org/synapse/issues/6240), [\#6300](https://github.com/matrix-org/synapse/issues/6300))
+- Reduce verbosity of user/room stats. ([\#6250](https://github.com/matrix-org/synapse/issues/6250))
+- Reduce impact of debug logging. ([\#6251](https://github.com/matrix-org/synapse/issues/6251))
+- Expose some homeserver functionality to spam checkers. ([\#6259](https://github.com/matrix-org/synapse/issues/6259))
+- Change cache descriptors to always return deferreds. ([\#6263](https://github.com/matrix-org/synapse/issues/6263), [\#6291](https://github.com/matrix-org/synapse/issues/6291))
+- Fix incorrect comment regarding the functionality of an `if` statement. ([\#6269](https://github.com/matrix-org/synapse/issues/6269))
+- Update CI to run `isort` over the `scripts` and `scripts-dev` directories. ([\#6270](https://github.com/matrix-org/synapse/issues/6270))
+- Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. ([\#6271](https://github.com/matrix-org/synapse/issues/6271), [\#6314](https://github.com/matrix-org/synapse/issues/6314))
+- Port replication http server endpoints to async/await.
([\#6274](https://github.com/matrix-org/synapse/issues/6274))
+- Port room rest handlers to async/await. ([\#6275](https://github.com/matrix-org/synapse/issues/6275))
+- Remove redundant CLI parameters on CI's `flake8` step. ([\#6277](https://github.com/matrix-org/synapse/issues/6277))
+- Port `federation_server.py` to async/await. ([\#6279](https://github.com/matrix-org/synapse/issues/6279))
+- Port receipt and read markers to async/await. ([\#6280](https://github.com/matrix-org/synapse/issues/6280))
+- Split out state storage into separate data store. ([\#6294](https://github.com/matrix-org/synapse/issues/6294), [\#6295](https://github.com/matrix-org/synapse/issues/6295))
+- Refactor EventContext for clarity. ([\#6298](https://github.com/matrix-org/synapse/issues/6298))
+- Update the version of black used to 19.10b0. ([\#6304](https://github.com/matrix-org/synapse/issues/6304))
+- Add some documentation about worker replication. ([\#6305](https://github.com/matrix-org/synapse/issues/6305))
+- Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. ([\#6308](https://github.com/matrix-org/synapse/issues/6308))
+- Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. ([\#6312](https://github.com/matrix-org/synapse/issues/6312))
+- Add optional python dependencies and dependent binary libraries to snapcraft packaging. ([\#6317](https://github.com/matrix-org/synapse/issues/6317))
+- Remove the dependency on psutil and replace functionality with the stdlib `resource` module. ([\#6318](https://github.com/matrix-org/synapse/issues/6318), [\#6336](https://github.com/matrix-org/synapse/issues/6336))
+- Improve documentation for EventContext fields. ([\#6319](https://github.com/matrix-org/synapse/issues/6319))
+- Add some checks that we aren't using state from rejected events. ([\#6330](https://github.com/matrix-org/synapse/issues/6330))
+- Add continuous integration for python 3.8. ([\#6341](https://github.com/matrix-org/synapse/issues/6341))
+- Correct spacing/case of various instances of the word "homeserver". ([\#6357](https://github.com/matrix-org/synapse/issues/6357))
+- Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room. ([\#6361](https://github.com/matrix-org/synapse/issues/6361))
+
+
 Synapse 1.5.1 (2019-11-06)
 ==========================
 
diff --git a/changelog.d/5727.feature b/changelog.d/5727.feature
deleted file mode 100644
index 819bebf2d..000000000
--- a/changelog.d/5727.feature
+++ /dev/null
@@ -1 +0,0 @@
-Add federation support for cross-signing.
diff --git a/changelog.d/6140.misc b/changelog.d/6140.misc
deleted file mode 100644
index 0feb97ec6..000000000
--- a/changelog.d/6140.misc
+++ /dev/null
@@ -1 +0,0 @@
-Add a CI job to test the `synapse_port_db` script.
\ No newline at end of file
diff --git a/changelog.d/6164.doc b/changelog.d/6164.doc
deleted file mode 100644
index f9395b02b..000000000
--- a/changelog.d/6164.doc
+++ /dev/null
@@ -1 +0,0 @@
-Contributor documentation now mentions script to run linters.
diff --git a/changelog.d/6213.bugfix b/changelog.d/6213.bugfix
deleted file mode 100644
index 2bb2d0885..000000000
--- a/changelog.d/6213.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix LruCache callback deduplication for Python 3.8. Contributed by @V02460.
diff --git a/changelog.d/6218.misc b/changelog.d/6218.misc
deleted file mode 100644
index 49d10c36c..000000000
--- a/changelog.d/6218.misc
+++ /dev/null
@@ -1 +0,0 @@
-Convert EventContext to an attrs.
diff --git a/changelog.d/6220.feature b/changelog.d/6220.feature deleted file mode 100644 index 8343e9912..000000000 --- a/changelog.d/6220.feature +++ /dev/null @@ -1 +0,0 @@ -Increase default room version from 4 to 5, thereby enforcing server key validity period checks. diff --git a/changelog.d/6232.bugfix b/changelog.d/6232.bugfix deleted file mode 100644 index 12718ba93..000000000 --- a/changelog.d/6232.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove a room from a server's public rooms list on room upgrade. \ No newline at end of file diff --git a/changelog.d/6235.bugfix b/changelog.d/6235.bugfix deleted file mode 100644 index 12718ba93..000000000 --- a/changelog.d/6235.bugfix +++ /dev/null @@ -1 +0,0 @@ -Remove a room from a server's public rooms list on room upgrade. \ No newline at end of file diff --git a/changelog.d/6238.feature b/changelog.d/6238.feature deleted file mode 100644 index d225ac33b..000000000 --- a/changelog.d/6238.feature +++ /dev/null @@ -1 +0,0 @@ -Add support for outbound http proxying via http_proxy/HTTPS_PROXY env vars. diff --git a/changelog.d/6240.misc b/changelog.d/6240.misc deleted file mode 100644 index 0b3d7a14a..000000000 --- a/changelog.d/6240.misc +++ /dev/null @@ -1 +0,0 @@ -Move `persist_events` out from main data store. diff --git a/changelog.d/6250.misc b/changelog.d/6250.misc deleted file mode 100644 index 12e3fe66b..000000000 --- a/changelog.d/6250.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce verbosity of user/room stats. diff --git a/changelog.d/6251.misc b/changelog.d/6251.misc deleted file mode 100644 index 371c6983b..000000000 --- a/changelog.d/6251.misc +++ /dev/null @@ -1 +0,0 @@ -Reduce impact of debug logging. diff --git a/changelog.d/6253.bugfix b/changelog.d/6253.bugfix deleted file mode 100644 index 266fae381..000000000 --- a/changelog.d/6253.bugfix +++ /dev/null @@ -1 +0,0 @@ -Delete keys from key backup when deleting backup versions. diff --git a/changelog.d/6254.bugfix b/changelog.d/6254.bugfix deleted file mode 100644 index 3181484b8..000000000 --- a/changelog.d/6254.bugfix +++ /dev/null @@ -1 +0,0 @@ -Make notification of cross-signing signatures work with workers. diff --git a/changelog.d/6257.doc b/changelog.d/6257.doc deleted file mode 100644 index e985afde0..000000000 --- a/changelog.d/6257.doc +++ /dev/null @@ -1 +0,0 @@ -Modify CAPTCHA_SETUP.md to update the terms `private key` and `public key` to `secret key` and `site key` respectively. Contributed by Yash Jipkate. diff --git a/changelog.d/6259.misc b/changelog.d/6259.misc deleted file mode 100644 index 3ff81b1ac..000000000 --- a/changelog.d/6259.misc +++ /dev/null @@ -1 +0,0 @@ -Expose some homeserver functionality to spam checkers. diff --git a/changelog.d/6263.misc b/changelog.d/6263.misc deleted file mode 100644 index 7b1bb4b67..000000000 --- a/changelog.d/6263.misc +++ /dev/null @@ -1 +0,0 @@ -Change cache descriptors to always return deferreds. diff --git a/changelog.d/6269.misc b/changelog.d/6269.misc deleted file mode 100644 index 9fd333cc8..000000000 --- a/changelog.d/6269.misc +++ /dev/null @@ -1 +0,0 @@ -Fix incorrect comment regarding the functionality of an `if` statement. \ No newline at end of file diff --git a/changelog.d/6270.misc b/changelog.d/6270.misc deleted file mode 100644 index d1c581132..000000000 --- a/changelog.d/6270.misc +++ /dev/null @@ -1 +0,0 @@ -Update CI to run `isort` over the `scripts` and `scripts-dev` directories. 
\ No newline at end of file diff --git a/changelog.d/6271.misc b/changelog.d/6271.misc deleted file mode 100644 index 236976027..000000000 --- a/changelog.d/6271.misc +++ /dev/null @@ -1 +0,0 @@ -Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. \ No newline at end of file diff --git a/changelog.d/6272.doc b/changelog.d/6272.doc deleted file mode 100644 index 232180bcd..000000000 --- a/changelog.d/6272.doc +++ /dev/null @@ -1 +0,0 @@ -Update `INSTALL.md` Email section to talk about `account_threepid_delegates`. \ No newline at end of file diff --git a/changelog.d/6273.doc b/changelog.d/6273.doc deleted file mode 100644 index 21a41d987..000000000 --- a/changelog.d/6273.doc +++ /dev/null @@ -1 +0,0 @@ -Fix a small typo in `account_threepid_delegates` configuration option. \ No newline at end of file diff --git a/changelog.d/6274.misc b/changelog.d/6274.misc deleted file mode 100644 index eb4966124..000000000 --- a/changelog.d/6274.misc +++ /dev/null @@ -1 +0,0 @@ -Port replication http server endpoints to async/await. diff --git a/changelog.d/6275.misc b/changelog.d/6275.misc deleted file mode 100644 index f57e2c4ad..000000000 --- a/changelog.d/6275.misc +++ /dev/null @@ -1 +0,0 @@ -Port room rest handlers to async/await. diff --git a/changelog.d/6276.misc b/changelog.d/6276.misc deleted file mode 100644 index 4a4428251..000000000 --- a/changelog.d/6276.misc +++ /dev/null @@ -1 +0,0 @@ -Add a CI job to test the `synapse_port_db` script. diff --git a/changelog.d/6277.misc b/changelog.d/6277.misc deleted file mode 100644 index 490713577..000000000 --- a/changelog.d/6277.misc +++ /dev/null @@ -1 +0,0 @@ -Remove redundant CLI parameters on CI's `flake8` step. \ No newline at end of file diff --git a/changelog.d/6278.bugfix b/changelog.d/6278.bugfix deleted file mode 100644 index c10727046..000000000 --- a/changelog.d/6278.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix exception when remote servers attempt to join a room that they're not allowed to join. diff --git a/changelog.d/6279.misc b/changelog.d/6279.misc deleted file mode 100644 index 5f5144a9e..000000000 --- a/changelog.d/6279.misc +++ /dev/null @@ -1 +0,0 @@ -Port `federation_server.py` to async/await. diff --git a/changelog.d/6280.misc b/changelog.d/6280.misc deleted file mode 100644 index 96a0eb21b..000000000 --- a/changelog.d/6280.misc +++ /dev/null @@ -1 +0,0 @@ -Port receipt and read markers to async/wait. diff --git a/changelog.d/6284.bugfix b/changelog.d/6284.bugfix deleted file mode 100644 index cf15053d2..000000000 --- a/changelog.d/6284.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent errors from appearing on Synapse startup if `git` is not installed. \ No newline at end of file diff --git a/changelog.d/6291.misc b/changelog.d/6291.misc deleted file mode 100644 index 7b1bb4b67..000000000 --- a/changelog.d/6291.misc +++ /dev/null @@ -1 +0,0 @@ -Change cache descriptors to always return deferreds. diff --git a/changelog.d/6294.misc b/changelog.d/6294.misc deleted file mode 100644 index a3e6b8296..000000000 --- a/changelog.d/6294.misc +++ /dev/null @@ -1 +0,0 @@ -Split out state storage into separate data store. diff --git a/changelog.d/6295.misc b/changelog.d/6295.misc deleted file mode 100644 index a3e6b8296..000000000 --- a/changelog.d/6295.misc +++ /dev/null @@ -1 +0,0 @@ -Split out state storage into separate data store. 
diff --git a/changelog.d/6298.misc b/changelog.d/6298.misc deleted file mode 100644 index d4190730b..000000000 --- a/changelog.d/6298.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor EventContext for clarity. \ No newline at end of file diff --git a/changelog.d/6300.misc b/changelog.d/6300.misc deleted file mode 100644 index 0b3d7a14a..000000000 --- a/changelog.d/6300.misc +++ /dev/null @@ -1 +0,0 @@ -Move `persist_events` out from main data store. diff --git a/changelog.d/6301.feature b/changelog.d/6301.feature deleted file mode 100644 index 78a187a1d..000000000 --- a/changelog.d/6301.feature +++ /dev/null @@ -1 +0,0 @@ -Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). diff --git a/changelog.d/6304.misc b/changelog.d/6304.misc deleted file mode 100644 index 20372b4f7..000000000 --- a/changelog.d/6304.misc +++ /dev/null @@ -1 +0,0 @@ -Update the version of black used to 19.10b0. diff --git a/changelog.d/6305.misc b/changelog.d/6305.misc deleted file mode 100644 index f047fc306..000000000 --- a/changelog.d/6305.misc +++ /dev/null @@ -1 +0,0 @@ -Add some documentation about worker replication. diff --git a/changelog.d/6306.bugfix b/changelog.d/6306.bugfix deleted file mode 100644 index c7dcbcdce..000000000 --- a/changelog.d/6306.bugfix +++ /dev/null @@ -1 +0,0 @@ -Appservice requests will no longer contain a double slash prefix when the appservice url provided ends in a slash. diff --git a/changelog.d/6307.bugfix b/changelog.d/6307.bugfix deleted file mode 100644 index f2917c505..000000000 --- a/changelog.d/6307.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `/purge_room` admin API. diff --git a/changelog.d/6308.misc b/changelog.d/6308.misc deleted file mode 100644 index 72be63ba4..000000000 --- a/changelog.d/6308.misc +++ /dev/null @@ -1 +0,0 @@ -Move admin endpoints into separate files. Contributed by Awesome Technologies Innovationslabor GmbH. \ No newline at end of file diff --git a/changelog.d/6310.feature b/changelog.d/6310.feature deleted file mode 100644 index 78a187a1d..000000000 --- a/changelog.d/6310.feature +++ /dev/null @@ -1 +0,0 @@ -Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). diff --git a/changelog.d/6312.misc b/changelog.d/6312.misc deleted file mode 100644 index 55e3e1654..000000000 --- a/changelog.d/6312.misc +++ /dev/null @@ -1 +0,0 @@ -Document the use of `lint.sh` for code style enforcement & extend it to run on specified paths only. diff --git a/changelog.d/6313.bugfix b/changelog.d/6313.bugfix deleted file mode 100644 index f4d4a97f0..000000000 --- a/changelog.d/6313.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix the `hidden` field in the `devices` table for SQLite versions prior to 3.23.0. diff --git a/changelog.d/6314.misc b/changelog.d/6314.misc deleted file mode 100644 index 236976027..000000000 --- a/changelog.d/6314.misc +++ /dev/null @@ -1 +0,0 @@ -Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated. \ No newline at end of file diff --git a/changelog.d/6317.misc b/changelog.d/6317.misc deleted file mode 100644 index a67d13fa7..000000000 --- a/changelog.d/6317.misc +++ /dev/null @@ -1 +0,0 @@ -Add optional python dependencies and dependant binary libraries to snapcraft packaging. 
diff --git a/changelog.d/6318.misc b/changelog.d/6318.misc deleted file mode 100644 index 63527ccef..000000000 --- a/changelog.d/6318.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the dependency on psutil and replace functionality with the stdlib `resource` module. diff --git a/changelog.d/6319.misc b/changelog.d/6319.misc deleted file mode 100644 index 9711ef21e..000000000 --- a/changelog.d/6319.misc +++ /dev/null @@ -1 +0,0 @@ -Improve documentation for EventContext fields. diff --git a/changelog.d/6320.bugfix b/changelog.d/6320.bugfix deleted file mode 100644 index 2c3fad565..000000000 --- a/changelog.d/6320.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug which casued rejected events to be persisted with the wrong room state. diff --git a/changelog.d/6330.misc b/changelog.d/6330.misc deleted file mode 100644 index 6239cba26..000000000 --- a/changelog.d/6330.misc +++ /dev/null @@ -1 +0,0 @@ -Add some checks that we aren't using state from rejected events. diff --git a/changelog.d/6335.bugfix b/changelog.d/6335.bugfix deleted file mode 100644 index a95f6b9ee..000000000 --- a/changelog.d/6335.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where `rc_login` ratelimiting would prematurely kick in. diff --git a/changelog.d/6336.misc b/changelog.d/6336.misc deleted file mode 100644 index 63527ccef..000000000 --- a/changelog.d/6336.misc +++ /dev/null @@ -1 +0,0 @@ -Remove the dependency on psutil and replace functionality with the stdlib `resource` module. diff --git a/changelog.d/6338.bugfix b/changelog.d/6338.bugfix deleted file mode 100644 index 8e469f0fb..000000000 --- a/changelog.d/6338.bugfix +++ /dev/null @@ -1 +0,0 @@ -Prevent the server taking a long time to start up when guest registration is enabled. \ No newline at end of file diff --git a/changelog.d/6340.feature b/changelog.d/6340.feature deleted file mode 100644 index 78a187a1d..000000000 --- a/changelog.d/6340.feature +++ /dev/null @@ -1 +0,0 @@ -Implement label-based filtering on `/sync` and `/messages` ([MSC2326](https://github.com/matrix-org/matrix-doc/pull/2326)). diff --git a/changelog.d/6341.misc b/changelog.d/6341.misc deleted file mode 100644 index 359b9bf1d..000000000 --- a/changelog.d/6341.misc +++ /dev/null @@ -1 +0,0 @@ -Add continuous integration for python 3.8. \ No newline at end of file diff --git a/changelog.d/6357.misc b/changelog.d/6357.misc deleted file mode 100644 index a68df0f38..000000000 --- a/changelog.d/6357.misc +++ /dev/null @@ -1 +0,0 @@ -Correct spacing/case of various instances of the word "homeserver". \ No newline at end of file diff --git a/changelog.d/6359.bugfix b/changelog.d/6359.bugfix deleted file mode 100644 index 22bf5f642..000000000 --- a/changelog.d/6359.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where upgrading a guest account to a full user would fail when account validity is enabled. \ No newline at end of file diff --git a/changelog.d/6361.misc b/changelog.d/6361.misc deleted file mode 100644 index 324d74ebf..000000000 --- a/changelog.d/6361.misc +++ /dev/null @@ -1 +0,0 @@ -Temporarily blacklist the failing unit test PurgeRoomTestCase.test_purge_room. diff --git a/changelog.d/6363.bugfix b/changelog.d/6363.bugfix deleted file mode 100644 index d023b4918..000000000 --- a/changelog.d/6363.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix `to_device` stream ID getting reset every time Synapse restarts, which had the potential to cause unable to decrypt errors. 
\ No newline at end of file
diff --git a/changelog.d/6389.bugfix b/changelog.d/6389.bugfix
deleted file mode 100644
index c553622b0..000000000
--- a/changelog.d/6389.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix permission denied error when trying to generate a config file with the docker image.
\ No newline at end of file
diff --git a/synapse/__init__.py b/synapse/__init__.py
index 1c27d6800..1d962f5dc 100644
--- a/synapse/__init__.py
+++ b/synapse/__init__.py
@@ -36,7 +36,7 @@ try:
 except ImportError:
     pass
 
-__version__ = "1.5.1"
+__version__ = "1.6.0rc1"
 
 if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)):
     # We import here so that we don't have to install a bunch of deps when

From 486be06f4877842dfb109caac42ab052e09fd5b0 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 20 Nov 2019 15:08:03 +0000
Subject: [PATCH 092/133] Changelog

---
 changelog.d/6392.misc | 1 +
 1 file changed, 1 insertion(+)
 create mode 100644 changelog.d/6392.misc

diff --git a/changelog.d/6392.misc b/changelog.d/6392.misc
new file mode 100644
index 000000000..a00257944
--- /dev/null
+++ b/changelog.d/6392.misc
@@ -0,0 +1 @@
+Add a test scenario to make sure room history purges don't break `/messages` in the future.

From e2a20326e8141fdf9304434901da38c64b917a78 Mon Sep 17 00:00:00 2001
From: Brendan Abolivier
Date: Wed, 20 Nov 2019 15:08:47 +0000
Subject: [PATCH 093/133] Lint

---
 tests/rest/client/v1/test_rooms.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py
index ebaa67e89..e84e578f9 100644
--- a/tests/rest/client/v1/test_rooms.py
+++ b/tests/rest/client/v1/test_rooms.py
@@ -948,12 +948,14 @@ class RoomMessageListTestCase(RoomBase):
         # Purge every event before the second event.
         purge_id = random_string(16)
         pagination_handler._purges_by_id[purge_id] = PurgeStatus()
-        self.get_success(pagination_handler._purge_history(
-            purge_id=purge_id,
-            room_id=self.room_id,
-            token=second_token,
-            delete_local_events=True,
-        ))
+        self.get_success(
+            pagination_handler._purge_history(
+                purge_id=purge_id,
+                room_id=self.room_id,
+                token=second_token,
+                delete_local_events=True,
+            )
+        )
 
         # Check that we only get the second message through /messages now that the first
         # has been purged.
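A quick aside for readers following the purge work exercised by the tests above: outside of unit tests, the same code path is normally driven over the admin HTTP API rather than by calling the pagination handler directly. The sketch below is illustrative only and is not part of any patch in this series; the endpoint shapes (`purge_history/<room_id>/<event_id>`, `purge_history_status/<purge_id>`, and the `delete_local_events` flag) follow Synapse's admin API documentation of this period, while the host, token, and helper names are placeholders.

    # A minimal sketch of driving the purge-history admin API, assuming a
    # homeserver on localhost:8008 and a valid admin access token.
    import requests

    BASE = "http://localhost:8008/_synapse/admin/v1"
    HEADERS = {"Authorization": "Bearer ADMIN_ACCESS_TOKEN"}  # placeholder token

    def purge_history(room_id, event_id, delete_local_events=False):
        # Start purging all events that came before the given event; the
        # response carries a purge_id that can be polled for progress.
        resp = requests.post(
            "%s/purge_history/%s/%s" % (BASE, room_id, event_id),
            headers=HEADERS,
            json={"delete_local_events": delete_local_events},
        )
        resp.raise_for_status()
        return resp.json()["purge_id"]

    def purge_status(purge_id):
        # Poll a previously started purge; the documented statuses are
        # "active", "complete" and "failed".
        resp = requests.get(
            "%s/purge_history_status/%s" % (BASE, purge_id), headers=HEADERS
        )
        resp.raise_for_status()
        return resp.json()["status"]

Polling is needed because, as the pagination handler shows, purges run asynchronously under a per-room write lock and only update their `PurgeStatus` once complete.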
From 9cc168e42e14481387b4e6de73bfe1f8f9a2a508 Mon Sep 17 00:00:00 2001 From: Matthew Hodgson Date: Wed, 20 Nov 2019 18:44:45 +0000 Subject: [PATCH 094/133] update macOS installation instructions --- INSTALL.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/INSTALL.md b/INSTALL.md index 29e0abafd..9b7360f0e 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -133,9 +133,9 @@ sudo yum install libtiff-devel libjpeg-devel libzip-devel freetype-devel \ sudo yum groupinstall "Development Tools" ``` -#### Mac OS X +#### macOS -Installing prerequisites on Mac OS X: +Installing prerequisites on macOS: ``` xcode-select --install @@ -144,6 +144,14 @@ sudo pip install virtualenv brew install pkg-config libffi ``` +On macOS Catalina (10.15) you may need to explicitly install OpenSSL +via brew and inform `pip` about it so that `psycopg2` builds: + +``` +brew install openssl@1.1 +export LDFLAGS=-L/usr/local/Cellar/openssl\@1.1/1.1.1d/lib/ +``` + #### OpenSUSE Installing prerequisites on openSUSE: From 3916e1b97a1ffc481dfdf66f7da58201a52140a9 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 21 Nov 2019 12:00:14 +0000 Subject: [PATCH 095/133] Clean up newline quote marks around the codebase (#6362) --- changelog.d/6362.misc | 1 + synapse/app/federation_sender.py | 2 +- synapse/appservice/api.py | 2 +- synapse/config/appservice.py | 2 +- synapse/config/room_directory.py | 2 +- synapse/config/server.py | 6 +++--- synapse/federation/persistence.py | 4 ++-- synapse/federation/sender/__init__.py | 2 +- synapse/federation/sender/transaction_manager.py | 4 ++-- synapse/handlers/directory.py | 2 +- synapse/http/servlet.py | 2 +- synapse/push/httppusher.py | 5 ++--- synapse/push/mailer.py | 4 ++-- synapse/rest/media/v1/preview_url_resource.py | 2 +- synapse/server_notices/consent_server_notices.py | 2 +- synapse/storage/_base.py | 2 +- synapse/storage/data_stores/main/deviceinbox.py | 2 +- synapse/storage/data_stores/main/end_to_end_keys.py | 6 +++--- synapse/storage/data_stores/main/events.py | 8 +++----- synapse/storage/data_stores/main/filtering.py | 2 +- synapse/storage/data_stores/main/media_repository.py | 6 +++--- synapse/storage/data_stores/main/registration.py | 4 +--- synapse/storage/data_stores/main/stream.py | 2 +- synapse/storage/data_stores/main/tags.py | 4 +--- synapse/storage/prepare_database.py | 2 +- synapse/streams/config.py | 9 ++++++--- 26 files changed, 43 insertions(+), 46 deletions(-) create mode 100644 changelog.d/6362.misc diff --git a/changelog.d/6362.misc b/changelog.d/6362.misc new file mode 100644 index 000000000..b79a5bea9 --- /dev/null +++ b/changelog.d/6362.misc @@ -0,0 +1 @@ +Clean up some unnecessary quotation marks around the codebase. \ No newline at end of file diff --git a/synapse/app/federation_sender.py b/synapse/app/federation_sender.py index 139221ad3..448e45e00 100644 --- a/synapse/app/federation_sender.py +++ b/synapse/app/federation_sender.py @@ -69,7 +69,7 @@ class FederationSenderSlaveStore( self.federation_out_pos_startup = self._get_federation_out_pos(db_conn) def _get_federation_out_pos(self, db_conn): - sql = "SELECT stream_id FROM federation_stream_position" " WHERE type = ?" + sql = "SELECT stream_id FROM federation_stream_position WHERE type = ?" 
sql = self.database_engine.convert_param_style(sql) txn = db_conn.cursor() diff --git a/synapse/appservice/api.py b/synapse/appservice/api.py index 3e25bf574..57174da02 100644 --- a/synapse/appservice/api.py +++ b/synapse/appservice/api.py @@ -185,7 +185,7 @@ class ApplicationServiceApi(SimpleHttpClient): if not _is_valid_3pe_metadata(info): logger.warning( - "query_3pe_protocol to %s did not return a" " valid result", uri + "query_3pe_protocol to %s did not return a valid result", uri ) return None diff --git a/synapse/config/appservice.py b/synapse/config/appservice.py index e77d3387f..ca43e96bd 100644 --- a/synapse/config/appservice.py +++ b/synapse/config/appservice.py @@ -134,7 +134,7 @@ def _load_appservice(hostname, as_info, config_filename): for regex_obj in as_info["namespaces"][ns]: if not isinstance(regex_obj, dict): raise ValueError( - "Expected namespace entry in %s to be an object," " but got %s", + "Expected namespace entry in %s to be an object, but got %s", ns, regex_obj, ) diff --git a/synapse/config/room_directory.py b/synapse/config/room_directory.py index 7c9f05bde..7ac769967 100644 --- a/synapse/config/room_directory.py +++ b/synapse/config/room_directory.py @@ -170,7 +170,7 @@ class _RoomDirectoryRule(object): self.action = action else: raise ConfigError( - "%s rules can only have action of 'allow'" " or 'deny'" % (option_name,) + "%s rules can only have action of 'allow' or 'deny'" % (option_name,) ) self._alias_matches_all = alias == "*" diff --git a/synapse/config/server.py b/synapse/config/server.py index 00d01c43a..11336d754 100644 --- a/synapse/config/server.py +++ b/synapse/config/server.py @@ -223,7 +223,7 @@ class ServerConfig(Config): self.federation_ip_range_blacklist.update(["0.0.0.0", "::"]) except Exception as e: raise ConfigError( - "Invalid range(s) provided in " "federation_ip_range_blacklist: %s" % e + "Invalid range(s) provided in federation_ip_range_blacklist: %s" % e ) if self.public_baseurl is not None: @@ -787,14 +787,14 @@ class ServerConfig(Config): "--print-pidfile", action="store_true", default=None, - help="Print the path to the pidfile just" " before daemonizing", + help="Print the path to the pidfile just before daemonizing", ) server_group.add_argument( "--manhole", metavar="PORT", dest="manhole", type=int, - help="Turn on the twisted telnet manhole" " service on the given port.", + help="Turn on the twisted telnet manhole service on the given port.", ) diff --git a/synapse/federation/persistence.py b/synapse/federation/persistence.py index 44edcabed..d68b4bd67 100644 --- a/synapse/federation/persistence.py +++ b/synapse/federation/persistence.py @@ -44,7 +44,7 @@ class TransactionActions(object): response code and response body. 
""" if not transaction.transaction_id: - raise RuntimeError("Cannot persist a transaction with no " "transaction_id") + raise RuntimeError("Cannot persist a transaction with no transaction_id") return self.store.get_received_txn_response(transaction.transaction_id, origin) @@ -56,7 +56,7 @@ class TransactionActions(object): Deferred """ if not transaction.transaction_id: - raise RuntimeError("Cannot persist a transaction with no " "transaction_id") + raise RuntimeError("Cannot persist a transaction with no transaction_id") return self.store.set_received_txn_response( transaction.transaction_id, origin, code, response diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 2b2ee8612..4ebb0e8bc 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -49,7 +49,7 @@ sent_pdus_destination_dist_count = Counter( sent_pdus_destination_dist_total = Counter( "synapse_federation_client_sent_pdu_destinations:total", - "" "Total number of PDUs queued for sending across all destinations", + "Total number of PDUs queued for sending across all destinations", ) diff --git a/synapse/federation/sender/transaction_manager.py b/synapse/federation/sender/transaction_manager.py index 67b3e1ab6..5fed626d5 100644 --- a/synapse/federation/sender/transaction_manager.py +++ b/synapse/federation/sender/transaction_manager.py @@ -84,7 +84,7 @@ class TransactionManager(object): txn_id = str(self._next_txn_id) logger.debug( - "TX [%s] {%s} Attempting new transaction" " (pdus: %d, edus: %d)", + "TX [%s] {%s} Attempting new transaction (pdus: %d, edus: %d)", destination, txn_id, len(pdus), @@ -103,7 +103,7 @@ class TransactionManager(object): self._next_txn_id += 1 logger.info( - "TX [%s] {%s} Sending transaction [%s]," " (PDUs: %d, EDUs: %d)", + "TX [%s] {%s} Sending transaction [%s], (PDUs: %d, EDUs: %d)", destination, txn_id, transaction.transaction_id, diff --git a/synapse/handlers/directory.py b/synapse/handlers/directory.py index 69051101a..a07d2f1a1 100644 --- a/synapse/handlers/directory.py +++ b/synapse/handlers/directory.py @@ -119,7 +119,7 @@ class DirectoryHandler(BaseHandler): if not service.is_interested_in_alias(room_alias.to_string()): raise SynapseError( 400, - "This application service has not reserved" " this kind of alias.", + "This application service has not reserved this kind of alias.", errcode=Codes.EXCLUSIVE, ) else: diff --git a/synapse/http/servlet.py b/synapse/http/servlet.py index e9a5e46ce..13fcb408a 100644 --- a/synapse/http/servlet.py +++ b/synapse/http/servlet.py @@ -96,7 +96,7 @@ def parse_boolean_from_args(args, name, default=None, required=False): return {b"true": True, b"false": False}[args[name][0]] except Exception: message = ( - "Boolean query parameter %r must be one of" " ['true', 'false']" + "Boolean query parameter %r must be one of ['true', 'false']" ) % (name,) raise SynapseError(400, message) else: diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index e994037be..d0879b049 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -246,7 +246,7 @@ class HttpPusher(object): # fixed, we don't suddenly deliver a load # of old notifications. logger.warning( - "Giving up on a notification to user %s, " "pushkey %s", + "Giving up on a notification to user %s, pushkey %s", self.user_id, self.pushkey, ) @@ -299,8 +299,7 @@ class HttpPusher(object): # for sanity, we only remove the pushkey if it # was the one we actually sent... 
logger.warning( - ("Ignoring rejected pushkey %s because we" " didn't send it"), - pk, + ("Ignoring rejected pushkey %s because we didn't send it"), pk, ) else: logger.info("Pushkey %s was rejected: removing", pk) diff --git a/synapse/push/mailer.py b/synapse/push/mailer.py index 1d15a06a5..b13b646bf 100644 --- a/synapse/push/mailer.py +++ b/synapse/push/mailer.py @@ -43,7 +43,7 @@ logger = logging.getLogger(__name__) MESSAGE_FROM_PERSON_IN_ROOM = ( - "You have a message on %(app)s from %(person)s " "in the %(room)s room..." + "You have a message on %(app)s from %(person)s in the %(room)s room..." ) MESSAGE_FROM_PERSON = "You have a message on %(app)s from %(person)s..." MESSAGES_FROM_PERSON = "You have messages on %(app)s from %(person)s..." @@ -55,7 +55,7 @@ MESSAGES_FROM_PERSON_AND_OTHERS = ( "You have messages on %(app)s from %(person)s and others..." ) INVITE_FROM_PERSON_TO_ROOM = ( - "%(person)s has invited you to join the " "%(room)s room on %(app)s..." + "%(person)s has invited you to join the %(room)s room on %(app)s..." ) INVITE_FROM_PERSON = "%(person)s has invited you to chat on %(app)s..." diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 15c15a12f..a23d6f5c7 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -122,7 +122,7 @@ class PreviewUrlResource(DirectServeResource): pattern = entry[attrib] value = getattr(url_tuple, attrib) logger.debug( - "Matching attrib '%s' with value '%s' against" " pattern '%s'", + "Matching attrib '%s' with value '%s' against pattern '%s'", attrib, value, pattern, diff --git a/synapse/server_notices/consent_server_notices.py b/synapse/server_notices/consent_server_notices.py index 415e9c17d..5736c5603 100644 --- a/synapse/server_notices/consent_server_notices.py +++ b/synapse/server_notices/consent_server_notices.py @@ -54,7 +54,7 @@ class ConsentServerNotices(object): ) if "body" not in self._server_notice_content: raise ConfigError( - "user_consent server_notice_consent must contain a 'body' " "key." + "user_consent server_notice_consent must contain a 'body' key." ) self._consent_uri_builder = ConsentURIBuilder(hs.config) diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index ab596fa68..6b8a9cd89 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -851,7 +851,7 @@ class SQLBaseStore(object): allvalues.update(values) latter = "UPDATE SET " + ", ".join(k + "=EXCLUDED." + k for k in values) - sql = ("INSERT INTO %s (%s) VALUES (%s) " "ON CONFLICT (%s) DO %s") % ( + sql = ("INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO %s") % ( table, ", ".join(k for k in allvalues), ", ".join("?" for _ in allvalues), diff --git a/synapse/storage/data_stores/main/deviceinbox.py b/synapse/storage/data_stores/main/deviceinbox.py index 96cd0fb77..a23744f11 100644 --- a/synapse/storage/data_stores/main/deviceinbox.py +++ b/synapse/storage/data_stores/main/deviceinbox.py @@ -380,7 +380,7 @@ class DeviceInboxStore(DeviceInboxWorkerStore, DeviceInboxBackgroundUpdateStore) devices = list(messages_by_device.keys()) if len(devices) == 1 and devices[0] == "*": # Handle wildcard device_ids. - sql = "SELECT device_id FROM devices" " WHERE user_id = ?" + sql = "SELECT device_id FROM devices WHERE user_id = ?" 
txn.execute(sql, (user_id,)) message_json = json.dumps(messages_by_device["*"]) for row in txn: diff --git a/synapse/storage/data_stores/main/end_to_end_keys.py b/synapse/storage/data_stores/main/end_to_end_keys.py index 073412a78..d8ad59ad9 100644 --- a/synapse/storage/data_stores/main/end_to_end_keys.py +++ b/synapse/storage/data_stores/main/end_to_end_keys.py @@ -138,9 +138,9 @@ class EndToEndKeyWorkerStore(SQLBaseStore): result.setdefault(user_id, {})[device_id] = None # get signatures on the device - signature_sql = ( - "SELECT * " " FROM e2e_cross_signing_signatures " " WHERE %s" - ) % (" OR ".join("(" + q + ")" for q in signature_query_clauses)) + signature_sql = ("SELECT * FROM e2e_cross_signing_signatures WHERE %s") % ( + " OR ".join("(" + q + ")" for q in signature_query_clauses) + ) txn.execute(signature_sql, signature_query_params) rows = self.cursor_to_dict(txn) diff --git a/synapse/storage/data_stores/main/events.py b/synapse/storage/data_stores/main/events.py index 878f7568a..627c0b67f 100644 --- a/synapse/storage/data_stores/main/events.py +++ b/synapse/storage/data_stores/main/events.py @@ -713,9 +713,7 @@ class EventsStore( metadata_json = encode_json(event.internal_metadata.get_dict()) - sql = ( - "UPDATE event_json SET internal_metadata = ?" " WHERE event_id = ?" - ) + sql = "UPDATE event_json SET internal_metadata = ? WHERE event_id = ?" txn.execute(sql, (metadata_json, event.event_id)) # Add an entry to the ex_outlier_stream table to replicate the @@ -732,7 +730,7 @@ class EventsStore( }, ) - sql = "UPDATE events SET outlier = ?" " WHERE event_id = ?" + sql = "UPDATE events SET outlier = ? WHERE event_id = ?" txn.execute(sql, (False, event.event_id)) # Update the event_backward_extremities table now that this @@ -1479,7 +1477,7 @@ class EventsStore( # We do joins against events_to_purge for e.g. calculating state # groups to purge, etc., so lets make an index. - txn.execute("CREATE INDEX events_to_purge_id" " ON events_to_purge(event_id)") + txn.execute("CREATE INDEX events_to_purge_id ON events_to_purge(event_id)") txn.execute("SELECT event_id, should_delete FROM events_to_purge") event_rows = txn.fetchall() diff --git a/synapse/storage/data_stores/main/filtering.py b/synapse/storage/data_stores/main/filtering.py index a2a2a6792..f05ace299 100644 --- a/synapse/storage/data_stores/main/filtering.py +++ b/synapse/storage/data_stores/main/filtering.py @@ -55,7 +55,7 @@ class FilteringStore(SQLBaseStore): if filter_id_response is not None: return filter_id_response[0] - sql = "SELECT MAX(filter_id) FROM user_filters " "WHERE user_id = ?" + sql = "SELECT MAX(filter_id) FROM user_filters WHERE user_id = ?" txn.execute(sql, (user_localpart,)) max_id = txn.fetchone()[0] if max_id is None: diff --git a/synapse/storage/data_stores/main/media_repository.py b/synapse/storage/data_stores/main/media_repository.py index 84b5f3ad5..0f2887bdc 100644 --- a/synapse/storage/data_stores/main/media_repository.py +++ b/synapse/storage/data_stores/main/media_repository.py @@ -337,7 +337,7 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): if len(media_ids) == 0: return - sql = "DELETE FROM local_media_repository_url_cache" " WHERE media_id = ?" + sql = "DELETE FROM local_media_repository_url_cache WHERE media_id = ?" 
def _delete_url_cache_txn(txn): txn.executemany(sql, [(media_id,) for media_id in media_ids]) @@ -365,11 +365,11 @@ class MediaRepositoryStore(MediaRepositoryBackgroundUpdateStore): return def _delete_url_cache_media_txn(txn): - sql = "DELETE FROM local_media_repository" " WHERE media_id = ?" + sql = "DELETE FROM local_media_repository WHERE media_id = ?" txn.executemany(sql, [(media_id,) for media_id in media_ids]) - sql = "DELETE FROM local_media_repository_thumbnails" " WHERE media_id = ?" + sql = "DELETE FROM local_media_repository_thumbnails WHERE media_id = ?" txn.executemany(sql, [(media_id,) for media_id in media_ids]) diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index ee1b2b2bb..6a594c160 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -377,9 +377,7 @@ class RegistrationWorkerStore(SQLBaseStore): """ def f(txn): - sql = ( - "SELECT name, password_hash FROM users" " WHERE lower(name) = lower(?)" - ) + sql = "SELECT name, password_hash FROM users WHERE lower(name) = lower(?)" txn.execute(sql, (user_id,)) return dict(txn) diff --git a/synapse/storage/data_stores/main/stream.py b/synapse/storage/data_stores/main/stream.py index 8780fdd98..9ae4a913a 100644 --- a/synapse/storage/data_stores/main/stream.py +++ b/synapse/storage/data_stores/main/stream.py @@ -616,7 +616,7 @@ class StreamWorkerStore(EventsWorkerStore, SQLBaseStore): def _get_max_topological_txn(self, txn, room_id): txn.execute( - "SELECT MAX(topological_ordering) FROM events" " WHERE room_id = ?", + "SELECT MAX(topological_ordering) FROM events WHERE room_id = ?", (room_id,), ) diff --git a/synapse/storage/data_stores/main/tags.py b/synapse/storage/data_stores/main/tags.py index 10d1887f7..aa2433971 100644 --- a/synapse/storage/data_stores/main/tags.py +++ b/synapse/storage/data_stores/main/tags.py @@ -83,9 +83,7 @@ class TagsWorkerStore(AccountDataWorkerStore): ) def get_tag_content(txn, tag_ids): - sql = ( - "SELECT tag, content" " FROM room_tags" " WHERE user_id=? AND room_id=?" - ) + sql = "SELECT tag, content FROM room_tags WHERE user_id=? AND room_id=?" results = [] for stream_id, user_id, room_id in tag_ids: txn.execute(sql, (user_id, room_id)) diff --git a/synapse/storage/prepare_database.py b/synapse/storage/prepare_database.py index 2e7753820..731e1c9d9 100644 --- a/synapse/storage/prepare_database.py +++ b/synapse/storage/prepare_database.py @@ -447,7 +447,7 @@ def _apply_module_schema_files(cur, database_engine, modname, names_and_streams) # Mark as done. 
cur.execute( database_engine.convert_param_style( - "INSERT INTO applied_module_schemas (module_name, file)" " VALUES (?,?)" + "INSERT INTO applied_module_schemas (module_name, file) VALUES (?,?)" ), (modname, name), ) diff --git a/synapse/streams/config.py b/synapse/streams/config.py index 02994ab2a..cd56cd91e 100644 --- a/synapse/streams/config.py +++ b/synapse/streams/config.py @@ -88,9 +88,12 @@ class PaginationConfig(object): raise SynapseError(400, "Invalid request.") def __repr__(self): - return ( - "PaginationConfig(from_tok=%r, to_tok=%r," " direction=%r, limit=%r)" - ) % (self.from_token, self.to_token, self.direction, self.limit) + return ("PaginationConfig(from_tok=%r, to_tok=%r, direction=%r, limit=%r)") % ( + self.from_token, + self.to_token, + self.direction, + self.limit, + ) def get_source_config(self, source_name): keyname = "%s_key" % source_name From 24cc31ee967e5c387a137e22b428dcea17fc9fa5 Mon Sep 17 00:00:00 2001 From: Aaron Raimist Date: Thu, 21 Nov 2019 11:38:14 -0600 Subject: [PATCH 096/133] Fix link to user_dir_populate.sql in the user directory docs (#6388) --- changelog.d/6388.doc | 1 + docs/user_directory.md | 3 +-- 2 files changed, 2 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6388.doc diff --git a/changelog.d/6388.doc b/changelog.d/6388.doc new file mode 100644 index 000000000..c777cb6b8 --- /dev/null +++ b/changelog.d/6388.doc @@ -0,0 +1 @@ +Fix link in the user directory documentation. diff --git a/docs/user_directory.md b/docs/user_directory.md index e64aa453c..37dc71e75 100644 --- a/docs/user_directory.md +++ b/docs/user_directory.md @@ -7,7 +7,6 @@ who are present in a publicly viewable room present on the server. The directory info is stored in various tables, which can (typically after DB corruption) get stale or out of sync. If this happens, for now the -solution to fix it is to execute the SQL here -https://github.com/matrix-org/synapse/blob/master/synapse/storage/schema/delta/53/user_dir_populate.sql +solution to fix it is to execute the SQL [here](../synapse/storage/data_stores/main/schema/delta/53/user_dir_populate.sql) and then restart synapse. This should then start a background task to flush the current tables and regenerate the directory. From 265c0bd2fe54db7f8a7dab05f41b27ce9a450563 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 22 Nov 2019 19:54:05 +0000 Subject: [PATCH 097/133] Add working build command for docker image (#6390) * Add working build command for docker image * Add changelog --- changelog.d/6390.doc | 1 + docker/README.md | 12 ++++++++++++ 2 files changed, 13 insertions(+) create mode 100644 changelog.d/6390.doc diff --git a/changelog.d/6390.doc b/changelog.d/6390.doc new file mode 100644 index 000000000..093411bec --- /dev/null +++ b/changelog.d/6390.doc @@ -0,0 +1 @@ +Add build instructions to the docker readme. \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index 24dfa77dc..9f112a01d 100644 --- a/docker/README.md +++ b/docker/README.md @@ -130,3 +130,15 @@ docker run -it --rm \ This will generate the same configuration file as the legacy mode used, but will store it in `/data/homeserver.yaml` instead of a temporary location. You can then use it as shown above at [Running synapse](#running-synapse). + +## Building the image + +If you need to build the image from a Synapse checkout, use the following `docker + build` command from the repo's root: + +``` +docker build -t matrixdotorg/synapse -f docker/Dockerfile . 
+```
+
+You can choose to build a different docker image by changing the value of the `-f` flag to
+point to another Dockerfile.

From b7367c339db153824fb47728d3eebe2f944530e6 Mon Sep 17 00:00:00 2001
From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com>
Date: Mon, 25 Nov 2019 13:26:59 +0000
Subject: [PATCH 098/133] Fix exceptions from background database update for
 event labels. (#6407)

Add some exception handling here so that events whose json cannot be
parsed are ignored rather than getting us stuck in a loop.

Fixes #6404.
---
 changelog.d/6407.bugfix                       |  1 +
 .../data_stores/main/events_bg_updates.py     | 41 +++++++++++--------
 2 files changed, 25 insertions(+), 17 deletions(-)
 create mode 100644 changelog.d/6407.bugfix

diff --git a/changelog.d/6407.bugfix b/changelog.d/6407.bugfix
new file mode 100644
index 000000000..0fdbf2a78
--- /dev/null
+++ b/changelog.d/6407.bugfix
@@ -0,0 +1 @@
+Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions.
diff --git a/synapse/storage/data_stores/main/events_bg_updates.py b/synapse/storage/data_stores/main/events_bg_updates.py
index 0ed59ef48..aa87f9abc 100644
--- a/synapse/storage/data_stores/main/events_bg_updates.py
+++ b/synapse/storage/data_stores/main/events_bg_updates.py
@@ -530,24 +530,31 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
         nbrows = 0
         last_row_event_id = ""
         for (event_id, event_json_raw) in results:
-            event_json = json.loads(event_json_raw)
+            try:
+                event_json = json.loads(event_json_raw)

-            self._simple_insert_many_txn(
-                txn=txn,
-                table="event_labels",
-                values=[
-                    {
-                        "event_id": event_id,
-                        "label": label,
-                        "room_id": event_json["room_id"],
-                        "topological_ordering": event_json["depth"],
-                    }
-                    for label in event_json["content"].get(
-                        EventContentFields.LABELS, []
-                    )
-                    if isinstance(label, str)
-                ],
-            )
+                self._simple_insert_many_txn(
+                    txn=txn,
+                    table="event_labels",
+                    values=[
+                        {
+                            "event_id": event_id,
+                            "label": label,
+                            "room_id": event_json["room_id"],
+                            "topological_ordering": event_json["depth"],
+                        }
+                        for label in event_json["content"].get(
+                            EventContentFields.LABELS, []
+                        )
+                        if isinstance(label, str)
+                    ],
+                )
+            except Exception as e:
+                logger.warning(
+                    "Unable to load event %s (no labels will be imported): %s",
+                    event_id,
+                    e,
+                )

             nbrows += 1
             last_row_event_id = event_id

From f9c9e1f07646262cb064782d4bf427dd1634617f Mon Sep 17 00:00:00 2001
From: Richard van der Hoff
Date: Mon, 25 Nov 2019 13:28:12 +0000
Subject: [PATCH 099/133] 1.6.0rc2

---
 CHANGES.md              | 9 +++++++++
 changelog.d/6407.bugfix | 1 -
 synapse/__init__.py     | 2 +-
 3 files changed, 10 insertions(+), 2 deletions(-)
 delete mode 100644 changelog.d/6407.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index f4f61db5d..d26bc7a86 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,12 @@
+Synapse 1.6.0rc2 (2019-11-25)
+=============================
+
+Bugfixes
+--------
+
+- Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions. ([\#6407](https://github.com/matrix-org/synapse/issues/6407))
+
+
 Synapse 1.6.0rc1 (2019-11-20)
 =============================

diff --git a/changelog.d/6407.bugfix b/changelog.d/6407.bugfix
deleted file mode 100644
index 0fdbf2a78..000000000
--- a/changelog.d/6407.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix a bug which could cause the background database update handler for event labels to get stuck in a loop raising exceptions.
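Stepping back to patch 098 above for a moment: the detail that actually breaks the retry loop is that `nbrows` and `last_row_event_id` advance *outside* the `try`/`except`, so the background update's progress marker moves past a corrupt row instead of re-fetching it forever. A minimal, self-contained sketch of that pattern — the `insert_labels` callable here is a stand-in for the real `_simple_insert_many_txn` call, not Synapse's actual API:

```python
import json
import logging

logger = logging.getLogger(__name__)


def import_labels(rows, insert_labels):
    """Process (event_id, event_json_raw) rows, tolerating unparseable JSON."""
    nbrows = 0
    last_row_event_id = ""
    for event_id, event_json_raw in rows:
        try:
            event_json = json.loads(event_json_raw)
            insert_labels(event_id, event_json)
        except Exception as e:
            # Log and fall through: the progress markers below still
            # advance, so this bad row is never fetched again.
            logger.warning(
                "Unable to load event %s (no labels will be imported): %s",
                event_id,
                e,
            )
        nbrows += 1
        last_row_event_id = event_id
    # The real background update persists last_row_event_id so that the
    # next batch resumes after it.
    return nbrows, last_row_event_id
```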
diff --git a/synapse/__init__.py b/synapse/__init__.py index 1d962f5dc..051c83774 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.6.0rc1" +__version__ = "1.6.0rc2" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 9eebd46048d0b34767047b2156760a1467f19ae6 Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 26 Nov 2019 03:45:50 +1100 Subject: [PATCH 100/133] Improve the performance of structured logging (#6322) --- changelog.d/6322.misc | 1 + synapse/logging/_structured.py | 14 ++++- synapse/logging/_terse_json.py | 104 ++++++++++++++++++++++++--------- tests/server.py | 2 + 4 files changed, 92 insertions(+), 29 deletions(-) create mode 100644 changelog.d/6322.misc diff --git a/changelog.d/6322.misc b/changelog.d/6322.misc new file mode 100644 index 000000000..70ef36ca8 --- /dev/null +++ b/changelog.d/6322.misc @@ -0,0 +1 @@ +Improve the performance of outputting structured logging. diff --git a/synapse/logging/_structured.py b/synapse/logging/_structured.py index 334ddaf39..ffa7b20ca 100644 --- a/synapse/logging/_structured.py +++ b/synapse/logging/_structured.py @@ -261,6 +261,18 @@ def parse_drain_configs( ) +class StoppableLogPublisher(LogPublisher): + """ + A log publisher that can tell its observers to shut down any external + communications. + """ + + def stop(self): + for obs in self._observers: + if hasattr(obs, "stop"): + obs.stop() + + def setup_structured_logging( hs, config, @@ -336,7 +348,7 @@ def setup_structured_logging( # We should never get here, but, just in case, throw an error. raise ConfigError("%s drain type cannot be configured" % (observer.type,)) - publisher = LogPublisher(*observers) + publisher = StoppableLogPublisher(*observers) log_filter = LogLevelFilterPredicate() for namespace, namespace_config in log_config.get( diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 76ce7d880..05fc64f40 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -17,25 +17,29 @@ Log formatters that output terse JSON. 
""" +import json import sys +import traceback from collections import deque from ipaddress import IPv4Address, IPv6Address, ip_address from math import floor -from typing import IO +from typing import IO, Optional import attr -from simplejson import dumps from zope.interface import implementer from twisted.application.internet import ClientService +from twisted.internet.defer import Deferred from twisted.internet.endpoints import ( HostnameEndpoint, TCP4ClientEndpoint, TCP6ClientEndpoint, ) +from twisted.internet.interfaces import IPushProducer, ITransport from twisted.internet.protocol import Factory, Protocol from twisted.logger import FileLogObserver, ILogObserver, Logger -from twisted.python.failure import Failure + +_encoder = json.JSONEncoder(ensure_ascii=False, separators=(",", ":")) def flatten_event(event: dict, metadata: dict, include_time: bool = False): @@ -141,11 +145,49 @@ def TerseJSONToConsoleLogObserver(outFile: IO[str], metadata: dict) -> FileLogOb def formatEvent(_event: dict) -> str: flattened = flatten_event(_event, metadata) - return dumps(flattened, ensure_ascii=False, separators=(",", ":")) + "\n" + return _encoder.encode(flattened) + "\n" return FileLogObserver(outFile, formatEvent) +@attr.s +@implementer(IPushProducer) +class LogProducer(object): + """ + An IPushProducer that writes logs from its buffer to its transport when it + is resumed. + + Args: + buffer: Log buffer to read logs from. + transport: Transport to write to. + """ + + transport = attr.ib(type=ITransport) + _buffer = attr.ib(type=deque) + _paused = attr.ib(default=False, type=bool, init=False) + + def pauseProducing(self): + self._paused = True + + def stopProducing(self): + self._paused = True + self._buffer = None + + def resumeProducing(self): + self._paused = False + + while self._paused is False and (self._buffer and self.transport.connected): + try: + event = self._buffer.popleft() + self.transport.write(_encoder.encode(event).encode("utf8")) + self.transport.write(b"\n") + except Exception: + # Something has gone wrong writing to the transport -- log it + # and break out of the while. + traceback.print_exc(file=sys.__stderr__) + break + + @attr.s @implementer(ILogObserver) class TerseJSONToTCPLogObserver(object): @@ -165,8 +207,9 @@ class TerseJSONToTCPLogObserver(object): metadata = attr.ib(type=dict) maximum_buffer = attr.ib(type=int) _buffer = attr.ib(default=attr.Factory(deque), type=deque) - _writer = attr.ib(default=None) + _connection_waiter = attr.ib(default=None, type=Optional[Deferred]) _logger = attr.ib(default=attr.Factory(Logger)) + _producer = attr.ib(default=None, type=Optional[LogProducer]) def start(self) -> None: @@ -187,38 +230,43 @@ class TerseJSONToTCPLogObserver(object): factory = Factory.forProtocol(Protocol) self._service = ClientService(endpoint, factory, clock=self.hs.get_reactor()) self._service.startService() + self._connect() - def _write_loop(self) -> None: + def stop(self): + self._service.stopService() + + def _connect(self) -> None: """ - Implement the write loop. + Triggers an attempt to connect then write to the remote if not already writing. 
""" - if self._writer: + if self._connection_waiter: return - self._writer = self._service.whenConnected() + self._connection_waiter = self._service.whenConnected(failAfterFailures=1) - @self._writer.addBoth + @self._connection_waiter.addErrback + def fail(r): + r.printTraceback(file=sys.__stderr__) + self._connection_waiter = None + self._connect() + + @self._connection_waiter.addCallback def writer(r): - if isinstance(r, Failure): - r.printTraceback(file=sys.__stderr__) - self._writer = None - self.hs.get_reactor().callLater(1, self._write_loop) + # We have a connection. If we already have a producer, and its + # transport is the same, just trigger a resumeProducing. + if self._producer and r.transport is self._producer.transport: + self._producer.resumeProducing() return - try: - for event in self._buffer: - r.transport.write( - dumps(event, ensure_ascii=False, separators=(",", ":")).encode( - "utf8" - ) - ) - r.transport.write(b"\n") - self._buffer.clear() - except Exception as e: - sys.__stderr__.write("Failed writing out logs with %s\n" % (str(e),)) + # If the producer is still producing, stop it. + if self._producer: + self._producer.stopProducing() - self._writer = False - self.hs.get_reactor().callLater(1, self._write_loop) + # Make a new producer and start it. + self._producer = LogProducer(buffer=self._buffer, transport=r.transport) + r.transport.registerProducer(self._producer, True) + self._producer.resumeProducing() + self._connection_waiter = None def _handle_pressure(self) -> None: """ @@ -277,4 +325,4 @@ class TerseJSONToTCPLogObserver(object): self._logger.failure("Failed clearing backpressure") # Try and write immediately. - self._write_loop() + self._connect() diff --git a/tests/server.py b/tests/server.py index f878aeaad..2b7cf4242 100644 --- a/tests/server.py +++ b/tests/server.py @@ -379,6 +379,7 @@ class FakeTransport(object): disconnecting = False disconnected = False + connected = True buffer = attr.ib(default=b"") producer = attr.ib(default=None) autoflush = attr.ib(default=True) @@ -402,6 +403,7 @@ class FakeTransport(object): "FakeTransport: Delaying disconnect until buffer is flushed" ) else: + self.connected = False self.disconnected = True def abortConnection(self): From c01d5435843ad4af3d520851e86d9938b47b2d12 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Mon, 25 Nov 2019 21:03:17 +0000 Subject: [PATCH 101/133] Make sure that we close cursors before returning from a query (#6408) There are lots of words in the comment as to why this is a good idea. Fixes #6403. --- changelog.d/6408.bugfix | 1 + synapse/storage/_base.py | 51 ++++++++++++++++---- synapse/storage/data_stores/main/receipts.py | 2 +- 3 files changed, 44 insertions(+), 10 deletions(-) create mode 100644 changelog.d/6408.bugfix diff --git a/changelog.d/6408.bugfix b/changelog.d/6408.bugfix new file mode 100644 index 000000000..c9babe599 --- /dev/null +++ b/changelog.d/6408.bugfix @@ -0,0 +1 @@ +Fix an intermittent exception when handling read-receipts. 
diff --git a/synapse/storage/_base.py b/synapse/storage/_base.py index 6b8a9cd89..459901ac6 100644 --- a/synapse/storage/_base.py +++ b/synapse/storage/_base.py @@ -409,16 +409,15 @@ class SQLBaseStore(object): i = 0 N = 5 while True: + cursor = LoggingTransaction( + conn.cursor(), + name, + self.database_engine, + after_callbacks, + exception_callbacks, + ) try: - txn = conn.cursor() - txn = LoggingTransaction( - txn, - name, - self.database_engine, - after_callbacks, - exception_callbacks, - ) - r = func(txn, *args, **kwargs) + r = func(cursor, *args, **kwargs) conn.commit() return r except self.database_engine.module.OperationalError as e: @@ -456,6 +455,40 @@ class SQLBaseStore(object): ) continue raise + finally: + # we're either about to retry with a new cursor, or we're about to + # release the connection. Once we release the connection, it could + # get used for another query, which might do a conn.rollback(). + # + # In the latter case, even though that probably wouldn't affect the + # results of this transaction, python's sqlite will reset all + # statements on the connection [1], which will make our cursor + # invalid [2]. + # + # In any case, continuing to read rows after commit()ing seems + # dubious from the PoV of ACID transactional semantics + # (sqlite explicitly says that once you commit, you may see rows + # from subsequent updates.) + # + # In psycopg2, cursors are essentially a client-side fabrication - + # all the data is transferred to the client side when the statement + # finishes executing - so in theory we could go on streaming results + # from the cursor, but attempting to do so would make us + # incompatible with sqlite, so let's make sure we're not doing that + # by closing the cursor. + # + # (*named* cursors in psycopg2 are different and are proper server- + # side things, but (a) we don't use them and (b) they are implicitly + # closed by ending the transaction anyway.) + # + # In short, if we haven't finished with the cursor yet, that's a + # problem waiting to bite us. + # + # TL;DR: we're done with the cursor, so we can close it. + # + # [1]: https://github.com/python/cpython/blob/v3.8.0/Modules/_sqlite/connection.c#L465 + # [2]: https://github.com/python/cpython/blob/v3.8.0/Modules/_sqlite/cursor.c#L236 + cursor.close() except Exception as e: logger.debug("[TXN FAIL] {%s} %s", name, e) raise diff --git a/synapse/storage/data_stores/main/receipts.py b/synapse/storage/data_stores/main/receipts.py index 0c24430f2..8b17334ff 100644 --- a/synapse/storage/data_stores/main/receipts.py +++ b/synapse/storage/data_stores/main/receipts.py @@ -280,7 +280,7 @@ class ReceiptsWorkerStore(SQLBaseStore): args.append(limit) txn.execute(sql, args) - return (r[0:5] + (json.loads(r[5]),) for r in txn) + return list(r[0:5] + (json.loads(r[5]),) for r in txn) return self.runInteraction( "get_all_updated_receipts", get_all_updated_receipts_txn From 35f9165e96f6261e15aadb439a5d2199bede3c99 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Nov 2019 12:04:48 +0000 Subject: [PATCH 102/133] Fixup docs --- changelog.d/6332.bugfix | 2 +- synapse/replication/http/devices.py | 6 +++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/changelog.d/6332.bugfix b/changelog.d/6332.bugfix index b14bd7e43..67d5170ba 100644 --- a/changelog.d/6332.bugfix +++ b/changelog.d/6332.bugfix @@ -1 +1 @@ -Fix caching devices for remote users when using workers. 
+Fix caching devices for remote users when using workers, so that we don't attempt to refetch (and potentially fail) each time a user requests devices. diff --git a/synapse/replication/http/devices.py b/synapse/replication/http/devices.py index 795ca7b65..e32aac0a2 100644 --- a/synapse/replication/http/devices.py +++ b/synapse/replication/http/devices.py @@ -21,7 +21,11 @@ logger = logging.getLogger(__name__) class ReplicationUserDevicesResyncRestServlet(ReplicationEndpoint): - """Notifies that a user has joined or left the room + """Ask master to resync the device list for a user by contacting their + server. + + This must happen on master so that the results can be correctly cached in + the database and streamed to workers. Request format: From 4d394d6415e3e4b64a577a1c83ee8acf147ce0af Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 26 Nov 2019 12:32:37 +0000 Subject: [PATCH 103/133] remove confusing fixme --- synapse/handlers/federation.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 66852153c..97d045db1 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -2187,12 +2187,6 @@ class FederationHandler(BaseHandler): logger.info("Skipping auth_event fetch for outlier") return context - # FIXME: Assumes we have and stored all the state for all the - # prev_events - # - # FIXME: what does the fixme above mean? where do prev_events come into - # it, why do we care about the state for those events, and what does "have and - # stored" mean? Seems erik wrote it in c1d860870b different_auth = event_auth_events.difference( e.event_id for e in auth_events.values() ) From 65d54c5e8c1434eada5d1b670d7db2e37d68d4f1 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Nov 2019 13:10:09 +0000 Subject: [PATCH 104/133] Fix phone home stats (#6418) Fix phone home stats --- changelog.d/6418.bugfix | 1 + synapse/app/homeserver.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6418.bugfix diff --git a/changelog.d/6418.bugfix b/changelog.d/6418.bugfix new file mode 100644 index 000000000..a1f488d3a --- /dev/null +++ b/changelog.d/6418.bugfix @@ -0,0 +1 @@ +Fix phone home stats reporting. diff --git a/synapse/app/homeserver.py b/synapse/app/homeserver.py index 73e2c29d0..883b3fb70 100644 --- a/synapse/app/homeserver.py +++ b/synapse/app/homeserver.py @@ -585,7 +585,7 @@ def run(hs): def performance_stats_init(): _stats_process.clear() _stats_process.append( - (int(hs.get_clock().time(), resource.getrusage(resource.RUSAGE_SELF))) + (int(hs.get_clock().time()), resource.getrusage(resource.RUSAGE_SELF)) ) def start_phone_stats_home(): From b98971e8a437eb3903506eadbefdf6cb2e0853d6 Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 26 Nov 2019 12:15:46 +0000 Subject: [PATCH 105/133] 1.6.0 --- CHANGES.md | 9 +++++++++ changelog.d/6418.bugfix | 1 - debian/changelog | 6 ++++++ synapse/__init__.py | 2 +- 4 files changed, 16 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/6418.bugfix diff --git a/CHANGES.md b/CHANGES.md index d26bc7a86..42281483b 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,12 @@ +Synapse 1.6.0 (2019-11-26) +========================== + +Bugfixes +-------- + +- Fix phone home stats reporting. 
([\#6418](https://github.com/matrix-org/synapse/issues/6418)) + + Synapse 1.6.0rc2 (2019-11-25) ============================= diff --git a/changelog.d/6418.bugfix b/changelog.d/6418.bugfix deleted file mode 100644 index a1f488d3a..000000000 --- a/changelog.d/6418.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix phone home stats reporting. diff --git a/debian/changelog b/debian/changelog index c4415f460..82dae017f 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.6.0) stable; urgency=medium + + * New synapse release 1.6.0. + + -- Synapse Packaging team Tue, 26 Nov 2019 12:15:40 +0000 + matrix-synapse-py3 (1.5.1) stable; urgency=medium * New synapse release 1.5.1. diff --git a/synapse/__init__.py b/synapse/__init__.py index 051c83774..53eedc004 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.6.0rc2" +__version__ = "1.6.0" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 8bb7b15894462c6a0ba81f7198f23a140100331d Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Nov 2019 15:50:17 +0000 Subject: [PATCH 106/133] Fix find_next_generated_user_id_localpart --- .../storage/data_stores/main/registration.py | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index 6a594c160..c124bbb88 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -19,7 +19,6 @@ import logging import re from six import iterkeys -from six.moves import range from twisted.internet import defer from twisted.internet.defer import Deferred @@ -482,12 +481,8 @@ class RegistrationWorkerStore(SQLBaseStore): """ Gets the localpart of the next generated user ID. - Generated user IDs are integers, and we aim for them to be as small as - we can. Unfortunately, it's possible some of them are already taken by - existing users, and there may be gaps in the already taken range. This - function returns the start of the first allocatable gap. This is to - avoid the case of ID 1000 being pre-allocated and starting at 1001 while - 0-999 are available. + Generated user IDs are integers, so we find the largest integer user ID + already taken and return that plus one. """ def _find_next_generated_user_id(txn): @@ -503,9 +498,11 @@ class RegistrationWorkerStore(SQLBaseStore): match = regex.search(user_id) if match: found.add(int(match.group(1))) - for i in range(len(found) + 1): - if i not in found: - return i + + if not found: + return 1 + + return max(found) + 1 return ( ( From ba110a2030274dc785b37b1d5ad20acc4de9c612 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Nov 2019 15:54:10 +0000 Subject: [PATCH 107/133] Newsfile --- changelog.d/6420.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6420.bugfix diff --git a/changelog.d/6420.bugfix b/changelog.d/6420.bugfix new file mode 100644 index 000000000..aef47ccca --- /dev/null +++ b/changelog.d/6420.bugfix @@ -0,0 +1 @@ +Fix broken guest registration when there are existing blocks of numeric user IDs. 
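Taken together, patches 106 and 108 (the latter follows immediately below) reduce `_find_next_generated_user_id` to "largest numeric localpart, plus one", dropping the old first-gap search. A condensed, runnable sketch of the final form, with a plain list standing in for the database rows:

```python
import re


def find_next_generated_user_id_localpart(user_ids):
    """Return the largest purely-numeric localpart seen, plus one."""
    regex = re.compile(r"^@(\d+):")
    max_found = 0
    for user_id in user_ids:
        match = regex.search(user_id)
        if match:
            max_found = max(int(match.group(1)), max_found)
    return max_found + 1


# With 1 and 1000 taken, the next generated ID is 1001: gaps below the
# maximum are deliberately never reused.
assert find_next_generated_user_id_localpart(
    ["@1:example.com", "@alice:example.com", "@1000:example.com"]
) == 1001
```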
From f8f14ba466a18ee38827629b8e9ab2d5931e5713 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 26 Nov 2019 16:06:41 +0000 Subject: [PATCH 108/133] Don't construct a set --- synapse/storage/data_stores/main/registration.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index c124bbb88..0a3c1f051 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -492,17 +492,14 @@ class RegistrationWorkerStore(SQLBaseStore): regex = re.compile(r"^@(\d+):") - found = set() + max_found = 0 for (user_id,) in txn: match = regex.search(user_id) if match: - found.add(int(match.group(1))) + max_found = max(int(match.group(1)), max_found) - if not found: - return 1 - - return max(found) + 1 + return max_found + 1 return ( ( From f0ef9708241ec65fe6f32f1ad36f719ab4ab2b53 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Tue, 26 Nov 2019 17:49:12 +0000 Subject: [PATCH 109/133] Don't restrict the tests to v1 rooms --- tests/rest/client/test_retention.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/rest/client/test_retention.py b/tests/rest/client/test_retention.py index 9e549d8a9..95475bb65 100644 --- a/tests/rest/client/test_retention.py +++ b/tests/rest/client/test_retention.py @@ -34,7 +34,6 @@ class RetentionTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config["default_room_version"] = "1" config["retention"] = { "enabled": True, "default_policy": { @@ -204,7 +203,6 @@ class RetentionNoDefaultPolicyTestCase(unittest.HomeserverTestCase): def make_homeserver(self, reactor, clock): config = self.default_config() - config["default_room_version"] = "1" config["retention"] = { "enabled": True, } From ef1a85e7733bc1979f48357dd59b638110285075 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 26 Nov 2019 18:10:50 +0000 Subject: [PATCH 110/133] Fix startup error when http proxy is defined. (#6421) Guess I only tested this on python 2 :/ Fixes #6419. --- changelog.d/6421.bugfix | 1 + synapse/rest/media/v1/preview_url_resource.py | 4 ++-- synapse/server.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6421.bugfix diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix new file mode 100644 index 000000000..7969f7f71 --- /dev/null +++ b/changelog.d/6421.bugfix @@ -0,0 +1 @@ +Fix startup error when http proxy is defined. 
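The two hunks below simply swap `os.getenv` for `os.getenvb`. On Python 3 these are genuinely different APIs: `getenv` takes and returns `str`, while `getenvb` looks the key up in `os.environb` and returns `bytes`, which is the type the proxy values are treated as further down the client stack. A standalone illustration (note that `os.getenvb` only exists on platforms with a bytes environ, i.e. not on Windows):

```python
import os

os.environ["http_proxy"] = "http://proxy.example.com:8080"

# str key in, str value out:
assert os.getenv("http_proxy") == "http://proxy.example.com:8080"

# bytes key in, bytes value out (POSIX only; os.getenvb is absent on
# Windows, where os.environb does not exist):
assert os.getenvb(b"http_proxy") == b"http://proxy.example.com:8080"
```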
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index a23d6f5c7..fb0d02aa8 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -77,8 +77,8 @@ class PreviewUrlResource(DirectServeResource): treq_args={"browser_like_redirects": True}, ip_whitelist=hs.config.url_preview_ip_range_whitelist, ip_blacklist=hs.config.url_preview_ip_range_blacklist, - http_proxy=os.getenv("http_proxy"), - https_proxy=os.getenv("HTTPS_PROXY"), + http_proxy=os.getenvb(b"http_proxy"), + https_proxy=os.getenvb(b"HTTPS_PROXY"), ) self.media_repo = media_repo self.primary_base_path = media_repo.primary_base_path diff --git a/synapse/server.py b/synapse/server.py index 90c3b072e..be9af7f98 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -318,8 +318,8 @@ class HomeServer(object): def build_proxied_http_client(self): return SimpleHttpClient( self, - http_proxy=os.getenv("http_proxy"), - https_proxy=os.getenv("HTTPS_PROXY"), + http_proxy=os.getenvb(b"http_proxy"), + https_proxy=os.getenvb(b"HTTPS_PROXY"), ) def build_room_creation_handler(self): From ce578031f4d0fe6f1eb26de4cb3d30a4175468db Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 26 Nov 2019 18:42:27 +0000 Subject: [PATCH 111/133] Remove assertion and provide a clear warning on startup for missing public_baseurl (#6379) --- changelog.d/6379.misc | 1 + synapse/config/emailconfig.py | 2 ++ synapse/config/registration.py | 7 +++++++ tests/rest/client/v2_alpha/test_register.py | 1 + 4 files changed, 11 insertions(+) create mode 100644 changelog.d/6379.misc diff --git a/changelog.d/6379.misc b/changelog.d/6379.misc new file mode 100644 index 000000000..725c2e7d8 --- /dev/null +++ b/changelog.d/6379.misc @@ -0,0 +1 @@ +Complain on startup instead of 500'ing during runtime when `public_baseurl` isn't set when necessary. \ No newline at end of file diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py index 43fad0bf8..ac1724045 100644 --- a/synapse/config/emailconfig.py +++ b/synapse/config/emailconfig.py @@ -146,6 +146,8 @@ class EmailConfig(Config): if k not in email_config: missing.append("email." + k) + # public_baseurl is required to build password reset and validation links that + # will be emailed to users if config.get("public_baseurl") is None: missing.append("public_baseurl") diff --git a/synapse/config/registration.py b/synapse/config/registration.py index 1f6dac69d..ee9614c5f 100644 --- a/synapse/config/registration.py +++ b/synapse/config/registration.py @@ -106,6 +106,13 @@ class RegistrationConfig(Config): account_threepid_delegates = config.get("account_threepid_delegates") or {} self.account_threepid_delegate_email = account_threepid_delegates.get("email") self.account_threepid_delegate_msisdn = account_threepid_delegates.get("msisdn") + if self.account_threepid_delegate_msisdn and not self.public_baseurl: + raise ConfigError( + "The configuration option `public_baseurl` is required if " + "`account_threepid_delegate.msisdn` is set, such that " + "clients know where to submit validation tokens to. Please " + "configure `public_baseurl`." 
+ ) self.default_identity_server = config.get("default_identity_server") self.allow_guest_access = config.get("allow_guest_access", False) diff --git a/tests/rest/client/v2_alpha/test_register.py b/tests/rest/client/v2_alpha/test_register.py index dab87e5ed..c0d0d2b44 100644 --- a/tests/rest/client/v2_alpha/test_register.py +++ b/tests/rest/client/v2_alpha/test_register.py @@ -203,6 +203,7 @@ class RegisterRestServletTestCase(unittest.HomeserverTestCase): @unittest.override_config( { + "public_baseurl": "https://test_server", "enable_registration_captcha": True, "user_consent": { "version": "1", From 70c0da4d82e30f8e84d0424666ea4797e437d4ca Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Tue, 26 Nov 2019 19:00:24 +0000 Subject: [PATCH 112/133] clean up buildkite output --- .buildkite/merge_base_branch.sh | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.buildkite/merge_base_branch.sh b/.buildkite/merge_base_branch.sh index eb7219a56..361440fd1 100755 --- a/.buildkite/merge_base_branch.sh +++ b/.buildkite/merge_base_branch.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -set -ex +set -e if [[ "$BUILDKITE_BRANCH" =~ ^(develop|master|dinsic|shhs|release-.*)$ ]]; then echo "Not merging forward, as this is a release branch" @@ -18,6 +18,8 @@ else GITBASE=$BUILDKITE_PULL_REQUEST_BASE_BRANCH fi +echo "--- merge_base_branch $GITBASE" + # Show what we are before git --no-pager show -s From 9b9ee75666ffca8e14b44efec6c360c2ccbcf615 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Tue, 26 Nov 2019 18:10:50 +0000 Subject: [PATCH 113/133] Fix startup error when http proxy is defined. (#6421) Guess I only tested this on python 2 :/ Fixes #6419. --- changelog.d/6421.bugfix | 1 + synapse/rest/media/v1/preview_url_resource.py | 4 ++-- synapse/server.py | 4 ++-- 3 files changed, 5 insertions(+), 4 deletions(-) create mode 100644 changelog.d/6421.bugfix diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix new file mode 100644 index 000000000..7969f7f71 --- /dev/null +++ b/changelog.d/6421.bugfix @@ -0,0 +1 @@ +Fix startup error when http proxy is defined. 
diff --git a/synapse/rest/media/v1/preview_url_resource.py b/synapse/rest/media/v1/preview_url_resource.py index 15c15a12f..87343d9db 100644 --- a/synapse/rest/media/v1/preview_url_resource.py +++ b/synapse/rest/media/v1/preview_url_resource.py @@ -77,8 +77,8 @@ class PreviewUrlResource(DirectServeResource): treq_args={"browser_like_redirects": True}, ip_whitelist=hs.config.url_preview_ip_range_whitelist, ip_blacklist=hs.config.url_preview_ip_range_blacklist, - http_proxy=os.getenv("http_proxy"), - https_proxy=os.getenv("HTTPS_PROXY"), + http_proxy=os.getenvb(b"http_proxy"), + https_proxy=os.getenvb(b"HTTPS_PROXY"), ) self.media_repo = media_repo self.primary_base_path = media_repo.primary_base_path diff --git a/synapse/server.py b/synapse/server.py index 90c3b072e..be9af7f98 100644 --- a/synapse/server.py +++ b/synapse/server.py @@ -318,8 +318,8 @@ class HomeServer(object): def build_proxied_http_client(self): return SimpleHttpClient( self, - http_proxy=os.getenv("http_proxy"), - https_proxy=os.getenv("HTTPS_PROXY"), + http_proxy=os.getenvb(b"http_proxy"), + https_proxy=os.getenvb(b"HTTPS_PROXY"), ) def build_room_creation_handler(self): From 6f4a63df0044a304109f7d6958be94262558a7cf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 27 Nov 2019 18:18:33 +0000 Subject: [PATCH 114/133] Add more tests to the worker blacklist (#6429) --- .buildkite/worker-blacklist | 29 +++++++++++++++++++++++++++++ changelog.d/6429.misc | 1 + 2 files changed, 30 insertions(+) create mode 100644 changelog.d/6429.misc diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index cda5c84e9..d7908af17 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -28,3 +28,32 @@ User sees updates to presence from other users in the incremental sync. Gapped incremental syncs include all state changes Old members are included in gappy incr LL sync if they start speaking + +# new failures as of https://github.com/matrix-org/sytest/pull/732 +Device list doesn't change if remote server is down +Remote servers cannot set power levels in rooms without existing powerlevels +Remote servers should reject attempts by non-creators to set the power levels + +# new failures as of https://github.com/matrix-org/sytest/pull/753 +GET /rooms/:room_id/messages returns a message +GET /rooms/:room_id/messages lazy loads members correctly +Read receipts are sent as events +Only original members of the room can see messages from erased users +Device deletion propagates over federation +If user leaves room, remote user changes device and rejoins we see update in /sync and /keys/changes +Changing user-signing key notifies local users +Newly updated tags appear in an incremental v2 /sync +Server correctly handles incoming m.device_list_update +Local device key changes get to remote servers with correct prev_id +AS-ghosted users can use rooms via AS +Ghost user must register before joining room +Test that a message is pushed +Invites are pushed +Rooms with aliases are correctly named in pushed +Rooms with names are correctly named in pushed +Rooms with canonical alias are correctly named in pushed +Rooms with many users are correctly pushed +Don't get pushed for rooms you've muted +Rejected events are not pushed +Test that rejected pushers are removed. 
+Events come down the correct room diff --git a/changelog.d/6429.misc b/changelog.d/6429.misc new file mode 100644 index 000000000..4b32cdeac --- /dev/null +++ b/changelog.d/6429.misc @@ -0,0 +1 @@ +Add more tests to the blacklist when running in worker mode. From 0d27aba900136514a8801b902f9a8ac69150e2c0 Mon Sep 17 00:00:00 2001 From: Hubert Chathi Date: Wed, 27 Nov 2019 16:14:44 -0500 Subject: [PATCH 115/133] add etag and count to key backup endpoints (#5858) --- changelog.d/5858.feature | 1 + synapse/handlers/e2e_room_keys.py | 126 ++++++---- synapse/rest/client/v2_alpha/room_keys.py | 8 +- .../storage/data_stores/main/e2e_room_keys.py | 226 +++++++++++++----- .../main/schema/delta/56/room_key_etag.sql | 17 ++ tests/handlers/test_e2e_room_keys.py | 31 +++ tests/storage/test_e2e_room_keys.py | 8 +- 7 files changed, 295 insertions(+), 122 deletions(-) create mode 100644 changelog.d/5858.feature create mode 100644 synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql diff --git a/changelog.d/5858.feature b/changelog.d/5858.feature new file mode 100644 index 000000000..55ee93051 --- /dev/null +++ b/changelog.d/5858.feature @@ -0,0 +1 @@ +Add etag and count fields to key backup endpoints to help clients guess if there are new keys. diff --git a/synapse/handlers/e2e_room_keys.py b/synapse/handlers/e2e_room_keys.py index 0cea445f0..f1b4424a0 100644 --- a/synapse/handlers/e2e_room_keys.py +++ b/synapse/handlers/e2e_room_keys.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017, 2018 New Vector Ltd +# Copyright 2019 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -103,14 +104,35 @@ class E2eRoomKeysHandler(object): rooms session_id(string): session ID to delete keys for, for None to delete keys for all sessions + Raises: + NotFoundError: if the backup version does not exist Returns: - A deferred of the deletion transaction + A dict containing the count and etag for the backup version """ # lock for consistency with uploading with (yield self._upload_linearizer.queue(user_id)): + # make sure the backup version exists + try: + version_info = yield self.store.get_e2e_room_keys_version_info( + user_id, version + ) + except StoreError as e: + if e.code == 404: + raise NotFoundError("Unknown backup version") + else: + raise + yield self.store.delete_e2e_room_keys(user_id, version, room_id, session_id) + version_etag = version_info["etag"] + 1 + yield self.store.update_e2e_room_keys_version( + user_id, version, None, version_etag + ) + + count = yield self.store.count_e2e_room_keys(user_id, version) + return {"etag": str(version_etag), "count": count} + @trace @defer.inlineCallbacks def upload_room_keys(self, user_id, version, room_keys): @@ -138,6 +160,9 @@ class E2eRoomKeysHandler(object): } } + Returns: + A dict containing the count and etag for the backup version + Raises: NotFoundError: if there are no versions defined RoomKeysVersionError: if the uploaded version is not the current version @@ -171,59 +196,62 @@ class E2eRoomKeysHandler(object): else: raise - # go through the room_keys. - # XXX: this should/could be done concurrently, given we're in a lock. + # Fetch any existing room keys for the sessions that have been + # submitted. Then compare them with the submitted keys. If the + # key is new, insert it; if the key should be updated, then update + # it; otherwise, drop it. 
+ existing_keys = yield self.store.get_e2e_room_keys_multi( + user_id, version, room_keys["rooms"] + ) + to_insert = [] # batch the inserts together + changed = False # if anything has changed, we need to update the etag for room_id, room in iteritems(room_keys["rooms"]): - for session_id, session in iteritems(room["sessions"]): - yield self._upload_room_key( - user_id, version, room_id, session_id, session + for session_id, room_key in iteritems(room["sessions"]): + log_kv( + { + "message": "Trying to upload room key", + "room_id": room_id, + "session_id": session_id, + "user_id": user_id, + } ) + current_room_key = existing_keys.get(room_id, {}).get(session_id) + if current_room_key: + if self._should_replace_room_key(current_room_key, room_key): + log_kv({"message": "Replacing room key."}) + # updates are done one at a time in the DB, so send + # updates right away rather than batching them up, + # like we do with the inserts + yield self.store.update_e2e_room_key( + user_id, version, room_id, session_id, room_key + ) + changed = True + else: + log_kv({"message": "Not replacing room_key."}) + else: + log_kv( + { + "message": "Room key not found.", + "room_id": room_id, + "user_id": user_id, + } + ) + log_kv({"message": "Replacing room key."}) + to_insert.append((room_id, session_id, room_key)) + changed = True - @defer.inlineCallbacks - def _upload_room_key(self, user_id, version, room_id, session_id, room_key): - """Upload a given room_key for a given room and session into a given - version of the backup. Merges the key with any which might already exist. + if len(to_insert): + yield self.store.add_e2e_room_keys(user_id, version, to_insert) - Args: - user_id(str): the user whose backup we're setting - version(str): the version ID of the backup we're updating - room_id(str): the ID of the room whose keys we're setting - session_id(str): the session whose room_key we're setting - room_key(dict): the room_key being set - """ - log_kv( - { - "message": "Trying to upload room key", - "room_id": room_id, - "session_id": session_id, - "user_id": user_id, - } - ) - # get the room_key for this particular row - current_room_key = None - try: - current_room_key = yield self.store.get_e2e_room_key( - user_id, version, room_id, session_id - ) - except StoreError as e: - if e.code == 404: - log_kv( - { - "message": "Room key not found.", - "room_id": room_id, - "user_id": user_id, - } + version_etag = version_info["etag"] + if changed: + version_etag = version_etag + 1 + yield self.store.update_e2e_room_keys_version( + user_id, version, None, version_etag ) - else: - raise - if self._should_replace_room_key(current_room_key, room_key): - log_kv({"message": "Replacing room key."}) - yield self.store.set_e2e_room_key( - user_id, version, room_id, session_id, room_key - ) - else: - log_kv({"message": "Not replacing room_key."}) + count = yield self.store.count_e2e_room_keys(user_id, version) + return {"etag": str(version_etag), "count": count} @staticmethod def _should_replace_room_key(current_room_key, room_key): @@ -314,6 +342,8 @@ class E2eRoomKeysHandler(object): raise NotFoundError("Unknown backup version") else: raise + + res["count"] = yield self.store.count_e2e_room_keys(user_id, res["version"]) return res @trace diff --git a/synapse/rest/client/v2_alpha/room_keys.py b/synapse/rest/client/v2_alpha/room_keys.py index d59678643..d83ac8e3c 100644 --- a/synapse/rest/client/v2_alpha/room_keys.py +++ b/synapse/rest/client/v2_alpha/room_keys.py @@ -134,8 +134,8 @@ class RoomKeysServlet(RestServlet): 
if room_id: body = {"rooms": {room_id: body}} - yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body) - return 200, {} + ret = yield self.e2e_room_keys_handler.upload_room_keys(user_id, version, body) + return 200, ret @defer.inlineCallbacks def on_GET(self, request, room_id, session_id): @@ -239,10 +239,10 @@ class RoomKeysServlet(RestServlet): user_id = requester.user.to_string() version = parse_string(request, "version") - yield self.e2e_room_keys_handler.delete_room_keys( + ret = yield self.e2e_room_keys_handler.delete_room_keys( user_id, version, room_id, session_id ) - return 200, {} + return 200, ret class RoomKeysNewVersionServlet(RestServlet): diff --git a/synapse/storage/data_stores/main/e2e_room_keys.py b/synapse/storage/data_stores/main/e2e_room_keys.py index 1cbbae5b6..113224fd7 100644 --- a/synapse/storage/data_stores/main/e2e_room_keys.py +++ b/synapse/storage/data_stores/main/e2e_room_keys.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2017 New Vector Ltd +# Copyright 2019 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -24,49 +25,8 @@ from synapse.storage._base import SQLBaseStore class EndToEndRoomKeyStore(SQLBaseStore): @defer.inlineCallbacks - def get_e2e_room_key(self, user_id, version, room_id, session_id): - """Get the encrypted E2E room key for a given session from a given - backup version of room_keys. We only store the 'best' room key for a given - session at a given time, as determined by the handler. - - Args: - user_id(str): the user whose backup we're querying - version(str): the version ID of the backup for the set of keys we're querying - room_id(str): the ID of the room whose keys we're querying. - This is a bit redundant as it's implied by the session_id, but - we include for consistency with the rest of the API. - session_id(str): the session whose room_key we're querying. - - Returns: - A deferred dict giving the session_data and message metadata for - this room key. 
- """ - - row = yield self._simple_select_one( - table="e2e_room_keys", - keyvalues={ - "user_id": user_id, - "version": version, - "room_id": room_id, - "session_id": session_id, - }, - retcols=( - "first_message_index", - "forwarded_count", - "is_verified", - "session_data", - ), - desc="get_e2e_room_key", - ) - - row["session_data"] = json.loads(row["session_data"]) - - return row - - @defer.inlineCallbacks - def set_e2e_room_key(self, user_id, version, room_id, session_id, room_key): - """Replaces or inserts the encrypted E2E room key for a given session in - a given backup + def update_e2e_room_key(self, user_id, version, room_id, session_id, room_key): + """Replaces the encrypted E2E room key for a given session in a given backup Args: user_id(str): the user whose backup we're setting @@ -78,7 +38,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): StoreError """ - yield self._simple_upsert( + yield self._simple_update_one( table="e2e_room_keys", keyvalues={ "user_id": user_id, @@ -86,21 +46,51 @@ class EndToEndRoomKeyStore(SQLBaseStore): "room_id": room_id, "session_id": session_id, }, - values={ + updatevalues={ "first_message_index": room_key["first_message_index"], "forwarded_count": room_key["forwarded_count"], "is_verified": room_key["is_verified"], "session_data": json.dumps(room_key["session_data"]), }, - lock=False, + desc="update_e2e_room_key", ) - log_kv( - { - "message": "Set room key", - "room_id": room_id, - "session_id": session_id, - "room_key": room_key, - } + + @defer.inlineCallbacks + def add_e2e_room_keys(self, user_id, version, room_keys): + """Bulk add room keys to a given backup. + + Args: + user_id (str): the user whose backup we're adding to + version (str): the version ID of the backup for the set of keys we're adding to + room_keys (iterable[(str, str, dict)]): the keys to add, in the form + (roomID, sessionID, keyData) + """ + + values = [] + for (room_id, session_id, room_key) in room_keys: + values.append( + { + "user_id": user_id, + "version": version, + "room_id": room_id, + "session_id": session_id, + "first_message_index": room_key["first_message_index"], + "forwarded_count": room_key["forwarded_count"], + "is_verified": room_key["is_verified"], + "session_data": json.dumps(room_key["session_data"]), + } + ) + log_kv( + { + "message": "Set room key", + "room_id": room_id, + "session_id": session_id, + "room_key": room_key, + } + ) + + yield self._simple_insert_many( + table="e2e_room_keys", values=values, desc="add_e2e_room_keys" ) @trace @@ -110,11 +100,11 @@ class EndToEndRoomKeyStore(SQLBaseStore): room, or a given session. Args: - user_id(str): the user whose backup we're querying - version(str): the version ID of the backup for the set of keys we're querying - room_id(str): Optional. the ID of the room whose keys we're querying, if any. + user_id (str): the user whose backup we're querying + version (str): the version ID of the backup for the set of keys we're querying + room_id (str): Optional. the ID of the room whose keys we're querying, if any. If not specified, we return the keys for all the rooms in the backup. - session_id(str): Optional. the session whose room_key we're querying, if any. + session_id (str): Optional. the session whose room_key we're querying, if any. If specified, we also require the room_id to be specified. 
If not specified, we return all the keys in this version of the backup (or for the specified room) @@ -162,6 +152,95 @@ class EndToEndRoomKeyStore(SQLBaseStore): return sessions + def get_e2e_room_keys_multi(self, user_id, version, room_keys): + """Get multiple room keys at a time. The difference between this function and + get_e2e_room_keys is that this function can be used to retrieve + multiple specific keys at a time, whereas get_e2e_room_keys is used for + getting all the keys in a backup version, all the keys for a room, or a + specific key. + + Args: + user_id (str): the user whose backup we're querying + version (str): the version ID of the backup we're querying about + room_keys (dict[str, dict[str, iterable[str]]]): a map from + room ID -> {"session": [session ids]} indicating the session IDs + that we want to query + + Returns: + Deferred[dict[str, dict[str, dict]]]: a map of room IDs to session IDs to room key + """ + + return self.runInteraction( + "get_e2e_room_keys_multi", + self._get_e2e_room_keys_multi_txn, + user_id, + version, + room_keys, + ) + + @staticmethod + def _get_e2e_room_keys_multi_txn(txn, user_id, version, room_keys): + if not room_keys: + return {} + + where_clauses = [] + params = [user_id, version] + for room_id, room in room_keys.items(): + sessions = list(room["sessions"]) + if not sessions: + continue + params.append(room_id) + params.extend(sessions) + where_clauses.append( + "(room_id = ? AND session_id IN (%s))" + % (",".join(["?" for _ in sessions]),) + ) + + # check if we're actually querying something + if not where_clauses: + return {} + + sql = """ + SELECT room_id, session_id, first_message_index, forwarded_count, + is_verified, session_data + FROM e2e_room_keys + WHERE user_id = ? AND version = ? AND (%s) + """ % ( + " OR ".join(where_clauses) + ) + + txn.execute(sql, params) + + ret = {} + + for row in txn: + room_id = row[0] + session_id = row[1] + ret.setdefault(room_id, {}) + ret[room_id][session_id] = { + "first_message_index": row[2], + "forwarded_count": row[3], + "is_verified": row[4], + "session_data": json.loads(row[5]), + } + + return ret + + def count_e2e_room_keys(self, user_id, version): + """Get the number of keys in a backup version. 
+ + Args: + user_id (str): the user whose backup we're querying + version (str): the version ID of the backup we're querying about + """ + + return self._simple_select_one_onecol( + table="e2e_room_keys", + keyvalues={"user_id": user_id, "version": version}, + retcol="COUNT(*)", + desc="count_e2e_room_keys", + ) + @trace @defer.inlineCallbacks def delete_e2e_room_keys(self, user_id, version, room_id=None, session_id=None): @@ -219,6 +298,7 @@ class EndToEndRoomKeyStore(SQLBaseStore): version(str) algorithm(str) auth_data(object): opaque dict supplied by the client + etag(int): tag of the keys in the backup """ def _get_e2e_room_keys_version_info_txn(txn): @@ -236,10 +316,12 @@ class EndToEndRoomKeyStore(SQLBaseStore): txn, table="e2e_room_keys_versions", keyvalues={"user_id": user_id, "version": this_version, "deleted": 0}, - retcols=("version", "algorithm", "auth_data"), + retcols=("version", "algorithm", "auth_data", "etag"), ) result["auth_data"] = json.loads(result["auth_data"]) result["version"] = str(result["version"]) + if result["etag"] is None: + result["etag"] = 0 return result return self.runInteraction( @@ -288,21 +370,33 @@ class EndToEndRoomKeyStore(SQLBaseStore): ) @trace - def update_e2e_room_keys_version(self, user_id, version, info): + def update_e2e_room_keys_version( + self, user_id, version, info=None, version_etag=None + ): """Update a given backup version Args: user_id(str): the user whose backup version we're updating version(str): the version ID of the backup version we're updating - info(dict): the new backup version info to store + info (dict): the new backup version info to store. If None, then + the backup version info is not updated + version_etag (Optional[int]): etag of the keys in the backup. If + None, then the etag is not updated """ + updatevalues = {} - return self._simple_update( - table="e2e_room_keys_versions", - keyvalues={"user_id": user_id, "version": version}, - updatevalues={"auth_data": json.dumps(info["auth_data"])}, - desc="update_e2e_room_keys_version", - ) + if info is not None and "auth_data" in info: + updatevalues["auth_data"] = json.dumps(info["auth_data"]) + if version_etag is not None: + updatevalues["etag"] = version_etag + + if updatevalues: + return self._simple_update( + table="e2e_room_keys_versions", + keyvalues={"user_id": user_id, "version": version}, + updatevalues=updatevalues, + desc="update_e2e_room_keys_version", + ) @trace def delete_e2e_room_keys_version(self, user_id, version=None): diff --git a/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql b/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql new file mode 100644 index 000000000..7d70dd071 --- /dev/null +++ b/synapse/storage/data_stores/main/schema/delta/56/room_key_etag.sql @@ -0,0 +1,17 @@ +/* Copyright 2019 Matrix.org Foundation C.I.C. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +-- store the current etag of backup version +ALTER TABLE e2e_room_keys_versions ADD COLUMN etag BIGINT; diff --git a/tests/handlers/test_e2e_room_keys.py b/tests/handlers/test_e2e_room_keys.py index 0bb96674a..70f172eb0 100644 --- a/tests/handlers/test_e2e_room_keys.py +++ b/tests/handlers/test_e2e_room_keys.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2016 OpenMarket Ltd # Copyright 2017 New Vector Ltd +# Copyright 2019 Matrix.org Foundation C.I.C. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -94,23 +95,29 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # check we can retrieve it as the current version res = yield self.handler.get_version_info(self.local_user) + version_etag = res["etag"] + del res["etag"] self.assertDictEqual( res, { "version": "1", "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", + "count": 0, }, ) # check we can retrieve it as a specific version res = yield self.handler.get_version_info(self.local_user, "1") + self.assertEqual(res["etag"], version_etag) + del res["etag"] self.assertDictEqual( res, { "version": "1", "algorithm": "m.megolm_backup.v1", "auth_data": "first_version_auth_data", + "count": 0, }, ) @@ -126,12 +133,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # check we can retrieve it as the current version res = yield self.handler.get_version_info(self.local_user) + del res["etag"] self.assertDictEqual( res, { "version": "2", "algorithm": "m.megolm_backup.v1", "auth_data": "second_version_auth_data", + "count": 0, }, ) @@ -158,12 +167,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # check we can retrieve it as the current version res = yield self.handler.get_version_info(self.local_user) + del res["etag"] self.assertDictEqual( res, { "algorithm": "m.megolm_backup.v1", "auth_data": "revised_first_version_auth_data", "version": version, + "count": 0, }, ) @@ -207,12 +218,14 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): # check we can retrieve it as the current version res = yield self.handler.get_version_info(self.local_user) + del res["etag"] # etag is opaque, so don't test its contents self.assertDictEqual( res, { "algorithm": "m.megolm_backup.v1", "auth_data": "revised_first_version_auth_data", "version": version, + "count": 0, }, ) @@ -409,6 +422,11 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): yield self.handler.upload_room_keys(self.local_user, version, room_keys) + # get the etag to compare to future versions + res = yield self.handler.get_version_info(self.local_user) + backup_etag = res["etag"] + self.assertEqual(res["count"], 1) + new_room_keys = copy.deepcopy(room_keys) new_room_key = new_room_keys["rooms"]["!abc:matrix.org"]["sessions"]["c0ff33"] @@ -423,6 +441,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): "SSBBTSBBIEZJU0gK", ) + # the etag should be the same since the session did not change + res = yield self.handler.get_version_info(self.local_user) + self.assertEqual(res["etag"], backup_etag) + # test that marking the session as verified however /does/ replace it new_room_key["is_verified"] = True yield self.handler.upload_room_keys(self.local_user, version, new_room_keys) @@ -432,6 +454,11 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res["rooms"]["!abc:matrix.org"]["sessions"]["c0ff33"]["session_data"], "new" ) + # the etag should NOT be equal now, since the key changed + res = yield self.handler.get_version_info(self.local_user) + 
self.assertNotEqual(res["etag"], backup_etag) + backup_etag = res["etag"] + # test that a session with a higher forwarded_count doesn't replace one # with a lower forwarding count new_room_key["forwarded_count"] = 2 @@ -443,6 +470,10 @@ class E2eRoomKeysHandlerTestCase(unittest.TestCase): res["rooms"]["!abc:matrix.org"]["sessions"]["c0ff33"]["session_data"], "new" ) + # the etag should be the same since the session did not change + res = yield self.handler.get_version_info(self.local_user) + self.assertEqual(res["etag"], backup_etag) + # TODO: check edge cases as well as the common variations here @defer.inlineCallbacks diff --git a/tests/storage/test_e2e_room_keys.py b/tests/storage/test_e2e_room_keys.py index d128fde44..35dafbb90 100644 --- a/tests/storage/test_e2e_room_keys.py +++ b/tests/storage/test_e2e_room_keys.py @@ -39,8 +39,8 @@ class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase): ) self.get_success( - self.store.set_e2e_room_key( - "user_id", version1, "room", "session", room_key + self.store.add_e2e_room_keys( + "user_id", version1, [("room", "session", room_key)] ) ) @@ -51,8 +51,8 @@ class E2eRoomKeysHandlerTestCase(unittest.HomeserverTestCase): ) self.get_success( - self.store.set_e2e_room_key( - "user_id", version2, "room", "session", room_key + self.store.add_e2e_room_keys( + "user_id", version2, [("room", "session", room_key)] ) ) From 0f87b912aba7e678041632bc9a6d1f7c2d24342c Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Thu, 28 Nov 2019 08:54:07 +1100 Subject: [PATCH 116/133] Implementation of MSC2314 (#6176) --- changelog.d/6176.feature | 1 + synapse/federation/federation_server.py | 26 ++++++--- synapse/federation/transport/server.py | 6 +- sytest-blacklist | 6 +- tests/federation/test_complexity.py | 28 +-------- tests/federation/test_federation_sender.py | 4 +- tests/federation/test_federation_server.py | 63 ++++++++++++++++++++ tests/handlers/test_typing.py | 3 + tests/replication/slave/storage/_base.py | 3 + tests/replication/tcp/streams/_base.py | 4 ++ tests/storage/test_roommember.py | 26 +-------- tests/unittest.py | 68 +++++++++++++++++++++- tests/utils.py | 1 + 13 files changed, 174 insertions(+), 65 deletions(-) create mode 100644 changelog.d/6176.feature diff --git a/changelog.d/6176.feature b/changelog.d/6176.feature new file mode 100644 index 000000000..3c66d689d --- /dev/null +++ b/changelog.d/6176.feature @@ -0,0 +1 @@ +Implement the `/_matrix/federation/unstable/net.atleastfornow/state/` API as drafted in MSC2314. diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index d942d77a7..84d4eca04 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2015, 2016 OpenMarket Ltd # Copyright 2018 New Vector Ltd +# Copyright 2019 Matrix.org Federation C.I.C # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -73,6 +74,7 @@ class FederationServer(FederationBase):
self.auth = hs.get_auth()
self.handler = hs.get_handlers().federation_handler
+ self.state = hs.get_state_handler()
self._server_linearizer = Linearizer("fed_server")
self._transaction_linearizer = Linearizer("fed_txn_handler")
@@ -264,9 +266,6 @@ class FederationServer(FederationBase):
await self.registry.on_edu(edu_type, origin, content)
async def on_context_state_request(self, origin, room_id, event_id):
- if not event_id:
- raise NotImplementedError("Specify an event")
-
origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id)
@@ -280,13 +279,18 @@ class FederationServer(FederationBase):
# - but that's non-trivial to get right, and anyway somewhat defeats
# the point of the linearizer.
with (await self._server_linearizer.queue((origin, room_id))):
- resp = await self._state_resp_cache.wrap(
- (room_id, event_id),
- self._on_context_state_request_compute,
- room_id,
- event_id,
+ resp = dict(
+ await self._state_resp_cache.wrap(
+ (room_id, event_id),
+ self._on_context_state_request_compute,
+ room_id,
+ event_id,
+ )
)
+ room_version = await self.store.get_room_version(room_id)
+ resp["room_version"] = room_version
+
return 200, resp
async def on_state_ids_request(self, origin, room_id, event_id):
@@ -306,7 +310,11 @@ class FederationServer(FederationBase):
return 200, {"pdu_ids": state_ids, "auth_chain_ids": auth_chain_ids}
async def _on_context_state_request_compute(self, room_id, event_id):
- pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+ if event_id:
+ pdus = await self.handler.get_state_for_pdu(room_id, event_id)
+ else:
+ pdus = (await self.state.get_current_state(room_id)).values()
+
auth_chain = await self.store.get_auth_chain([pdu.event_id for pdu in pdus])
return {
diff --git a/synapse/federation/transport/server.py b/synapse/federation/transport/server.py
index 09baa9c57..fefc789c8 100644
--- a/synapse/federation/transport/server.py
+++ b/synapse/federation/transport/server.py
@@ -421,7 +421,7 @@ class FederationEventServlet(BaseFederationServlet):
return await self.handler.on_pdu_request(origin, event_id)
-class FederationStateServlet(BaseFederationServlet):
+class FederationStateV1Servlet(BaseFederationServlet):
PATH = "/state/(?P<context>[^/]*)/?"
# This is when someone asks for all data for a given context.
@@ -429,7 +429,7 @@ class FederationStateServlet(BaseFederationServlet):
return await self.handler.on_context_state_request(
origin,
context,
- parse_string_from_args(query, "event_id", None, required=True),
+ parse_string_from_args(query, "event_id", None, required=False),
)
@@ -1360,7 +1360,7 @@ class RoomComplexityServlet(BaseFederationServlet):
FEDERATION_SERVLET_CLASSES = (
FederationSendServlet,
FederationEventServlet,
- FederationStateServlet,
+ FederationStateV1Servlet,
FederationStateIdsServlet,
FederationBackfillServlet,
FederationQueryServlet,
diff --git a/sytest-blacklist b/sytest-blacklist
index 11785fd43..411cce069 100644
--- a/sytest-blacklist
+++ b/sytest-blacklist
@@ -1,6 +1,6 @@
# This file serves as a blacklist for SyTest tests that we expect will fail in
# Synapse.
-# +# # Each line of this file is scanned by sytest during a run and if the line # exactly matches the name of a test, it will be marked as "expected fail", # meaning the test will still run, but failure will not mark the entire test @@ -29,3 +29,7 @@ Enabling an unknown default rule fails with 404 # Blacklisted due to https://github.com/matrix-org/synapse/issues/1663 New federated private chats get full presence information (SYN-115) + +# Blacklisted due to https://github.com/matrix-org/matrix-doc/pull/2314 removing +# this requirement from the spec +Inbound federation of state requires event_id as a mandatory paramater diff --git a/tests/federation/test_complexity.py b/tests/federation/test_complexity.py index 51714a2b0..24fa8dbb4 100644 --- a/tests/federation/test_complexity.py +++ b/tests/federation/test_complexity.py @@ -18,17 +18,14 @@ from mock import Mock from twisted.internet import defer from synapse.api.errors import Codes, SynapseError -from synapse.config.ratelimiting import FederationRateLimitConfig -from synapse.federation.transport import server from synapse.rest import admin from synapse.rest.client.v1 import login, room from synapse.types import UserID -from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest -class RoomComplexityTests(unittest.HomeserverTestCase): +class RoomComplexityTests(unittest.FederatingHomeserverTestCase): servlets = [ admin.register_servlets, @@ -41,25 +38,6 @@ class RoomComplexityTests(unittest.HomeserverTestCase): config["limit_remote_rooms"] = {"enabled": True, "complexity": 0.05} return config - def prepare(self, reactor, clock, homeserver): - class Authenticator(object): - def authenticate_request(self, request, content): - return defer.succeed("otherserver.nottld") - - ratelimiter = FederationRateLimiter( - clock, - FederationRateLimitConfig( - window_size=1, - sleep_limit=1, - sleep_msec=1, - reject_limit=1000, - concurrent_requests=1000, - ), - ) - server.register_servlets( - homeserver, self.resource, Authenticator(), ratelimiter - ) - def test_complexity_simple(self): u1 = self.register_user("u1", "pass") @@ -105,7 +83,7 @@ class RoomComplexityTests(unittest.HomeserverTestCase): d = handler._remote_join( None, - ["otherserver.example"], + ["other.example.com"], "roomid", UserID.from_string(u1), {"membership": "join"}, @@ -146,7 +124,7 @@ class RoomComplexityTests(unittest.HomeserverTestCase): d = handler._remote_join( None, - ["otherserver.example"], + ["other.example.com"], room_1, UserID.from_string(u1), {"membership": "join"}, diff --git a/tests/federation/test_federation_sender.py b/tests/federation/test_federation_sender.py index cce8d8c6d..d456267b8 100644 --- a/tests/federation/test_federation_sender.py +++ b/tests/federation/test_federation_sender.py @@ -19,7 +19,7 @@ from twisted.internet import defer from synapse.types import ReadReceipt -from tests.unittest import HomeserverTestCase +from tests.unittest import HomeserverTestCase, override_config class FederationSenderTestCases(HomeserverTestCase): @@ -29,6 +29,7 @@ class FederationSenderTestCases(HomeserverTestCase): federation_transport_client=Mock(spec=["send_transaction"]), ) + @override_config({"send_federation": True}) def test_send_receipts(self): mock_state_handler = self.hs.get_state_handler() mock_state_handler.get_current_hosts_in_room.return_value = ["test", "host2"] @@ -69,6 +70,7 @@ class FederationSenderTestCases(HomeserverTestCase): ], ) + @override_config({"send_federation": True}) def test_send_receipts_with_backoff(self): 
"""Send two receipts in quick succession; the second should be flushed, but only after 20ms""" diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index b08be451a..1ec8c4090 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- # Copyright 2018 New Vector Ltd +# Copyright 2019 Matrix.org Federation C.I.C # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,6 +17,8 @@ import logging from synapse.events import FrozenEvent from synapse.federation.federation_server import server_matches_acl_event +from synapse.rest import admin +from synapse.rest.client.v1 import login, room from tests import unittest @@ -41,6 +44,66 @@ class ServerACLsTestCase(unittest.TestCase): self.assertTrue(server_matches_acl_event("1:2:3:4", e)) +class StateQueryTests(unittest.FederatingHomeserverTestCase): + + servlets = [ + admin.register_servlets, + room.register_servlets, + login.register_servlets, + ] + + def test_without_event_id(self): + """ + Querying v1/state/ without an event ID will return the current + known state. + """ + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + self.inject_room_member(room_1, "@user:other.example.com", "join") + + request, channel = self.make_request( + "GET", "/_matrix/federation/v1/state/%s" % (room_1,) + ) + self.render(request) + self.assertEquals(200, channel.code, channel.result) + + self.assertEqual( + channel.json_body["room_version"], + self.hs.config.default_room_version.identifier, + ) + + members = set( + map( + lambda x: x["state_key"], + filter( + lambda x: x["type"] == "m.room.member", channel.json_body["pdus"] + ), + ) + ) + + self.assertEqual(members, set(["@user:other.example.com", u1])) + self.assertEqual(len(channel.json_body["pdus"]), 6) + + def test_needs_to_be_in_room(self): + """ + Querying v1/state/ requires the server + be in the room to provide data. 
+ """ + u1 = self.register_user("u1", "pass") + u1_token = self.login("u1", "pass") + + room_1 = self.helper.create_room_as(u1, tok=u1_token) + + request, channel = self.make_request( + "GET", "/_matrix/federation/v1/state/%s" % (room_1,) + ) + self.render(request) + self.assertEquals(403, channel.code, channel.result) + self.assertEqual(channel.json_body["errcode"], "M_FORBIDDEN") + + def _create_acl_event(content): return FrozenEvent( { diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 5ec568f4e..f6d866028 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -24,6 +24,7 @@ from synapse.api.errors import AuthError from synapse.types import UserID from tests import unittest +from tests.unittest import override_config from tests.utils import register_federation_servlets # Some local users to test with @@ -174,6 +175,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ], ) + @override_config({"send_federation": True}) def test_started_typing_remote_send(self): self.room_members = [U_APPLE, U_ONION] @@ -237,6 +239,7 @@ class TypingNotificationsTestCase(unittest.HomeserverTestCase): ], ) + @override_config({"send_federation": True}) def test_stopped_typing(self): self.room_members = [U_APPLE, U_BANANA, U_ONION] diff --git a/tests/replication/slave/storage/_base.py b/tests/replication/slave/storage/_base.py index 4f924ce45..e7472e3a9 100644 --- a/tests/replication/slave/storage/_base.py +++ b/tests/replication/slave/storage/_base.py @@ -48,7 +48,10 @@ class BaseSlavedStoreTestCase(unittest.HomeserverTestCase): server_factory = ReplicationStreamProtocolFactory(self.hs) self.streamer = server_factory.streamer + handler_factory = Mock() self.replication_handler = ReplicationClientHandler(self.slaved_store) + self.replication_handler.factory = handler_factory + client_factory = ReplicationClientFactory( self.hs, "client_name", self.replication_handler ) diff --git a/tests/replication/tcp/streams/_base.py b/tests/replication/tcp/streams/_base.py index ce3835ae6..1d14e7725 100644 --- a/tests/replication/tcp/streams/_base.py +++ b/tests/replication/tcp/streams/_base.py @@ -12,6 +12,8 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+from mock import Mock + from synapse.replication.tcp.commands import ReplicateCommand from synapse.replication.tcp.protocol import ClientReplicationStreamProtocol from synapse.replication.tcp.resource import ReplicationStreamProtocolFactory @@ -30,7 +32,9 @@ class BaseStreamTestCase(unittest.HomeserverTestCase): server = server_factory.buildProtocol(None) # build a replication client, with a dummy handler + handler_factory = Mock() self.test_handler = TestReplicationClientHandler() + self.test_handler.factory = handler_factory self.client = ClientReplicationStreamProtocol( "client", "test", clock, self.test_handler ) diff --git a/tests/storage/test_roommember.py b/tests/storage/test_roommember.py index 9ddd17f73..105a0c2b0 100644 --- a/tests/storage/test_roommember.py +++ b/tests/storage/test_roommember.py @@ -16,8 +16,7 @@ from unittest.mock import Mock -from synapse.api.constants import EventTypes, Membership -from synapse.api.room_versions import RoomVersions +from synapse.api.constants import Membership from synapse.rest.admin import register_servlets_for_client_rest_resource from synapse.rest.client.v1 import login, room from synapse.types import Requester, UserID @@ -44,9 +43,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): # We can't test the RoomMemberStore on its own without the other event # storage logic self.store = hs.get_datastore() - self.storage = hs.get_storage() - self.event_builder_factory = hs.get_event_builder_factory() - self.event_creation_handler = hs.get_event_creation_handler() self.u_alice = self.register_user("alice", "pass") self.t_alice = self.login("alice", "pass") @@ -55,26 +51,6 @@ class RoomMemberStoreTestCase(unittest.HomeserverTestCase): # User elsewhere on another host self.u_charlie = UserID.from_string("@charlie:elsewhere") - def inject_room_member(self, room, user, membership, replaces_state=None): - builder = self.event_builder_factory.for_room_version( - RoomVersions.V1, - { - "type": EventTypes.Member, - "sender": user, - "state_key": user, - "room_id": room, - "content": {"membership": membership}, - }, - ) - - event, context = self.get_success( - self.event_creation_handler.create_new_client_event(builder) - ) - - self.get_success(self.storage.persistence.persist_event(event, context)) - - return event - def test_one_member(self): # Alice creates the room, and is automatically joined diff --git a/tests/unittest.py b/tests/unittest.py index 561cebc22..31997a0f3 100644 --- a/tests/unittest.py +++ b/tests/unittest.py @@ -1,6 +1,7 @@ # -*- coding: utf-8 -*- # Copyright 2014-2016 OpenMarket Ltd # Copyright 2018 New Vector +# Copyright 2019 Matrix.org Federation C.I.C # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -13,6 +14,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
+ import gc import hashlib import hmac @@ -27,13 +29,17 @@ from twisted.internet.defer import Deferred, succeed from twisted.python.threadpool import ThreadPool from twisted.trial import unittest -from synapse.api.constants import EventTypes +from synapse.api.constants import EventTypes, Membership +from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.config.homeserver import HomeServerConfig +from synapse.config.ratelimiting import FederationRateLimitConfig +from synapse.federation.transport import server as federation_server from synapse.http.server import JsonResource from synapse.http.site import SynapseRequest from synapse.logging.context import LoggingContext from synapse.server import HomeServer from synapse.types import Requester, UserID, create_requester +from synapse.util.ratelimitutils import FederationRateLimiter from tests.server import get_clock, make_request, render, setup_test_homeserver from tests.test_utils.logging_setup import setup_logging @@ -559,6 +565,66 @@ class HomeserverTestCase(TestCase): self.render(request) self.assertEqual(channel.code, 403, channel.result) + def inject_room_member(self, room: str, user: str, membership: Membership) -> None: + """ + Inject a membership event into a room. + + Args: + room: Room ID to inject the event into. + user: MXID of the user to inject the membership for. + membership: The membership type. + """ + event_builder_factory = self.hs.get_event_builder_factory() + event_creation_handler = self.hs.get_event_creation_handler() + + room_version = self.get_success(self.hs.get_datastore().get_room_version(room)) + + builder = event_builder_factory.for_room_version( + KNOWN_ROOM_VERSIONS[room_version], + { + "type": EventTypes.Member, + "sender": user, + "state_key": user, + "room_id": room, + "content": {"membership": membership}, + }, + ) + + event, context = self.get_success( + event_creation_handler.create_new_client_event(builder) + ) + + self.get_success( + self.hs.get_storage().persistence.persist_event(event, context) + ) + + +class FederatingHomeserverTestCase(HomeserverTestCase): + """ + A federating homeserver that authenticates incoming requests as `other.example.com`. + """ + + def prepare(self, reactor, clock, homeserver): + class Authenticator(object): + def authenticate_request(self, request, content): + return succeed("other.example.com") + + ratelimiter = FederationRateLimiter( + clock, + FederationRateLimitConfig( + window_size=1, + sleep_limit=1, + sleep_msec=1, + reject_limit=1000, + concurrent_requests=1000, + ), + ) + federation_server.register_servlets( + homeserver, self.resource, Authenticator(), ratelimiter + ) + + return super().prepare(reactor, clock, homeserver) + def override_config(extra_config): """A decorator which can be applied to test functions to give additional HS config diff --git a/tests/utils.py b/tests/utils.py index 7dc9bdc50..de2ac1ed3 100644 --- a/tests/utils.py +++ b/tests/utils.py @@ -109,6 +109,7 @@ def default_config(name, parse=False): """ config_dict = { "server_name": name, + "send_federation": False, "media_store_path": "media", "uploads_path": "uploads", # the test signing key is just an arbitrary ed25519 key to keep the config From c48ea9800769c22d763cd97ecb137141050739e1 Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Thu, 28 Nov 2019 09:29:18 +0000 Subject: [PATCH 117/133] Clarifications for the email configuration settings. 
(#6423)

Cf #6422
---
changelog.d/6423.misc | 1 +
docs/sample_config.yaml | 17 ++++++++++++++++-
synapse/config/emailconfig.py | 17 ++++++++++++++++-
3 files changed, 33 insertions(+), 2 deletions(-)
create mode 100644 changelog.d/6423.misc

diff --git a/changelog.d/6423.misc b/changelog.d/6423.misc
new file mode 100644
index 000000000..9bcd5d36c
--- /dev/null
+++ b/changelog.d/6423.misc
@@ -0,0 +1 @@
+Clarifications for the email configuration settings.
diff --git a/docs/sample_config.yaml b/docs/sample_config.yaml
index 09dd21352..c7391f0c4 100644
--- a/docs/sample_config.yaml
+++ b/docs/sample_config.yaml
@@ -1333,8 +1333,23 @@ password_config:
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: false
+#
+# # notif_from defines the "From" address to use when sending emails.
+# # It must be set if email sending is enabled.
+# #
+# # The placeholder '%(app)s' will be replaced by the application name,
+# # which is normally 'app_name' (below), but may be overridden by the
+# # Matrix client application.
+# #
+# # Note that the placeholder must be written '%(app)s', including the
+# # trailing 's'.
+# #
# notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
-# app_name: Matrix
+#
+# # app_name defines the default value for '%(app)s' in notif_from. It
+# # defaults to 'Matrix'.
+# #
+# #app_name: my_branded_matrix_server
#
# # Enable email notifications by default
# #
diff --git a/synapse/config/emailconfig.py b/synapse/config/emailconfig.py
index ac1724045..18f42a87f 100644
--- a/synapse/config/emailconfig.py
+++ b/synapse/config/emailconfig.py
@@ -307,8 +307,23 @@ class EmailConfig(Config):
# smtp_user: "exampleusername"
# smtp_pass: "examplepassword"
# require_transport_security: false
+ #
+ # # notif_from defines the "From" address to use when sending emails.
+ # # It must be set if email sending is enabled.
+ # #
+ # # The placeholder '%(app)s' will be replaced by the application name,
+ # # which is normally 'app_name' (below), but may be overridden by the
+ # # Matrix client application.
+ # #
+ # # Note that the placeholder must be written '%(app)s', including the
+ # # trailing 's'.
+ # #
# notif_from: "Your Friendly %(app)s homeserver <noreply@example.com>"
- # app_name: Matrix
+ #
+ # # app_name defines the default value for '%(app)s' in notif_from. It
+ # # defaults to 'Matrix'.
+ # #
+ # #app_name: my_branded_matrix_server
#
# # Enable email notifications by default
# #
From a9c44d4008deb29503e2de00e5aae1a56a72d630 Mon Sep 17 00:00:00 2001
From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com>
Date: Thu, 28 Nov 2019 10:40:42 +0000
Subject: [PATCH 118/133] Remove local threepids on account deactivation (#6426)

---
changelog.d/6426.bugfix | 1 +
synapse/handlers/deactivate_account.py | 3 +++
synapse/storage/data_stores/main/registration.py | 13 +++++++++++++
3 files changed, 17 insertions(+)
create mode 100644 changelog.d/6426.bugfix

diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix
new file mode 100644
index 000000000..3acfde421
--- /dev/null
+++ b/changelog.d/6426.bugfix
@@ -0,0 +1 @@
+Clean up local threepids from user on account deactivation.
\ No newline at end of file diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 63267a0a4..6dedaaff8 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -95,6 +95,9 @@ class DeactivateAccountHandler(BaseHandler): user_id, threepid["medium"], threepid["address"] ) + # Remove all 3PIDs this user has bound to the homeserver + yield self.store.user_delete_threepids(user_id) + # delete any devices belonging to the user, which will also # delete corresponding access tokens. yield self._device_handler.delete_all_devices_for_user(user_id) diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py index 0a3c1f051..98cf6427c 100644 --- a/synapse/storage/data_stores/main/registration.py +++ b/synapse/storage/data_stores/main/registration.py @@ -569,6 +569,19 @@ class RegistrationWorkerStore(SQLBaseStore): return self._simple_delete( "user_threepids", keyvalues={"user_id": user_id, "medium": medium, "address": address}, + desc="user_delete_threepid", + ) + + def user_delete_threepids(self, user_id: str): + """Delete all threepid this user has bound + + Args: + user_id: The user id to delete all threepids of + + """ + return self._simple_delete( + "user_threepids", + keyvalues={"user_id": user_id}, desc="user_delete_threepids", ) From 69d8fb83c6da35d7e1f04fa3afba0fd5406bd9d9 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Nov 2019 11:02:04 +0000 Subject: [PATCH 119/133] MSC2367 Allow reason field on all member events --- synapse/rest/client/v1/room.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/synapse/rest/client/v1/room.py b/synapse/rest/client/v1/room.py index 86bbcc0ee..711d4ad30 100644 --- a/synapse/rest/client/v1/room.py +++ b/synapse/rest/client/v1/room.py @@ -714,7 +714,7 @@ class RoomMembershipRestServlet(TransactionRestServlet): target = UserID.from_string(content["user_id"]) event_content = None - if "reason" in content and membership_action in ["kick", "ban"]: + if "reason" in content: event_content = {"reason": content["reason"]} await self.room_member_handler.update_membership( From 2030193e5523810c4fd6158f97b7a223cee4cb72 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 28 Nov 2019 10:40:42 +0000 Subject: [PATCH 120/133] Remove local threepids on account deactivation (#6426) --- changelog.d/6426.bugfix | 1 + synapse/handlers/deactivate_account.py | 3 +++ synapse/storage/data_stores/main/registration.py | 13 +++++++++++++ 3 files changed, 17 insertions(+) create mode 100644 changelog.d/6426.bugfix diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix new file mode 100644 index 000000000..3acfde421 --- /dev/null +++ b/changelog.d/6426.bugfix @@ -0,0 +1 @@ +Clean up local threepids from user on account deactivation. \ No newline at end of file diff --git a/synapse/handlers/deactivate_account.py b/synapse/handlers/deactivate_account.py index 63267a0a4..6dedaaff8 100644 --- a/synapse/handlers/deactivate_account.py +++ b/synapse/handlers/deactivate_account.py @@ -95,6 +95,9 @@ class DeactivateAccountHandler(BaseHandler): user_id, threepid["medium"], threepid["address"] ) + # Remove all 3PIDs this user has bound to the homeserver + yield self.store.user_delete_threepids(user_id) + # delete any devices belonging to the user, which will also # delete corresponding access tokens. 
yield self._device_handler.delete_all_devices_for_user(user_id)
diff --git a/synapse/storage/data_stores/main/registration.py b/synapse/storage/data_stores/main/registration.py
index ee1b2b2bb..89147ad51 100644
--- a/synapse/storage/data_stores/main/registration.py
+++ b/synapse/storage/data_stores/main/registration.py
@@ -577,6 +577,19 @@ class RegistrationWorkerStore(SQLBaseStore):
return self._simple_delete(
"user_threepids",
keyvalues={"user_id": user_id, "medium": medium, "address": address},
+ desc="user_delete_threepid",
+ )
+
+ def user_delete_threepids(self, user_id: str):
+ """Delete all threepid this user has bound
+
+ Args:
+ user_id: The user id to delete all threepids of
+
+ """
+ return self._simple_delete(
+ "user_threepids",
+ keyvalues={"user_id": user_id},
desc="user_delete_threepids",
)

From e7777f3668d09c87335830f785f42c851827b497 Mon Sep 17 00:00:00 2001
From: Andrew Morgan
Date: Thu, 28 Nov 2019 11:24:11 +0000
Subject: [PATCH 121/133] 1.6.1

---
CHANGES.md | 15 +++++++++++++++
changelog.d/6421.bugfix | 1 -
changelog.d/6426.bugfix | 1 -
debian/changelog | 6 ++++++
synapse/__init__.py | 2 +-
5 files changed, 22 insertions(+), 3 deletions(-)
delete mode 100644 changelog.d/6421.bugfix
delete mode 100644 changelog.d/6426.bugfix

diff --git a/CHANGES.md b/CHANGES.md
index 42281483b..a9afd36d2 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,3 +1,18 @@
+Synapse 1.6.1 (2019-11-28)
+==========================
+
+Security updates
+----------------
+
+This release includes a security fix ([\#6426](https://github.com/matrix-org/synapse/issues/6426), below). Administrators are encouraged to upgrade as soon as possible.
+
+Bugfixes
+--------
+
+- Clean up local threepids from user on account deactivation. ([\#6426](https://github.com/matrix-org/synapse/issues/6426))
+- Fix startup error when http proxy is defined. ([\#6421](https://github.com/matrix-org/synapse/issues/6421))
+
+
Synapse 1.6.0 (2019-11-26)
==========================

diff --git a/changelog.d/6421.bugfix b/changelog.d/6421.bugfix
deleted file mode 100644
index 7969f7f71..000000000
--- a/changelog.d/6421.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Fix startup error when http proxy is defined.
diff --git a/changelog.d/6426.bugfix b/changelog.d/6426.bugfix
deleted file mode 100644
index 3acfde421..000000000
--- a/changelog.d/6426.bugfix
+++ /dev/null
@@ -1 +0,0 @@
-Clean up local threepids from user on account deactivation.
\ No newline at end of file
diff --git a/debian/changelog b/debian/changelog
index 82dae017f..b8a43788e 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+matrix-synapse-py3 (1.6.1) stable; urgency=medium
+
+ * New synapse release 1.6.1.
+
+ -- Synapse Packaging team <packages@matrix.org>  Thu, 28 Nov 2019 11:10:40 +0000
+
matrix-synapse-py3 (1.6.0) stable; urgency=medium

* New synapse release 1.6.0.
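The deactivation fix above (patches 118 and 120) leans on the storage layer's `_simple_delete` helper, which turns a table name and a `keyvalues` dict into a single parameterized DELETE. A rough sketch of the transaction-level form of that helper; the SQL construction shown here is illustrative, and the real implementation also threads through metrics and the `desc` label visible in the diffs:

    def _simple_delete_txn(txn, table, keyvalues):
        # Builds e.g. "DELETE FROM user_threepids WHERE user_id = ?"; the
        # values travel separately as query parameters rather than being
        # interpolated into the SQL string.
        sql = "DELETE FROM %s WHERE %s" % (
            table,
            " AND ".join("%s = ?" % (k,) for k in keyvalues),
        )
        txn.execute(sql, list(keyvalues.values()))

Constraining only on `{"user_id": user_id}` is what lets the new `user_delete_threepids` clear every threepid row for the user in one statement, where the older single-row variant also matched on `medium` and `address`.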
diff --git a/synapse/__init__.py b/synapse/__init__.py index 53eedc004..f99de2f3f 100644 --- a/synapse/__init__.py +++ b/synapse/__init__.py @@ -36,7 +36,7 @@ try: except ImportError: pass -__version__ = "1.6.0" +__version__ = "1.6.1" if bool(os.environ.get("SYNAPSE_TEST_PATCH_LOG_CONTEXTS", False)): # We import here so that we don't have to install a bunch of deps when From 2173785f0d9124037ca841b568349ad0424b39cd Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Nov 2019 11:31:56 +0000 Subject: [PATCH 122/133] Propagate reason in remotely rejected invites --- synapse/handlers/federation.py | 4 ++-- synapse/handlers/room_member.py | 13 +++++++++---- synapse/handlers/room_member_worker.py | 5 ++++- synapse/replication/http/membership.py | 7 +++++-- 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index a5ae7b77d..d3267734f 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -1428,9 +1428,9 @@ class FederationHandler(BaseHandler): return event @defer.inlineCallbacks - def do_remotely_reject_invite(self, target_hosts, room_id, user_id): + def do_remotely_reject_invite(self, target_hosts, room_id, user_id, content): origin, event, event_format_version = yield self._make_and_verify_event( - target_hosts, room_id, user_id, "leave" + target_hosts, room_id, user_id, "leave", content=content, ) # Mark as outlier as we don't have any state for this event; we're not # even in the room. diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index 6cfee4b36..7b7270fc6 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -94,7 +94,9 @@ class RoomMemberHandler(object): raise NotImplementedError() @abc.abstractmethod - def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): + def _remote_reject_invite( + self, requester, remote_room_hosts, room_id, target, content + ): """Attempt to reject an invite for a room this server is not in. If we fail to do so we locally mark the invite as rejected. @@ -104,6 +106,7 @@ class RoomMemberHandler(object): reject invite room_id (str) target (UserID): The user rejecting the invite + content (dict): The content for the rejection event Returns: Deferred[dict]: A dictionary to be returned to the client, may @@ -471,7 +474,7 @@ class RoomMemberHandler(object): # send the rejection to the inviter's HS. 
remote_room_hosts = remote_room_hosts + [inviter.domain] res = yield self._remote_reject_invite( - requester, remote_room_hosts, room_id, target + requester, remote_room_hosts, room_id, target, content, ) return res @@ -971,13 +974,15 @@ class RoomMemberMasterHandler(RoomMemberHandler): ) @defer.inlineCallbacks - def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): + def _remote_reject_invite( + self, requester, remote_room_hosts, room_id, target, content + ): """Implements RoomMemberHandler._remote_reject_invite """ fed_handler = self.federation_handler try: ret = yield fed_handler.do_remotely_reject_invite( - remote_room_hosts, room_id, target.to_string() + remote_room_hosts, room_id, target.to_string(), content=content, ) return ret except Exception as e: diff --git a/synapse/handlers/room_member_worker.py b/synapse/handlers/room_member_worker.py index 75e96ae1a..69be86893 100644 --- a/synapse/handlers/room_member_worker.py +++ b/synapse/handlers/room_member_worker.py @@ -55,7 +55,9 @@ class RoomMemberWorkerHandler(RoomMemberHandler): return ret - def _remote_reject_invite(self, requester, remote_room_hosts, room_id, target): + def _remote_reject_invite( + self, requester, remote_room_hosts, room_id, target, content + ): """Implements RoomMemberHandler._remote_reject_invite """ return self._remote_reject_client( @@ -63,6 +65,7 @@ class RoomMemberWorkerHandler(RoomMemberHandler): remote_room_hosts=remote_room_hosts, room_id=room_id, user_id=target.to_string(), + content=content, ) def _user_joined_room(self, target, room_id): diff --git a/synapse/replication/http/membership.py b/synapse/replication/http/membership.py index cc1f24974..3577611fd 100644 --- a/synapse/replication/http/membership.py +++ b/synapse/replication/http/membership.py @@ -93,6 +93,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): { "requester": ..., "remote_room_hosts": [...], + "content": { ... 
} } """ @@ -107,7 +108,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): self.clock = hs.get_clock() @staticmethod - def _serialize_payload(requester, room_id, user_id, remote_room_hosts): + def _serialize_payload(requester, room_id, user_id, remote_room_hosts, content): """ Args: requester(Requester) @@ -118,12 +119,14 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): return { "requester": requester.serialize(), "remote_room_hosts": remote_room_hosts, + "content": content, } async def _handle_request(self, request, room_id, user_id): content = parse_json_object_from_request(request) remote_room_hosts = content["remote_room_hosts"] + event_content = content["content"] requester = Requester.deserialize(self.store, content["requester"]) @@ -134,7 +137,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint): try: event = await self.federation_handler.do_remotely_reject_invite( - remote_room_hosts, room_id, user_id + remote_room_hosts, room_id, user_id, event_content, ) ret = event.get_pdu_json() except Exception as e: From 8c9a713f8db1d6fcc1f876ac6fbd0e54b5e5819c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Nov 2019 11:32:06 +0000 Subject: [PATCH 123/133] Add tests --- tests/rest/client/v1/test_rooms.py | 140 +++++++++++++++++++++++++++++ 1 file changed, 140 insertions(+) diff --git a/tests/rest/client/v1/test_rooms.py b/tests/rest/client/v1/test_rooms.py index e84e578f9..eda2fabc7 100644 --- a/tests/rest/client/v1/test_rooms.py +++ b/tests/rest/client/v1/test_rooms.py @@ -1180,3 +1180,143 @@ class PerRoomProfilesForbiddenTestCase(unittest.HomeserverTestCase): res_displayname = channel.json_body["content"]["displayname"] self.assertEqual(res_displayname, self.displayname, channel.result) + + +class RoomMembershipReasonTestCase(unittest.HomeserverTestCase): + """Tests that clients can add a "reason" field to membership events and + that they get correctly added to the generated events and propagated. 
+ """ + + servlets = [ + synapse.rest.admin.register_servlets_for_client_rest_resource, + room.register_servlets, + login.register_servlets, + ] + + def prepare(self, reactor, clock, homeserver): + self.creator = self.register_user("creator", "test") + self.creator_tok = self.login("creator", "test") + + self.second_user_id = self.register_user("second", "test") + self.second_tok = self.login("second", "test") + + self.room_id = self.helper.create_room_as(self.creator, tok=self.creator_tok) + + def test_join_reason(self): + reason = "hello" + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/{}/join".format(self.room_id), + content={"reason": reason}, + access_token=self.second_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self._check_for_reason(reason) + + def test_leave_reason(self): + self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) + + reason = "hello" + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/{}/leave".format(self.room_id), + content={"reason": reason}, + access_token=self.second_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self._check_for_reason(reason) + + def test_kick_reason(self): + self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) + + reason = "hello" + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/{}/kick".format(self.room_id), + content={"reason": reason, "user_id": self.second_user_id}, + access_token=self.second_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self._check_for_reason(reason) + + def test_ban_reason(self): + self.helper.join(self.room_id, user=self.second_user_id, tok=self.second_tok) + + reason = "hello" + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/{}/ban".format(self.room_id), + content={"reason": reason, "user_id": self.second_user_id}, + access_token=self.creator_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self._check_for_reason(reason) + + def test_unban_reason(self): + reason = "hello" + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/{}/unban".format(self.room_id), + content={"reason": reason, "user_id": self.second_user_id}, + access_token=self.creator_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self._check_for_reason(reason) + + def test_invite_reason(self): + reason = "hello" + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/{}/invite".format(self.room_id), + content={"reason": reason, "user_id": self.second_user_id}, + access_token=self.creator_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self._check_for_reason(reason) + + def test_reject_invite_reason(self): + self.helper.invite( + self.room_id, + src=self.creator, + targ=self.second_user_id, + tok=self.creator_tok, + ) + + reason = "hello" + request, channel = self.make_request( + "POST", + "/_matrix/client/r0/rooms/{}/leave".format(self.room_id), + content={"reason": reason}, + access_token=self.second_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + self._check_for_reason(reason) + + def _check_for_reason(self, reason): + request, channel = self.make_request( + "GET", + "/_matrix/client/r0/rooms/{}/state/m.room.member/{}".format( + self.room_id, 
self.second_user_id + ), + access_token=self.creator_tok, + ) + self.render(request) + self.assertEqual(channel.code, 200, channel.result) + + event_content = channel.json_body + + self.assertEqual(event_content.get("reason"), reason, channel.result) From 19ba7c142eaced06110c1cb2d22a489dae2ac155 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Thu, 28 Nov 2019 13:59:32 +0000 Subject: [PATCH 124/133] Newsfile --- changelog.d/6434.feature | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6434.feature diff --git a/changelog.d/6434.feature b/changelog.d/6434.feature new file mode 100644 index 000000000..affa5d50c --- /dev/null +++ b/changelog.d/6434.feature @@ -0,0 +1 @@ +Add support for MSC 2367, which allows specifying a reason on all membership events. From 7baeea9f37f1cb7bf9305f4991b1e1b357f161cf Mon Sep 17 00:00:00 2001 From: Richard van der Hoff Date: Thu, 28 Nov 2019 14:55:19 +0000 Subject: [PATCH 125/133] blacklist more tests --- .buildkite/worker-blacklist | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.buildkite/worker-blacklist b/.buildkite/worker-blacklist index d7908af17..7950d19db 100644 --- a/.buildkite/worker-blacklist +++ b/.buildkite/worker-blacklist @@ -57,3 +57,10 @@ Don't get pushed for rooms you've muted Rejected events are not pushed Test that rejected pushers are removed. Events come down the correct room + +# https://buildkite.com/matrix-dot-org/sytest/builds/326#cca62404-a88a-4fcb-ad41-175fd3377603 +Presence changes to UNAVAILABLE are reported to remote room members +If remote user leaves room, changes device and rejoins we see update in sync +uploading self-signing key notifies over federation +Inbound federation can receive redacted events +Outbound federation can request missing events From 708cef88cfbf8dd6df44d2da4ab4dbc7eb584f74 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 28 Nov 2019 19:26:13 +0000 Subject: [PATCH 126/133] Discard retention policies when retrieving state Purge jobs don't delete the latest event in a room in order to keep the forward extremity and not break the room. On the other hand, get_state_events, when given an at_token argument calls filter_events_for_client to know if the user can see the event that matches that (sync) token. That function uses the retention policies of the events it's given to filter out those that are too old from a client's view. Some clients, such as Riot, when loading a room, request the list of members for the latest sync token it knows about, and get confused to the point of refusing to send any message if the server tells it that it can't get that information. This can happen very easily with the message retention feature turned on and a room with low activity so that the last event sent becomes too old according to the room's retention policy. An easy and clean fix for that issue is to discard the room's retention policies when retrieving state. 
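Concretely, the check being disabled is an age comparison against the room's `max_lifetime`, which the filter reads from the room's retention policy (per MSC1763, state events are exempt). A minimal sketch of that predicate with illustrative names, assuming all timestamps are in milliseconds:

    def event_expired(origin_server_ts, now, max_lifetime):
        # A room with no max_lifetime in its retention policy never expires
        # events from the client's point of view.
        if max_lifetime is None:
            return False
        return now - origin_server_ts > max_lifetime

With `apply_retention_policies=False`, `filter_events_for_client` skips this predicate entirely, so a members request at an old sync token still resolves even when the room's newest event has aged out.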
--- synapse/handlers/message.py | 2 +- synapse/visibility.py | 22 ++++++++++++++-------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 155ed6e06..3b0156f51 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -138,7 +138,7 @@ class MessageHandler(object): raise NotFoundError("Can't find event for token %s" % (at_token,)) visible_events = yield filter_events_for_client( - self.storage, user_id, last_events + self.storage, user_id, last_events, apply_retention_policies=False ) event = last_events[0] diff --git a/synapse/visibility.py b/synapse/visibility.py index 4d4141dac..7b037eeb0 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -44,7 +44,8 @@ MEMBERSHIP_PRIORITY = ( @defer.inlineCallbacks def filter_events_for_client( - storage: Storage, user_id, events, is_peeking=False, always_include_ids=frozenset() + storage: Storage, user_id, events, is_peeking=False, always_include_ids=frozenset(), + apply_retention_policies=True, ): """ Check which events a user is allowed to see @@ -59,6 +60,10 @@ def filter_events_for_client( events always_include_ids (set(event_id)): set of event ids to specifically include (unless sender is ignored) + apply_retention_policies (bool): Whether to filter out events that's older than + allowed by the room's retention policy. Useful when this function is called + to e.g. check whether a user should be allowed to see the state at a given + event rather than to know if it should send an event to a user's client(s). Returns: Deferred[list[synapse.events.EventBase]] @@ -86,13 +91,14 @@ def filter_events_for_client( erased_senders = yield storage.main.are_users_erased((e.sender for e in events)) - room_ids = set(e.room_id for e in events) - retention_policies = {} + if apply_retention_policies: + room_ids = set(e.room_id for e in events) + retention_policies = {} - for room_id in room_ids: - retention_policies[room_id] = yield storage.main.get_retention_policy_for_room( - room_id - ) + for room_id in room_ids: + retention_policies[room_id] = ( + yield storage.main.get_retention_policy_for_room(room_id) + ) def allowed(event): """ @@ -113,7 +119,7 @@ def filter_events_for_client( # Don't try to apply the room's retention policy if the event is a state event, as # MSC1763 states that retention is only considered for non-state events. - if not event.is_state(): + if apply_retention_policies and not event.is_state(): retention_policy = retention_policies[event.room_id] max_lifetime = retention_policy.get("max_lifetime") From 5ee2beeddbbcbf09ac054679de71db0e0bf9df31 Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 28 Nov 2019 19:32:49 +0000 Subject: [PATCH 127/133] Changelog --- changelog.d/6436.bugfix | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog.d/6436.bugfix diff --git a/changelog.d/6436.bugfix b/changelog.d/6436.bugfix new file mode 100644 index 000000000..954a4e1d8 --- /dev/null +++ b/changelog.d/6436.bugfix @@ -0,0 +1 @@ +Fix a bug where a room could become unusable with a low retention policy and a low activity. 
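The resulting API keeps the default behaviour for every existing caller and lets only the state-at-token path opt out. The call shapes below are taken from the diff above; the surrounding variables are assumed to be in scope:

    # Timeline delivery (e.g. /sync): retention applies by default.
    visible_events = yield filter_events_for_client(storage, user_id, events)

    # State at a token (e.g. /members?at=...): visibility rules still apply,
    # but retention-based expiry does not.
    visible_state = yield filter_events_for_client(
        storage, user_id, last_events, apply_retention_policies=False
    )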
From 78ec11c08562bfd635497621da238c7197e69b6f Mon Sep 17 00:00:00 2001 From: Brendan Abolivier Date: Thu, 28 Nov 2019 20:35:22 +0000 Subject: [PATCH 128/133] Lint --- synapse/visibility.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/synapse/visibility.py b/synapse/visibility.py index 7b037eeb0..dffe943b2 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -44,7 +44,11 @@ MEMBERSHIP_PRIORITY = ( @defer.inlineCallbacks def filter_events_for_client( - storage: Storage, user_id, events, is_peeking=False, always_include_ids=frozenset(), + storage: Storage, + user_id, + events, + is_peeking=False, + always_include_ids=frozenset(), apply_retention_policies=True, ): """ @@ -96,9 +100,9 @@ def filter_events_for_client( retention_policies = {} for room_id in room_ids: - retention_policies[room_id] = ( - yield storage.main.get_retention_policy_for_room(room_id) - ) + retention_policies[ + room_id + ] = yield storage.main.get_retention_policy_for_room(room_id) def allowed(event): """ From 23ea5721259059d50b80083bb7240a8cb56cf297 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 29 Nov 2019 13:51:14 +0000 Subject: [PATCH 129/133] Add User-Interactive Auth to /account/3pid/add (#6119) --- changelog.d/6119.feature | 1 + synapse/rest/client/v2_alpha/account.py | 5 +++++ 2 files changed, 6 insertions(+) create mode 100644 changelog.d/6119.feature diff --git a/changelog.d/6119.feature b/changelog.d/6119.feature new file mode 100644 index 000000000..1492e83c5 --- /dev/null +++ b/changelog.d/6119.feature @@ -0,0 +1 @@ +Require User-Interactive Authentication for `/account/3pid/add`, meaning the user's password will be required to add a third-party ID to their account. \ No newline at end of file diff --git a/synapse/rest/client/v2_alpha/account.py b/synapse/rest/client/v2_alpha/account.py index f26eae794..ad674239a 100644 --- a/synapse/rest/client/v2_alpha/account.py +++ b/synapse/rest/client/v2_alpha/account.py @@ -642,6 +642,7 @@ class ThreepidAddRestServlet(RestServlet): self.auth = hs.get_auth() self.auth_handler = hs.get_auth_handler() + @interactive_auth_handler @defer.inlineCallbacks def on_POST(self, request): requester = yield self.auth.get_user_by_req(request) @@ -652,6 +653,10 @@ class ThreepidAddRestServlet(RestServlet): client_secret = body["client_secret"] sid = body["sid"] + yield self.auth_handler.validate_user_via_ui_auth( + requester, body, self.hs.get_ip_from_request(request) + ) + validation_session = yield self.identity_handler.validate_threepid_session( client_secret, sid ) From 81731c6e75fe904a5b44873efa361a229743d99f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Filip=20=C5=A0t=C4=9Bdronsk=C3=BD?= Date: Mon, 2 Dec 2019 12:12:55 +0000 Subject: [PATCH 130/133] Fix: Pillow error when uploading RGBA image (#3325) (#6241) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-Off-By: Filip Štědronský --- changelog.d/6241.bugfix | 1 + synapse/rest/media/v1/thumbnailer.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 changelog.d/6241.bugfix diff --git a/changelog.d/6241.bugfix b/changelog.d/6241.bugfix new file mode 100644 index 000000000..25109ca4a --- /dev/null +++ b/changelog.d/6241.bugfix @@ -0,0 +1 @@ +Fix error from the Pillow library when uploading RGBA images. 
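The cause of the bug fixed below is simple: JPEG has no alpha channel, so Pillow refuses to encode an RGBA image as JPEG. A self-contained reproduction of the failure and of the conversion the patch applies; it assumes only that Pillow is installed, and the image contents are arbitrary:

    from io import BytesIO

    from PIL import Image

    img = Image.new("RGBA", (32, 32), (255, 0, 0, 128))  # half-transparent red
    buf = BytesIO()
    # img.save(buf, "JPEG") raises OSError: cannot write mode RGBA as JPEG
    img.convert("RGB").save(buf, "JPEG", quality=80)  # drop alpha, then encode

Alpha-capable output formats such as PNG are unaffected, which is why the fix keys the conversion on the output format instead of converting unconditionally.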
diff --git a/synapse/rest/media/v1/thumbnailer.py b/synapse/rest/media/v1/thumbnailer.py index 8cf415e29..c234ea742 100644 --- a/synapse/rest/media/v1/thumbnailer.py +++ b/synapse/rest/media/v1/thumbnailer.py @@ -129,5 +129,8 @@ class Thumbnailer(object): def _encode_image(self, output_image, output_type): output_bytes_io = BytesIO() - output_image.save(output_bytes_io, self.FORMATS[output_type], quality=80) + fmt = self.FORMATS[output_type] + if fmt == "JPEG": + output_image = output_image.convert("RGB") + output_image.save(output_bytes_io, fmt, quality=80) return output_bytes_io From 0ad75fd98ef1943ebea98c6d9f2dc5770c643b0a Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 2 Dec 2019 15:09:57 +0000 Subject: [PATCH 131/133] Use python3 packages for Ubuntu (#6443) --- INSTALL.md | 4 ++-- changelog.d/6443.doc | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) create mode 100644 changelog.d/6443.doc diff --git a/INSTALL.md b/INSTALL.md index 9b7360f0e..9da2e3c73 100644 --- a/INSTALL.md +++ b/INSTALL.md @@ -109,8 +109,8 @@ Installing prerequisites on Ubuntu or Debian: ``` sudo apt-get install build-essential python3-dev libffi-dev \ - python-pip python-setuptools sqlite3 \ - libssl-dev python-virtualenv libjpeg-dev libxslt1-dev + python3-pip python3-setuptools sqlite3 \ + libssl-dev python3-virtualenv libjpeg-dev libxslt1-dev ``` #### ArchLinux diff --git a/changelog.d/6443.doc b/changelog.d/6443.doc new file mode 100644 index 000000000..67c59f92e --- /dev/null +++ b/changelog.d/6443.doc @@ -0,0 +1 @@ +Switch Ubuntu package install recommendation to use python3 packages in INSTALL.md. \ No newline at end of file From 72078e4be56d42421e8748e0e45d0fe1204853dd Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Mon, 2 Dec 2019 15:11:32 +0000 Subject: [PATCH 132/133] Transfer power level state events on room upgrade (#6237) --- changelog.d/6237.bugfix | 1 + synapse/handlers/room.py | 36 +++++++++++++++++++++++++++++++----- 2 files changed, 32 insertions(+), 5 deletions(-) create mode 100644 changelog.d/6237.bugfix diff --git a/changelog.d/6237.bugfix b/changelog.d/6237.bugfix new file mode 100644 index 000000000..9285600b0 --- /dev/null +++ b/changelog.d/6237.bugfix @@ -0,0 +1 @@ +Transfer non-standard power levels on room upgrade. \ No newline at end of file diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index e92b2eafd..35a759f2f 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -198,21 +198,21 @@ class RoomCreationHandler(BaseHandler): # finally, shut down the PLs in the old room, and update them in the new # room. 
yield self._update_upgraded_room_pls( - requester, old_room_id, new_room_id, old_room_state + requester, old_room_id, new_room_id, old_room_state, ) return new_room_id @defer.inlineCallbacks def _update_upgraded_room_pls( - self, requester, old_room_id, new_room_id, old_room_state + self, requester, old_room_id, new_room_id, old_room_state, ): """Send updated power levels in both rooms after an upgrade Args: requester (synapse.types.Requester): the user requesting the upgrade - old_room_id (unicode): the id of the room to be replaced - new_room_id (unicode): the id of the replacement room + old_room_id (str): the id of the room to be replaced + new_room_id (str): the id of the replacement room old_room_state (dict[tuple[str, str], str]): the state map for the old room Returns: @@ -298,7 +298,7 @@ class RoomCreationHandler(BaseHandler): tombstone_event_id (unicode|str): the ID of the tombstone event in the old room. Returns: - Deferred[None] + Deferred """ user_id = requester.user.to_string() @@ -333,6 +333,7 @@ class RoomCreationHandler(BaseHandler): (EventTypes.Encryption, ""), (EventTypes.ServerACL, ""), (EventTypes.RelatedGroups, ""), + (EventTypes.PowerLevels, ""), ) old_room_state_ids = yield self.store.get_filtered_current_state_ids( @@ -346,6 +347,31 @@ class RoomCreationHandler(BaseHandler): if old_event: initial_state[k] = old_event.content + # Resolve the minimum power level required to send any state event + # We will give the upgrading user this power level temporarily (if necessary) such that + # they are able to copy all of the state events over, then revert them back to their + # original power level afterwards in _update_upgraded_room_pls + + # Copy over user power levels now as this will not be possible with >100PL users once + # the room has been created + + power_levels = initial_state[(EventTypes.PowerLevels, "")] + + # Calculate the minimum power level needed to clone the room + event_power_levels = power_levels.get("events", {}) + state_default = power_levels.get("state_default", 0) + ban = power_levels.get("ban") + needed_power_level = max(state_default, ban, max(event_power_levels.values())) + + # Raise the requester's power level in the new room if necessary + current_power_level = power_levels["users"][requester.user.to_string()] + if current_power_level < needed_power_level: + # Assign this power level to the requester + power_levels["users"][requester.user.to_string()] = needed_power_level + + # Set the power levels to the modified state + initial_state[(EventTypes.PowerLevels, "")] = power_levels + yield self._send_events_for_new_room( requester, new_room_id, From fdec84aa427e2e3b806eb15f462d652f8554cc8d Mon Sep 17 00:00:00 2001 From: Amber Brown Date: Tue, 3 Dec 2019 20:21:25 +1100 Subject: [PATCH 133/133] Add benchmarks for structured logging performance (#6266) --- changelog.d/6266.misc | 1 + synapse/logging/_terse_json.py | 1 + synmark/__init__.py | 72 ++++++++++++++++++++ synmark/__main__.py | 90 +++++++++++++++++++++++++ synmark/suites/__init__.py | 3 + synmark/suites/logging.py | 118 +++++++++++++++++++++++++++++++++ tox.ini | 9 +++ 7 files changed, 294 insertions(+) create mode 100644 changelog.d/6266.misc create mode 100644 synmark/__init__.py create mode 100644 synmark/__main__.py create mode 100644 synmark/suites/__init__.py create mode 100644 synmark/suites/logging.py diff --git a/changelog.d/6266.misc b/changelog.d/6266.misc new file mode 100644 index 000000000..634e421a7 --- /dev/null +++ b/changelog.d/6266.misc @@ -0,0 +1 @@ +Add benchmarks for 
structured logging and improve output performance. diff --git a/synapse/logging/_terse_json.py b/synapse/logging/_terse_json.py index 05fc64f40..03934956f 100644 --- a/synapse/logging/_terse_json.py +++ b/synapse/logging/_terse_json.py @@ -256,6 +256,7 @@ class TerseJSONToTCPLogObserver(object): # transport is the same, just trigger a resumeProducing. if self._producer and r.transport is self._producer.transport: self._producer.resumeProducing() + self._connection_waiter = None return # If the producer is still producing, stop it. diff --git a/synmark/__init__.py b/synmark/__init__.py new file mode 100644 index 000000000..570eb818d --- /dev/null +++ b/synmark/__init__.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +from twisted.internet import epollreactor +from twisted.internet.main import installReactor + +from synapse.config.homeserver import HomeServerConfig +from synapse.util import Clock + +from tests.utils import default_config, setup_test_homeserver + + +async def make_homeserver(reactor, config=None): + """ + Make a Homeserver suitable for running benchmarks against. + + Args: + reactor: A Twisted reactor to run under. + config: A HomeServerConfig to use, or None. + """ + cleanup_tasks = [] + clock = Clock(reactor) + + if not config: + config = default_config("test") + + config_obj = HomeServerConfig() + config_obj.parse_config_dict(config, "", "") + + hs = await setup_test_homeserver( + cleanup_tasks.append, config=config_obj, reactor=reactor, clock=clock + ) + stor = hs.get_datastore() + + # Run the database background updates. + if hasattr(stor, "do_next_background_update"): + while not await stor.has_completed_background_updates(): + await stor.do_next_background_update(1) + + def cleanup(): + for i in cleanup_tasks: + i() + + return hs, clock.sleep, cleanup + + +def make_reactor(): + """ + Instantiate and install a Twisted reactor suitable for testing (i.e. not the + default global one). + """ + reactor = epollreactor.EPollReactor() + + if "twisted.internet.reactor" in sys.modules: + del sys.modules["twisted.internet.reactor"] + installReactor(reactor) + + return reactor diff --git a/synmark/__main__.py b/synmark/__main__.py new file mode 100644 index 000000000..ac59befbd --- /dev/null +++ b/synmark/__main__.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import sys +from contextlib import redirect_stderr +from io import StringIO + +import pyperf +from synmark import make_reactor +from synmark.suites import SUITES + +from twisted.internet.defer import ensureDeferred +from twisted.logger import globalLogBeginner, textFileLogObserver +from twisted.python.failure import Failure + +from tests.utils import setupdb + + +def make_test(main): + """ + Take a benchmark function and wrap it in a reactor start and stop. + """ + + def _main(loops): + + reactor = make_reactor() + + file_out = StringIO() + with redirect_stderr(file_out): + + d = ensureDeferred(main(reactor, loops)) + + def on_done(_): + if isinstance(_, Failure): + _.printTraceback() + print(file_out.getvalue()) + reactor.stop() + return _ + + d.addBoth(on_done) + reactor.run() + + return d.result + + return _main + + +if __name__ == "__main__": + + def add_cmdline_args(cmd, args): + if args.log: + cmd.extend(["--log"]) + + runner = pyperf.Runner( + processes=3, min_time=2, show_name=True, add_cmdline_args=add_cmdline_args + ) + runner.argparser.add_argument("--log", action="store_true") + runner.parse_args() + + orig_loops = runner.args.loops + runner.args.inherit_environ = ["SYNAPSE_POSTGRES"] + + if runner.args.worker: + if runner.args.log: + globalLogBeginner.beginLoggingTo( + [textFileLogObserver(sys.__stdout__)], redirectStandardIO=False + ) + setupdb() + + for suite, loops in SUITES: + if loops: + runner.args.loops = loops + else: + runner.args.loops = orig_loops + loops = "auto" + runner.bench_time_func( + suite.__name__ + "_" + str(loops), make_test(suite.main), + ) diff --git a/synmark/suites/__init__.py b/synmark/suites/__init__.py new file mode 100644 index 000000000..cfa3b0ba3 --- /dev/null +++ b/synmark/suites/__init__.py @@ -0,0 +1,3 @@ +from . import logging + +SUITES = [(logging, 1000), (logging, 10000), (logging, None)] diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py new file mode 100644 index 000000000..d8e4c7d58 --- /dev/null +++ b/synmark/suites/logging.py @@ -0,0 +1,118 @@ +# -*- coding: utf-8 -*- +# Copyright 2019 The Matrix.org Foundation C.I.C. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import warnings +from io import StringIO + +from mock import Mock + +from pyperf import perf_counter +from synmark import make_homeserver + +from twisted.internet.defer import Deferred +from twisted.internet.protocol import ServerFactory +from twisted.logger import LogBeginner, Logger, LogPublisher +from twisted.protocols.basic import LineOnlyReceiver + +from synapse.logging._structured import setup_structured_logging + + +class LineCounter(LineOnlyReceiver): + + delimiter = b"\n" + + def __init__(self, *args, **kwargs): + self.count = 0 + super().__init__(*args, **kwargs) + + def lineReceived(self, line): + self.count += 1 + + if self.count >= self.factory.wait_for and self.factory.on_done: + on_done = self.factory.on_done + self.factory.on_done = None + on_done.callback(True) + + +async def main(reactor, loops): + """ + Benchmark how long it takes to send `loops` messages. + """ + servers = [] + + def protocol(): + p = LineCounter() + servers.append(p) + return p + + logger_factory = ServerFactory.forProtocol(protocol) + logger_factory.wait_for = loops + logger_factory.on_done = Deferred() + port = reactor.listenTCP(0, logger_factory, interface="127.0.0.1") + + hs, wait, cleanup = await make_homeserver(reactor) + + errors = StringIO() + publisher = LogPublisher() + mock_sys = Mock() + beginner = LogBeginner( + publisher, errors, mock_sys, warnings, initialBufferSize=loops + ) + + log_config = { + "loggers": {"synapse": {"level": "DEBUG"}}, + "drains": { + "tersejson": { + "type": "network_json_terse", + "host": "127.0.0.1", + "port": port.getHost().port, + "maximum_buffer": 100, + } + }, + } + + logger = Logger(namespace="synapse.logging.test_terse_json", observer=publisher) + logging_system = setup_structured_logging( + hs, hs.config, log_config, logBeginner=beginner, redirect_stdlib_logging=False + ) + + # Wait for it to connect... + await logging_system._observers[0]._service.whenConnected() + + start = perf_counter() + + # Send a bunch of useful messages + for i in range(0, loops): + logger.info("test message %s" % (i,)) + + if ( + len(logging_system._observers[0]._buffer) + == logging_system._observers[0].maximum_buffer + ): + while ( + len(logging_system._observers[0]._buffer) + > logging_system._observers[0].maximum_buffer / 2 + ): + await wait(0.01) + + await logger_factory.on_done + + end = perf_counter() - start + + logging_system.stop() + port.stopListening() + cleanup() + + return end diff --git a/tox.ini b/tox.ini index 62b350ea6..903a245fb 100644 --- a/tox.ini +++ b/tox.ini @@ -102,6 +102,15 @@ commands = {envbindir}/coverage run "{envbindir}/trial" {env:TRIAL_FLAGS:} {posargs:tests} {env:TOXSUFFIX:} +[testenv:benchmark] +deps = + {[base]deps} + pyperf +setenv = + SYNAPSE_POSTGRES = 1 +commands = + python -m synmark {posargs:} + [testenv:packaging] skip_install=True deps =
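
Notes on the patches above, with short illustrative sketches (hypothetical code, not part of any patch):

The thumbnailer hunk at the top of this excerpt works around a Pillow limitation: JPEG has no alpha channel, so saving an RGBA image (e.g. a thumbnail of a transparent PNG) as JPEG raises `OSError: cannot write mode RGBA as JPEG`. A minimal standalone reproduction of the failure and the fix, assuming only that Pillow is installed:

```python
from io import BytesIO

from PIL import Image

# A red square with 50% alpha, standing in for a transparent PNG upload.
image = Image.new("RGBA", (64, 64), (255, 0, 0, 128))

buf = BytesIO()
try:
    # This is effectively what the old code did for transparent sources.
    image.save(buf, "JPEG", quality=80)
except OSError as e:
    print(e)  # cannot write mode RGBA as JPEG

# The patched code drops the alpha channel first, which always succeeds.
image.convert("RGB").save(buf, "JPEG", quality=80)
```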
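PATCH 132 copies `m.room.power_levels` into the new room and temporarily raises the upgrading user to the minimum level needed to recreate every copied state event: the maximum of `state_default`, `ban`, and the highest per-event requirement. A worked example of that calculation with made-up room data (illustrative only, not Synapse code):

```python
# Hypothetical m.room.power_levels content for the old room.
power_levels = {
    "ban": 50,
    "state_default": 50,
    "events": {
        "m.room.power_levels": 100,
        "m.room.history_visibility": 100,
    },
    "users": {"@upgrader:example.com": 75},
}

event_power_levels = power_levels.get("events", {})
state_default = power_levels.get("state_default", 0)
ban = power_levels.get("ban", 50)  # 50 is the Matrix default ban level
needed_power_level = max(state_default, ban, max(event_power_levels.values()))

# The per-event requirements dominate here, so needed_power_level == 100.
user = "@upgrader:example.com"
if power_levels["users"][user] < needed_power_level:
    # Raise the upgrading user from 75 to 100, as the patch does.
    power_levels["users"][user] = needed_power_level

assert power_levels["users"][user] == 100
```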
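PATCH 133's harness wraps each suite's `main(reactor, loops)` coroutine in a reactor start/stop and hands it to pyperf's `Runner.bench_time_func`, whose contract is: the callable receives a loop count and returns the elapsed seconds for those loops. A stripped-down sketch of that contract, with all of the Twisted and Synapse setup omitted:

```python
import pyperf


def bench_noop(loops):
    # pyperf passes the number of iterations to run; we time them and
    # return the elapsed wall-clock seconds, as make_test()'s _main does.
    t0 = pyperf.perf_counter()
    for _ in range(loops):
        pass
    return pyperf.perf_counter() - t0


if __name__ == "__main__":
    runner = pyperf.Runner()
    runner.bench_time_func("noop", bench_noop)
```

With the tox environment added above, the full suite runs as `tox -e benchmark`; extra flags reach `python -m synmark` through `{posargs}` (e.g. `tox -e benchmark -- --log` to enable the `--log` option defined in `synmark/__main__.py`).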