Merge branch 'develop' into uhoreg/cross_signing_fix_workers_notify
commit 9c94b48bf1

INSTALL.md (16 lines changed)
@@ -413,16 +413,18 @@ For a more detailed guide to configuring your server for federation, see
 
 ## Email
 
-It is desirable for Synapse to have the capability to send email. For example,
-this is required to support the 'password reset' feature.
+It is desirable for Synapse to have the capability to send email. This allows
+Synapse to send password reset emails, send verifications when an email address
+is added to a user's account, and send email notifications to users when they
+receive new messages.
 
 To configure an SMTP server for Synapse, modify the configuration section
-headed ``email``, and be sure to have at least the ``smtp_host``, ``smtp_port``
-and ``notif_from`` fields filled out.  You may also need to set ``smtp_user``,
-``smtp_pass``, and ``require_transport_security``.
+headed `email`, and be sure to have at least the `smtp_host`, `smtp_port`
+and `notif_from` fields filled out.  You may also need to set `smtp_user`,
+`smtp_pass`, and `require_transport_security`.
 
-If Synapse is not configured with an SMTP server, password reset via email will
-be disabled by default.
+If email is not configured, password reset, registration and notifications via
+email will be disabled.
 
 ## Registering a user
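For reference, the settings named in the new text live under the `email` section of `homeserver.yaml`. A minimal sketch, assuming placeholder host names and credentials (none of these values come from this commit):

```yaml
email:
  smtp_host: mail.example.com        # assumed placeholder
  smtp_port: 587
  smtp_user: "synapse"               # only needed if the relay requires auth
  smtp_pass: "secret"                # assumed placeholder
  require_transport_security: true
  notif_from: "Synapse <noreply@example.com>"
```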
changelog.d/6259.misc (new file)
@@ -0,0 +1 @@
+Expose some homeserver functionality to spam checkers.

changelog.d/6271.misc (new file)
@@ -0,0 +1 @@
+Replace every instance of `logger.warn` method with `logger.warning` as the former is deprecated.

changelog.d/6272.doc (new file)
@@ -0,0 +1 @@
+Update `INSTALL.md` Email section to talk about `account_threepid_delegates`.

changelog.d/6294.misc (new file)
@@ -0,0 +1 @@
+Split out state storage into separate data store.

changelog.d/6300.misc (new file)
@@ -0,0 +1 @@
+Move `persist_events` out from main data store.

changelog.d/6304.misc (new file)
@@ -0,0 +1 @@
+Update the version of black used to 19.10b0.

changelog.d/6307.bugfix (new file)
@@ -0,0 +1 @@
+Fix `/purge_room` admin API.
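The `logger.warn` cleanup recorded in `changelog.d/6271.misc` tracks the standard library: `logging.Logger.warn` is a deprecated alias for `Logger.warning`, and CPython emits a `DeprecationWarning` when the old spelling is called. A small self-contained illustration (not part of the commit):

```python
import logging
import warnings

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("demo")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.warn("old spelling")  # deprecated alias; still logs the message
    assert any(issubclass(w.category, DeprecationWarning) for w in caught)

logger.warning("preferred spelling")  # what this commit migrates to
```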
@@ -78,7 +78,7 @@ class InputOutput(object):
         m = re.match("^join (\S+)$", line)
         if m:
             # The `sender` wants to join a room.
-            room_name, = m.groups()
+            (room_name,) = m.groups()
             self.print_line("%s joining %s" % (self.user, room_name))
             self.server.join_room(room_name, self.user, self.user)
             # self.print_line("OK.")

@@ -105,7 +105,7 @@ class InputOutput(object):
         m = re.match("^backfill (\S+)$", line)
         if m:
             # we want to backfill a room
-            room_name, = m.groups()
+            (room_name,) = m.groups()
             self.print_line("backfill %s" % room_name)
             self.server.backfill(room_name)
             return
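The two hunks above are mechanical output of the black upgrade recorded in `changelog.d/6304.misc`: black 19.10b0 normalizes single-element tuple unpacking from a bare trailing comma to explicit parentheses. A standalone illustration (not from the commit):

```python
import re

m = re.match(r"^join (\S+)$", "join #demo")

# Before black 19.10b0 this read: room_name, = m.groups()
# The formatter rewrites the single-element unpacking with parentheses:
(room_name,) = m.groups()

print(room_name)  # -> #demo
```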
@@ -217,8 +217,9 @@ def main(args, environ):
         # backwards-compatibility generate-a-config-on-the-fly mode
         if "SYNAPSE_CONFIG_PATH" in environ:
             error(
-                "SYNAPSE_SERVER_NAME and SYNAPSE_CONFIG_PATH are mutually exclusive "
-                "except in `generate` or `migrate_config` mode."
+                "SYNAPSE_SERVER_NAME can only be combined with SYNAPSE_CONFIG_PATH "
+                "in `generate` or `migrate_config` mode. To start synapse using a "
+                "config file, unset the SYNAPSE_SERVER_NAME environment variable."
             )
 
         config_path = "/compiled/homeserver.yaml"
mypy.ini (11 lines changed)
@@ -1,8 +1,11 @@
 [mypy]
-namespace_packages=True
-plugins=mypy_zope:plugin
-follow_imports=skip
-mypy_path=stubs
+namespace_packages = True
+plugins = mypy_zope:plugin
+follow_imports = normal
+check_untyped_defs = True
+show_error_codes = True
+show_traceback = True
+mypy_path = stubs
 
 [mypy-zope]
 ignore_missing_imports = True
@@ -72,7 +72,7 @@ def move_media(origin_server, file_id, src_paths, dest_paths):
     # check that the original exists
     original_file = src_paths.remote_media_filepath(origin_server, file_id)
     if not os.path.exists(original_file):
-        logger.warn(
+        logger.warning(
             "Original for %s/%s (%s) does not exist",
             origin_server,
             file_id,
@@ -157,7 +157,7 @@ class Store(
             )
         except self.database_engine.module.DatabaseError as e:
             if self.database_engine.is_deadlock(e):
-                logger.warn("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
+                logger.warning("[TXN DEADLOCK] {%s} %d/%d", desc, i, N)
                 if i < N:
                     i += 1
                     conn.rollback()

@@ -432,7 +432,7 @@ class Porter(object):
             for row in rows:
                 d = dict(zip(headers, row))
                 if "\0" in d['value']:
-                    logger.warn('dropping search row %s', d)
+                    logger.warning('dropping search row %s', d)
                 else:
                     rows_dict.append(d)
 
@@ -647,7 +647,7 @@ class Porter(object):
             if isinstance(col, bytes):
                 return bytearray(col)
             elif isinstance(col, string_types) and "\0" in col:
-                logger.warn(
+                logger.warning(
                     "DROPPING ROW: NUL value in table %s col %s: %r",
                     table,
                     headers[j],
@@ -497,7 +497,7 @@ class Auth(object):
         token = self.get_access_token_from_request(request)
         service = self.store.get_app_service_by_token(token)
         if not service:
-            logger.warn("Unrecognised appservice access token.")
+            logger.warning("Unrecognised appservice access token.")
             raise InvalidClientTokenError()
         request.authenticated_entity = service.sender
         return defer.succeed(service)
@@ -44,6 +44,8 @@ def check_bind_error(e, address, bind_addresses):
         bind_addresses (list): Addresses on which the service listens.
     """
     if address == "0.0.0.0" and "::" in bind_addresses:
-        logger.warn("Failed to listen on 0.0.0.0, continuing because listening on [::]")
+        logger.warning(
+            "Failed to listen on 0.0.0.0, continuing because listening on [::]"
+        )
     else:
         raise e
@@ -94,7 +94,7 @@ class AppserviceServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -103,7 +103,7 @@ class AppserviceServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -153,7 +153,7 @@ class ClientReaderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -162,7 +162,7 @@ class ClientReaderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -147,7 +147,7 @@ class EventCreatorServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -156,7 +156,7 @@ class EventCreatorServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -132,7 +132,7 @@ class FederationReaderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -141,7 +141,7 @@ class FederationReaderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -123,7 +123,7 @@ class FederationSenderServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -132,7 +132,7 @@ class FederationSenderServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -204,7 +204,7 @@ class FrontendProxyServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -213,7 +213,7 @@ class FrontendProxyServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -282,7 +282,7 @@ class SynapseHomeServer(HomeServer):
                 reactor.addSystemEventTrigger("before", "shutdown", s.stopListening)
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -291,7 +291,7 @@ class SynapseHomeServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
     def run_startup_checks(self, db_conn, database_engine):
         all_users_native = are_all_users_on_domain(

@@ -569,7 +569,7 @@ def run(hs):
                 hs.config.report_stats_endpoint, stats
             )
         except Exception as e:
-            logger.warn("Error reporting stats: %s", e)
+            logger.warning("Error reporting stats: %s", e)
 
     def performance_stats_init():
         try:
@@ -120,7 +120,7 @@ class MediaRepositoryServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -129,7 +129,7 @@ class MediaRepositoryServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -114,7 +114,7 @@ class PusherServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -123,7 +123,7 @@ class PusherServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -326,7 +326,7 @@ class SynchrotronServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -335,7 +335,7 @@ class SynchrotronServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -150,7 +150,7 @@ class UserDirectoryServer(HomeServer):
                 )
             elif listener["type"] == "metrics":
                 if not self.get_config().enable_metrics:
-                    logger.warn(
+                    logger.warning(
                         (
                             "Metrics listener configured, but "
                             "enable_metrics is not True!"
@@ -159,7 +159,7 @@ class UserDirectoryServer(HomeServer):
                 else:
                     _base.listen_metrics(listener["bind_addresses"], listener["port"])
             else:
-                logger.warn("Unrecognized listener type: %s", listener["type"])
+                logger.warning("Unrecognized listener type: %s", listener["type"])
 
         self.get_tcp_replication().start_replication(self)
@@ -125,7 +125,7 @@ class KeyConfig(Config):
 
         # if neither trusted_key_servers nor perspectives are given, use the default.
         if "perspectives" not in config and "trusted_key_servers" not in config:
-            logger.warn(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
+            logger.warning(TRUSTED_KEY_SERVER_NOT_CONFIGURED_WARN)
            key_servers = [{"server_name": "matrix.org"}]
         else:
             key_servers = config.get("trusted_key_servers", [])

@@ -156,7 +156,7 @@ class KeyConfig(Config):
         if not self.macaroon_secret_key:
             # Unfortunately, there are people out there that don't have this
             # set. Lets just be "nice" and derive one from their secret key.
-            logger.warn("Config is missing macaroon_secret_key")
+            logger.warning("Config is missing macaroon_secret_key")
             seed = bytes(self.signing_key[0])
             self.macaroon_secret_key = hashlib.sha256(seed).digest()
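The fallback in the second hunk derives the missing secret deterministically from the server's signing key. A standalone restatement of that derivation (the seed bytes here are a placeholder for `bytes(self.signing_key[0])`):

```python
import hashlib

signing_key_seed = b"placeholder-signing-key-bytes"  # stands in for bytes(self.signing_key[0])
macaroon_secret_key = hashlib.sha256(signing_key_seed).digest()

assert len(macaroon_secret_key) == 32  # SHA-256 always yields a 32-byte digest
```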
@@ -182,7 +182,7 @@ def _reload_stdlib_logging(*args, log_config=None):
     logger = logging.getLogger("")
 
     if not log_config:
-        logger.warn("Reloaded a blank config?")
+        logger.warning("Reloaded a blank config?")
 
     logging.config.dictConfig(log_config)
@@ -77,7 +77,7 @@ def check(room_version, event, auth_events, do_sig_check=True, do_size_check=Tru
     if auth_events is None:
         # Oh, we don't know what the state of the room was, so we
         # are trusting that this is allowed (at least for now)
-        logger.warn("Trusting event: %s", event.event_id)
+        logger.warning("Trusting event: %s", event.event_id)
         return
 
     if event.type == EventTypes.Create:
@@ -1,5 +1,6 @@
 # -*- coding: utf-8 -*-
 # Copyright 2017 New Vector Ltd
+# Copyright 2019 The Matrix.org Foundation C.I.C.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -13,6 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import inspect
+
+from synapse.spam_checker_api import SpamCheckerApi
+
 
 class SpamChecker(object):
     def __init__(self, hs):
@@ -26,7 +31,14 @@ class SpamChecker(object):
             pass
 
         if module is not None:
-            self.spam_checker = module(config=config)
+            # Older spam checkers don't accept the `api` argument, so we
+            # try and detect support.
+            spam_args = inspect.getfullargspec(module)
+            if "api" in spam_args.args:
+                api = SpamCheckerApi(hs)
+                self.spam_checker = module(config=config, api=api)
+            else:
+                self.spam_checker = module(config=config)
 
     def check_event_for_spam(self, event):
         """Checks if a given event is considered "spammy" by this server.
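The constructor-sniffing above keeps older spam checker modules working while letting newer ones receive the `api` object. A hypothetical module written against the new interface (the class name and the body of `check_event_for_spam` are illustrative, not from the commit):

```python
import inspect


class ExampleSpamChecker(object):
    """Hypothetical third-party checker that opts into the new `api` argument."""

    def __init__(self, config, api):
        self.config = config
        self.api = api  # SpamCheckerApi instance passed in by Synapse

    def check_event_for_spam(self, event):
        return False  # never flags anything


# Mirrors the detection in the hunk above: the class's __init__ exposes "api".
assert "api" in inspect.getfullargspec(ExampleSpamChecker).args
```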
@@ -102,7 +102,7 @@ class FederationBase(object):
                 pass
 
             if not res:
-                logger.warn(
+                logger.warning(
                     "Failed to find copy of %s with valid signature", pdu.event_id
                 )
 
@@ -173,7 +173,7 @@ class FederationBase(object):
             return redacted_event
 
         if self.spam_checker.check_event_for_spam(pdu):
-            logger.warn(
+            logger.warning(
                 "Event contains spam, redacting %s: %s",
                 pdu.event_id,
                 pdu.get_pdu_json(),
@@ -185,7 +185,7 @@ class FederationBase(object):
     def errback(failure, pdu):
         failure.trap(SynapseError)
         with PreserveLoggingContext(ctx):
-            logger.warn(
+            logger.warning(
                 "Signature check failed for %s: %s",
                 pdu.event_id,
                 failure.getErrorMessage(),
@@ -522,12 +522,12 @@ class FederationClient(FederationBase):
                 res = yield callback(destination)
                 return res
             except InvalidResponseError as e:
-                logger.warn("Failed to %s via %s: %s", description, destination, e)
+                logger.warning("Failed to %s via %s: %s", description, destination, e)
             except HttpResponseException as e:
                 if not 500 <= e.code < 600:
                     raise e.to_synapse_error()
                 else:
-                    logger.warn(
+                    logger.warning(
                         "Failed to %s via %s: %i %s",
                         description,
                         destination,
@@ -535,7 +535,9 @@ class FederationClient(FederationBase):
                         e.args[0],
                     )
             except Exception:
-                logger.warn("Failed to %s via %s", description, destination, exc_info=1)
+                logger.warning(
+                    "Failed to %s via %s", description, destination, exc_info=1
+                )
 
         raise SynapseError(502, "Failed to %s via any server" % (description,))
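The hunks above sit inside a helper that walks a list of candidate destinations, logging and moving on after each failure and giving up with a 502 only when every server has failed. A distilled synchronous sketch of that control flow (names here are illustrative; the real method is a Twisted generator with finer-grained exception handling):

```python
import logging

logger = logging.getLogger(__name__)


def try_destinations(destinations, callback, description):
    """Call `callback` against each destination until one succeeds."""
    for destination in destinations:
        try:
            return callback(destination)
        except Exception as e:
            logger.warning("Failed to %s via %s: %s", description, destination, e)
    raise RuntimeError("Failed to %s via any server" % (description,))
```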
@@ -220,7 +220,7 @@ class FederationServer(FederationBase):
             try:
                 await self.check_server_matches_acl(origin_host, room_id)
             except AuthError as e:
-                logger.warn("Ignoring PDUs for room %s from banned server", room_id)
+                logger.warning("Ignoring PDUs for room %s from banned server", room_id)
                 for pdu in pdus_by_room[room_id]:
                     event_id = pdu.event_id
                     pdu_results[event_id] = e.error_dict()
@@ -233,7 +233,7 @@ class FederationServer(FederationBase):
                     await self._handle_received_pdu(origin, pdu)
                     pdu_results[event_id] = {}
                 except FederationError as e:
-                    logger.warn("Error handling PDU %s: %s", event_id, e)
+                    logger.warning("Error handling PDU %s: %s", event_id, e)
                     pdu_results[event_id] = {"error": str(e)}
                 except Exception as e:
                     f = failure.Failure()
@@ -333,7 +333,9 @@ class FederationServer(FederationBase):
 
         room_version = await self.store.get_room_version(room_id)
         if room_version not in supported_versions:
-            logger.warn("Room version %s not in %s", room_version, supported_versions)
+            logger.warning(
+                "Room version %s not in %s", room_version, supported_versions
+            )
             raise IncompatibleRoomVersionError(room_version=room_version)
 
         pdu = await self.handler.on_make_join_request(origin, room_id, user_id)
@@ -679,7 +681,7 @@ def server_matches_acl_event(server_name, acl_event):
     # server name is a literal IP
     allow_ip_literals = acl_event.content.get("allow_ip_literals", True)
     if not isinstance(allow_ip_literals, bool):
-        logger.warn("Ignorning non-bool allow_ip_literals flag")
+        logger.warning("Ignorning non-bool allow_ip_literals flag")
         allow_ip_literals = True
     if not allow_ip_literals:
         # check for ipv6 literals. These start with '['.
@@ -693,7 +695,7 @@ def server_matches_acl_event(server_name, acl_event):
     # next,  check the deny list
     deny = acl_event.content.get("deny", [])
     if not isinstance(deny, (list, tuple)):
-        logger.warn("Ignorning non-list deny ACL %s", deny)
+        logger.warning("Ignorning non-list deny ACL %s", deny)
         deny = []
     for e in deny:
         if _acl_entry_matches(server_name, e):
@@ -703,7 +705,7 @@ def server_matches_acl_event(server_name, acl_event):
     # then the allow list.
     allow = acl_event.content.get("allow", [])
     if not isinstance(allow, (list, tuple)):
-        logger.warn("Ignorning non-list allow ACL %s", allow)
+        logger.warning("Ignorning non-list allow ACL %s", allow)
         allow = []
     for e in allow:
         if _acl_entry_matches(server_name, e):
@@ -717,7 +719,7 @@ def server_matches_acl_event(server_name, acl_event):
 
 def _acl_entry_matches(server_name, acl_entry):
     if not isinstance(acl_entry, six.string_types):
-        logger.warn(
+        logger.warning(
             "Ignoring non-str ACL entry '%s' (is %s)", acl_entry, type(acl_entry)
         )
         return False
@@ -772,7 +774,7 @@ class FederationHandlerRegistry(object):
     async def on_edu(self, edu_type, origin, content):
         handler = self.edu_handlers.get(edu_type)
         if not handler:
-            logger.warn("No handler registered for EDU type %s", edu_type)
+            logger.warning("No handler registered for EDU type %s", edu_type)
 
         with start_active_span_from_edu(content, "handle_edu"):
             try:
@@ -785,7 +787,7 @@ class FederationHandlerRegistry(object):
     def on_query(self, query_type, args):
         handler = self.query_handlers.get(query_type)
         if not handler:
-            logger.warn("No handler registered for query type %s", query_type)
+            logger.warning("No handler registered for query type %s", query_type)
             raise NotFoundError("No handler for Query type '%s'" % (query_type,))
 
         return handler(args)
@@ -192,15 +192,16 @@ class PerDestinationQueue(object):
             # We have to keep 2 free slots for presence and rr_edus
             limit = MAX_EDUS_PER_TRANSACTION - 2
 
-            device_update_edus, dev_list_id = (
-                yield self._get_device_update_edus(limit)
+            device_update_edus, dev_list_id = yield self._get_device_update_edus(
+                limit
             )
 
             limit -= len(device_update_edus)
 
-            to_device_edus, device_stream_id = (
-                yield self._get_to_device_message_edus(limit)
-            )
+            (
+                to_device_edus,
+                device_stream_id,
+            ) = yield self._get_to_device_message_edus(limit)
 
             pending_edus = device_update_edus + to_device_edus
 
@@ -146,7 +146,7 @@ class TransactionManager(object):
         if code == 200:
             for e_id, r in response.get("pdus", {}).items():
                 if "error" in r:
-                    logger.warn(
+                    logger.warning(
                         "TX [%s] {%s} Remote returned error for %s: %s",
                         destination,
                         txn_id,
@@ -155,7 +155,7 @@ class TransactionManager(object):
                 )
         else:
             for p in pdus:
-                logger.warn(
+                logger.warning(
                     "TX [%s] {%s} Failed to send event %s",
                     destination,
                     txn_id,
@@ -202,7 +202,7 @@ def _parse_auth_header(header_bytes):
         sig = strip_quotes(param_dict["sig"])
         return origin, key, sig
     except Exception as e:
-        logger.warn(
+        logger.warning(
             "Error parsing auth header '%s': %s",
             header_bytes.decode("ascii", "replace"),
             e,
@@ -287,10 +287,12 @@ class BaseFederationServlet(object):
             except NoAuthenticationError:
                 origin = None
                 if self.REQUIRE_AUTH:
-                    logger.warn("authenticate_request failed: missing authentication")
+                    logger.warning(
+                        "authenticate_request failed: missing authentication"
+                    )
                     raise
             except Exception as e:
-                logger.warn("authenticate_request failed: %s", e)
+                logger.warning("authenticate_request failed: %s", e)
                 raise
 
             request_tags = {
@@ -181,7 +181,7 @@ class GroupAttestionRenewer(object):
             elif not self.is_mine_id(user_id):
                 destination = get_domain_from_id(user_id)
             else:
-                logger.warn(
+                logger.warning(
                     "Incorrectly trying to do attestations for user: %r in %r",
                     user_id,
                     group_id,
@@ -488,7 +488,7 @@ class GroupsServerHandler(object):
                 profile = yield self.profile_handler.get_profile_from_cache(user_id)
                 user_profile.update(profile)
             except Exception as e:
-                logger.warn("Error getting profile for %s: %s", user_id, e)
+                logger.warning("Error getting profile for %s: %s", user_id, e)
             user_profiles.append(user_profile)
 
         return {"chunk": user_profiles, "total_user_count_estimate": len(invited_users)}
@@ -38,9 +38,10 @@ class AccountDataEventSource(object):
                 {"type": "m.tag", "content": {"tags": room_tags}, "room_id": room_id}
             )
 
-        account_data, room_account_data = (
-            yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
-        )
+        (
+            account_data,
+            room_account_data,
+        ) = yield self.store.get_updated_account_data_for_user(user_id, last_stream_id)
 
         for account_data_type, content in account_data.items():
             results.append({"type": account_data_type, "content": content})
@@ -30,6 +30,9 @@ class AdminHandler(BaseHandler):
     def __init__(self, hs):
         super(AdminHandler, self).__init__(hs)
 
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
+
     @defer.inlineCallbacks
     def get_whois(self, user):
         connections = []
@@ -205,7 +208,7 @@ class AdminHandler(BaseHandler):
 
             from_key = events[-1].internal_metadata.after
 
-            events = yield filter_events_for_client(self.store, user_id, events)
+            events = yield filter_events_for_client(self.storage, user_id, events)
 
             writer.write_events(room_id, events)
 
@@ -241,7 +244,7 @@ class AdminHandler(BaseHandler):
         for event_id in extremities:
             if not event_to_unseen_prevs[event_id]:
                 continue
-            state = yield self.store.get_state_for_event(event_id)
+            state = yield self.state_store.get_state_for_event(event_id)
             writer.write_state(room_id, event_id, state)
 
         return writer.finished()
@@ -73,7 +73,10 @@ class ApplicationServicesHandler(object):
             try:
                 limit = 100
                 while True:
-                    upper_bound, events = yield self.store.get_new_events_for_appservice(
+                    (
+                        upper_bound,
+                        events,
+                    ) = yield self.store.get_new_events_for_appservice(
                         self.current_max, limit
                     )
 
@@ -525,7 +525,7 @@ class AuthHandler(BaseHandler):
 
         result = None
         if not user_infos:
-            logger.warn("Attempted to login as %s but they do not exist", user_id)
+            logger.warning("Attempted to login as %s but they do not exist", user_id)
         elif len(user_infos) == 1:
             # a single match (possibly not exact)
             result = user_infos.popitem()
@@ -534,7 +534,7 @@ class AuthHandler(BaseHandler):
             result = (user_id, user_infos[user_id])
         else:
             # multiple matches, none of them exact
-            logger.warn(
+            logger.warning(
                 "Attempted to login as %s but it matches more than one user "
                 "inexactly: %r",
                 user_id,
@@ -728,7 +728,7 @@ class AuthHandler(BaseHandler):
 
         result = yield self.validate_hash(password, password_hash)
         if not result:
-            logger.warn("Failed password login for user %s", user_id)
+            logger.warning("Failed password login for user %s", user_id)
             return None
         return user_id
 
@@ -46,6 +46,7 @@ class DeviceWorkerHandler(BaseHandler):
 
         self.hs = hs
         self.state = hs.get_state_handler()
+        self.state_store = hs.get_storage().state
         self._auth_handler = hs.get_auth_handler()
 
     @trace
@@ -178,7 +179,7 @@ class DeviceWorkerHandler(BaseHandler):
                 continue
 
             # mapping from event_id -> state_dict
-            prev_state_ids = yield self.store.get_state_ids_for_events(event_ids)
+            prev_state_ids = yield self.state_store.get_state_ids_for_events(event_ids)
 
             # Check if we've joined the room? If so we just blindly add all the users to
             # the "possibly changed" users.
@@ -656,7 +657,7 @@ class DeviceListUpdater(object):
         except (NotRetryingDestination, RequestSendFailed, HttpResponseException):
             # TODO: Remember that we are now out of sync and try again
             # later
-            logger.warn("Failed to handle device list update for %s", user_id)
+            logger.warning("Failed to handle device list update for %s", user_id)
             # We abort on exceptions rather than accepting the update
             # as otherwise synapse will 'forget' that its device list
             # is out of date. If we bail then we will retry the resync
@@ -694,7 +695,7 @@ class DeviceListUpdater(object):
         # up on storing the total list of devices and only handle the
        # delta instead.
         if len(devices) > 1000:
-            logger.warn(
+            logger.warning(
                 "Ignoring device list snapshot for %s as it has >1K devs (%d)",
                 user_id,
                 len(devices),
@@ -52,7 +52,7 @@ class DeviceMessageHandler(object):
         local_messages = {}
         sender_user_id = content["sender"]
         if origin != get_domain_from_id(sender_user_id):
-            logger.warn(
+            logger.warning(
                 "Dropping device message from %r with spoofed sender %r",
                 origin,
                 sender_user_id,
@@ -119,9 +119,10 @@ class E2eKeysHandler(object):
             else:
                 query_list.append((user_id, None))
 
-        user_ids_not_in_cache, remote_results = (
-            yield self.store.get_user_devices_from_cache(query_list)
-        )
+        (
+            user_ids_not_in_cache,
+            remote_results,
+        ) = yield self.store.get_user_devices_from_cache(query_list)
         for user_id, devices in iteritems(remote_results):
             user_devices = results.setdefault(user_id, {})
             for device_id, device in iteritems(devices):
@@ -688,17 +689,21 @@ class E2eKeysHandler(object):
 
         try:
             # get our self-signing key to verify the signatures
-            _, self_signing_key_id, self_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "self_signing"
-            )
+            (
+                _,
+                self_signing_key_id,
+                self_signing_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "self_signing")
 
             # get our master key, since we may have received a signature of it.
             # We need to fetch it here so that we know what its key ID is, so
             # that we can check if a signature that was sent is a signature of
            # the master key or of a device
-            master_key, _, master_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "master"
-            )
+            (
+                master_key,
+                _,
+                master_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "master")
 
             # fetch our stored devices. This is used to 1. verify
             # signatures on the master key, and 2. to compare with what
@@ -838,9 +843,11 @@ class E2eKeysHandler(object):
 
         try:
             # get our user-signing key to verify the signatures
-            user_signing_key, user_signing_key_id, user_signing_verify_key = yield self._get_e2e_cross_signing_verify_key(
-                user_id, "user_signing"
-            )
+            (
+                user_signing_key,
+                user_signing_key_id,
+                user_signing_verify_key,
+            ) = yield self._get_e2e_cross_signing_verify_key(user_id, "user_signing")
         except SynapseError as e:
             failure = _exception_to_failure(e)
             for user, devicemap in signatures.items():
@@ -859,7 +866,11 @@ class E2eKeysHandler(object):
             try:
                 # get the target user's master key, to make sure it matches
                 # what was sent
-                master_key, master_key_id, _ = yield self._get_e2e_cross_signing_verify_key(
+                (
+                    master_key,
+                    master_key_id,
+                    _,
+                ) = yield self._get_e2e_cross_signing_verify_key(
                     target_user, "master", user_id
                 )
 
@@ -147,6 +147,10 @@ class EventStreamHandler(BaseHandler):
 
 
 class EventHandler(BaseHandler):
+    def __init__(self, hs):
+        super(EventHandler, self).__init__(hs)
+        self.storage = hs.get_storage()
+
     @defer.inlineCallbacks
     def get_event(self, user, room_id, event_id):
         """Retrieve a single specified event.
@@ -172,7 +176,7 @@ class EventHandler(BaseHandler):
         is_peeking = user.to_string() not in users
 
         filtered = yield filter_events_for_client(
-            self.store, user.to_string(), [event], is_peeking=is_peeking
+            self.storage, user.to_string(), [event], is_peeking=is_peeking
         )
 
         if not filtered:
@@ -110,6 +110,7 @@ class FederationHandler(BaseHandler):
 
         self.store = hs.get_datastore()
         self.storage = hs.get_storage()
+        self.state_store = self.storage.state
         self.federation_client = hs.get_federation_client()
         self.state_handler = hs.get_state_handler()
         self.server_name = hs.hostname
@@ -181,7 +182,7 @@ class FederationHandler(BaseHandler):
         try:
             self._sanity_check_event(pdu)
         except SynapseError as err:
-            logger.warn(
+            logger.warning(
                 "[%s %s] Received event failed sanity checks", room_id, event_id
             )
             raise FederationError("ERROR", err.code, err.msg, affected=pdu.event_id)
@@ -302,7 +303,7 @@ class FederationHandler(BaseHandler):
             # following.
 
             if sent_to_us_directly:
-                logger.warn(
+                logger.warning(
                     "[%s %s] Rejecting: failed to fetch %d prev events: %s",
                     room_id,
                     event_id,
@@ -325,7 +326,7 @@ class FederationHandler(BaseHandler):
                 event_map = {event_id: pdu}
                 try:
                     # Get the state of the events we know about
-                    ours = yield self.store.get_state_groups_ids(room_id, seen)
+                    ours = yield self.state_store.get_state_groups_ids(room_id, seen)
 
                     # state_maps is a list of mappings from (type, state_key) to event_id
                     state_maps = list(
@@ -351,10 +352,11 @@ class FederationHandler(BaseHandler):
                     # note that if any of the missing prevs share missing state or
                    # auth events, the requests to fetch those events are deduped
                     # by the get_pdu_cache in federation_client.
-                    remote_state, got_auth_chain = (
-                        yield self.federation_client.get_state_for_room(
-                            origin, room_id, p
-                        )
+                    (
+                        remote_state,
+                        got_auth_chain,
+                    ) = yield self.federation_client.get_state_for_room(
+                        origin, room_id, p
                     )
 
                     # we want the state *after* p; get_state_for_room returns the
@@ -406,7 +408,7 @@ class FederationHandler(BaseHandler):
                 state = [event_map[e] for e in six.itervalues(state_map)]
                 auth_chain = list(auth_chains)
             except Exception:
-                logger.warn(
+                logger.warning(
                     "[%s %s] Error attempting to resolve state at missing "
                     "prev_events",
                     room_id,
@@ -519,7 +521,9 @@ class FederationHandler(BaseHandler):
            # We failed to get the missing events, but since we need to handle
            # the case of `get_missing_events` not returning the necessary
            # events anyway, it is safe to simply log the error and continue.
-            logger.warn("[%s %s]: Failed to get prev_events: %s", room_id, event_id, e)
+            logger.warning(
+                "[%s %s]: Failed to get prev_events: %s", room_id, event_id, e
+            )
             return
 
         logger.info(
@@ -546,7 +550,7 @@ class FederationHandler(BaseHandler):
                     yield self.on_receive_pdu(origin, ev, sent_to_us_directly=False)
                 except FederationError as e:
                     if e.code == 403:
-                        logger.warn(
+                        logger.warning(
                             "[%s %s] Received prev_event %s failed history check.",
                             room_id,
                             event_id,
@@ -889,7 +893,7 @@ class FederationHandler(BaseHandler):
         # We set `check_history_visibility_only` as we might otherwise get false
        # positives from users having been erased.
         filtered_extremities = yield filter_events_for_server(
-            self.store,
+            self.storage,
             self.server_name,
             list(extremities_events.values()),
             redact=False,
@@ -1060,7 +1064,7 @@ class FederationHandler(BaseHandler):
             SynapseError if the event does not pass muster
         """
         if len(ev.prev_event_ids()) > 20:
-            logger.warn(
+            logger.warning(
                 "Rejecting event %s which has %i prev_events",
                 ev.event_id,
                 len(ev.prev_event_ids()),
@@ -1068,7 +1072,7 @@ class FederationHandler(BaseHandler):
             raise SynapseError(http_client.BAD_REQUEST, "Too many prev_events")
 
         if len(ev.auth_event_ids()) > 10:
-            logger.warn(
+            logger.warning(
                 "Rejecting event %s which has %i auth_events",
                 ev.event_id,
                 len(ev.auth_event_ids()),
@@ -1204,7 +1208,7 @@ class FederationHandler(BaseHandler):
                 with nested_logging_context(p.event_id):
                     yield self.on_receive_pdu(origin, p, sent_to_us_directly=True)
             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "Error handling queued PDU %s from %s: %s", p.event_id, origin, e
                 )
 
@@ -1251,7 +1255,7 @@ class FederationHandler(BaseHandler):
                 builder=builder
             )
         except AuthError as e:
-            logger.warn("Failed to create join to %s because %s", room_id, e)
+            logger.warning("Failed to create join to %s because %s", room_id, e)
             raise e
 
         event_allowed = yield self.third_party_event_rules.check_event_allowed(
@@ -1495,7 +1499,7 @@ class FederationHandler(BaseHandler):
                 room_version, event, context, do_sig_check=False
             )
         except AuthError as e:
-            logger.warn("Failed to create new leave %r because %s", event, e)
+            logger.warning("Failed to create new leave %r because %s", event, e)
             raise e
 
         return event
@@ -1550,7 +1554,7 @@ class FederationHandler(BaseHandler):
             event_id, allow_none=False, check_room_id=room_id
         )
 
-        state_groups = yield self.store.get_state_groups(room_id, [event_id])
+        state_groups = yield self.state_store.get_state_groups(room_id, [event_id])
 
         if state_groups:
             _, state = list(iteritems(state_groups)).pop()
@@ -1579,7 +1583,7 @@ class FederationHandler(BaseHandler):
             event_id, allow_none=False, check_room_id=room_id
         )
 
-        state_groups = yield self.store.get_state_groups_ids(room_id, [event_id])
+        state_groups = yield self.state_store.get_state_groups_ids(room_id, [event_id])
 
         if state_groups:
             _, state = list(state_groups.items()).pop()
@@ -1607,7 +1611,7 @@ class FederationHandler(BaseHandler):
 
         events = yield self.store.get_backfill_events(room_id, pdu_list, limit)
 
-        events = yield filter_events_for_server(self.store, origin, events)
+        events = yield filter_events_for_server(self.storage, origin, events)
 
         return events
 
@@ -1637,7 +1641,7 @@ class FederationHandler(BaseHandler):
             if not in_room:
                 raise AuthError(403, "Host not in room.")
 
-            events = yield filter_events_for_server(self.store, origin, [event])
+            events = yield filter_events_for_server(self.storage, origin, [event])
             event = events[0]
             return event
         else:
@@ -1789,7 +1793,7 @@ class FederationHandler(BaseHandler):
                 # cause SynapseErrors in auth.check. We don't want to give up
                # the attempt to federate altogether in such cases.
 
-                logger.warn("Rejecting %s because %s", e.event_id, err.msg)
+                logger.warning("Rejecting %s because %s", e.event_id, err.msg)
 
                 if e == event:
                     raise
@@ -1845,7 +1849,9 @@ class FederationHandler(BaseHandler):
         try:
             yield self.do_auth(origin, event, context, auth_events=auth_events)
         except AuthError as e:
-            logger.warn("[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg)
+            logger.warning(
+                "[%s %s] Rejecting: %s", event.room_id, event.event_id, e.msg
+            )
 
             context.rejected = RejectedReason.AUTH_ERROR
 
@@ -1903,7 +1909,7 @@ class FederationHandler(BaseHandler):
         # given state at the event. This should correctly handle cases
        # like bans, especially with state res v2.
 
-        state_sets = yield self.store.get_state_groups(
+        state_sets = yield self.state_store.get_state_groups(
             event.room_id, extrem_ids
         )
         state_sets = list(state_sets.values())
@@ -1939,7 +1945,7 @@ class FederationHandler(BaseHandler):
         try:
             event_auth.check(room_version, event, auth_events=current_auth_events)
         except AuthError as e:
-            logger.warn("Soft-failing %r because %s", event, e)
+            logger.warning("Soft-failing %r because %s", event, e)
             event.internal_metadata.soft_failed = True
 
     @defer.inlineCallbacks
@@ -1994,7 +2000,7 @@ class FederationHandler(BaseHandler):
         )
 
         missing_events = yield filter_events_for_server(
-            self.store, origin, missing_events
+            self.storage, origin, missing_events
        )
 
         return missing_events
@@ -2038,7 +2044,7 @@ class FederationHandler(BaseHandler):
         try:
             event_auth.check(room_version, event, auth_events=auth_events)
         except AuthError as e:
-            logger.warn("Failed auth resolution for %r because %s", event, e)
+            logger.warning("Failed auth resolution for %r because %s", event, e)
             raise e
 
     @defer.inlineCallbacks
@@ -2235,7 +2241,7 @@ class FederationHandler(BaseHandler):
 
         # create a new state group as a delta from the existing one.
         prev_group = context.state_group
-        state_group = yield self.store.store_state_group(
+        state_group = yield self.state_store.store_state_group(
            event.event_id,
            event.room_id,
            prev_group=prev_group,
@@ -2432,7 +2438,7 @@ class FederationHandler(BaseHandler):
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as e:
-            logger.warn("Denying new third party invite %r because %s", event, e)
+            logger.warning("Denying new third party invite %r because %s", event, e)
             raise e
 
         yield self._check_signature(event, context)
@@ -2488,7 +2494,7 @@ class FederationHandler(BaseHandler):
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as e:
-            logger.warn("Denying third party invite %r because %s", event, e)
+            logger.warning("Denying third party invite %r because %s", event, e)
             raise e
         yield self._check_signature(event, context)
 
@@ -392,7 +392,7 @@ class GroupsLocalHandler(object):
         try:
             user_profile = yield self.profile_handler.get_profile(user_id)
         except Exception as e:
-            logger.warn("No profile for user %s: %s", user_id, e)
+            logger.warning("No profile for user %s: %s", user_id, e)
             user_profile = {}
 
         return {"state": "invite", "user_profile": user_profile}
@@ -272,7 +272,7 @@ class IdentityHandler(BaseHandler):
             changed = False
             if e.code in (400, 404, 501):
                 # The remote server probably doesn't support unbinding (yet)
-                logger.warn("Received %d response while unbinding threepid", e.code)
+                logger.warning("Received %d response while unbinding threepid", e.code)
             else:
                 logger.error("Failed to unbind threepid on identity server: %s", e)
                 raise SynapseError(500, "Failed to contact identity server")
@@ -403,7 +403,7 @@ class IdentityHandler(BaseHandler):
 
         if self.hs.config.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
-            logger.warn(
+            logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
                 'has been replaced by "account_threepid_delegate". '
                 "Please consult the sample config at docs/sample_config.yaml for "
@@ -457,7 +457,7 @@ class IdentityHandler(BaseHandler):
 
         if self.hs.config.using_identity_server_from_trusted_list:
             # Warn that a deprecated config option is in use
-            logger.warn(
+            logger.warning(
                 'The config option "trust_identity_server_for_password_resets" '
                 'has been replaced by "account_threepid_delegate". '
                 "Please consult the sample config at docs/sample_config.yaml for "
@@ -43,6 +43,8 @@ class InitialSyncHandler(BaseHandler):
         self.validator = EventValidator()
         self.snapshot_cache = SnapshotCache()
         self._event_serializer = hs.get_event_client_serializer()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
 
     def snapshot_all_rooms(
         self,
@@ -126,8 +128,8 @@ class InitialSyncHandler(BaseHandler):
 
         tags_by_room = yield self.store.get_tags_for_user(user_id)
 
-        account_data, account_data_by_room = (
-            yield self.store.get_account_data_for_user(user_id)
+        account_data, account_data_by_room = yield self.store.get_account_data_for_user(
+            user_id
         )
 
         public_room_ids = yield self.store.get_public_room_ids()
@@ -169,7 +171,7 @@ class InitialSyncHandler(BaseHandler):
             elif event.membership == Membership.LEAVE:
                 room_end_token = "s%d" % (event.stream_ordering,)
                 deferred_room_state = run_in_background(
-                    self.store.get_state_for_events, [event.event_id]
+                    self.state_store.get_state_for_events, [event.event_id]
                 )
                 deferred_room_state.addCallback(
                     lambda states: states[event.event_id]
@@ -189,7 +191,9 @@ class InitialSyncHandler(BaseHandler):
                 )
             ).addErrback(unwrapFirstError)
 
-            messages = yield filter_events_for_client(self.store, user_id, messages)
+            messages = yield filter_events_for_client(
+                self.storage, user_id, messages
+            )
 
             start_token = now_token.copy_and_replace("room_key", token)
             end_token = now_token.copy_and_replace("room_key", room_end_token)
@@ -307,7 +311,7 @@ class InitialSyncHandler(BaseHandler):
     def _room_initial_sync_parted(
         self, user_id, room_id, pagin_config, membership, member_event_id, is_peeking
     ):
-        room_state = yield self.store.get_state_for_events([member_event_id])
+        room_state = yield self.state_store.get_state_for_events([member_event_id])
 
         room_state = room_state[member_event_id]
 
@@ -322,7 +326,7 @@ class InitialSyncHandler(BaseHandler):
         )
 
         messages = yield filter_events_for_client(
-            self.store, user_id, messages, is_peeking=is_peeking
+            self.storage, user_id, messages, is_peeking=is_peeking
        )
 
         start_token = StreamToken.START.copy_and_replace("room_key", token)
@@ -414,7 +418,7 @@ class InitialSyncHandler(BaseHandler):
         )
 
         messages = yield filter_events_for_client(
-            self.store, user_id, messages, is_peeking=is_peeking
+            self.storage, user_id, messages, is_peeking=is_peeking
        )
 
         start_token = now_token.copy_and_replace("room_key", token)
@@ -59,6 +59,8 @@ class MessageHandler(object):
         self.clock = hs.get_clock()
         self.state = hs.get_state_handler()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
         self._event_serializer = hs.get_event_client_serializer()
 
     @defer.inlineCallbacks
@@ -74,15 +76,16 @@ class MessageHandler(object):
         Raises:
             SynapseError if something went wrong.
         """
-        membership, membership_event_id = yield self.auth.check_in_room_or_world_readable(
-            room_id, user_id
-        )
+        (
+            membership,
+            membership_event_id,
+        ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
 
         if membership == Membership.JOIN:
             data = yield self.state.get_current_state(room_id, event_type, state_key)
         elif membership == Membership.LEAVE:
             key = (event_type, state_key)
-            room_state = yield self.store.get_state_for_events(
+            room_state = yield self.state_store.get_state_for_events(
                 [membership_event_id], StateFilter.from_types([key])
             )
             data = room_state[membership_event_id].get(key)
@@ -135,12 +138,12 @@ class MessageHandler(object):
             raise NotFoundError("Can't find event for token %s" % (at_token,))
 
         visible_events = yield filter_events_for_client(
-            self.store, user_id, last_events
+            self.storage, user_id, last_events
        )
 
         event = last_events[0]
         if visible_events:
-            room_state = yield self.store.get_state_for_events(
+            room_state = yield self.state_store.get_state_for_events(
                 [event.event_id], state_filter=state_filter
             )
             room_state = room_state[event.event_id]
@@ -151,9 +154,10 @@ class MessageHandler(object):
                 % (user_id, room_id, at_token),
             )
         else:
-            membership, membership_event_id = (
-                yield self.auth.check_in_room_or_world_readable(room_id, user_id)
-            )
+            (
+                membership,
+                membership_event_id,
+            ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
 
             if membership == Membership.JOIN:
                 state_ids = yield self.store.get_filtered_current_state_ids(
@@ -161,7 +165,7 @@ class MessageHandler(object):
                 )
                 room_state = yield self.store.get_events(state_ids.values())
             elif membership == Membership.LEAVE:
-                room_state = yield self.store.get_state_for_events(
+                room_state = yield self.state_store.get_state_for_events(
                     [membership_event_id], state_filter=state_filter
                 )
                 room_state = room_state[membership_event_id]
@@ -688,7 +692,7 @@ class EventCreationHandler(object):
         try:
             yield self.auth.check_from_context(room_version, event, context)
         except AuthError as err:
-            logger.warn("Denying new event %r because %s", event, err)
+            logger.warning("Denying new event %r because %s", event, err)
             raise err
 
         # Ensure that we can round trip before trying to persist in db
@@ -69,6 +69,8 @@ class PaginationHandler(object):
         self.hs = hs
         self.auth = hs.get_auth()
         self.store = hs.get_datastore()
+        self.storage = hs.get_storage()
+        self.state_store = self.storage.state
         self.clock = hs.get_clock()
         self._server_name = hs.hostname
 
@@ -210,9 +212,10 @@ class PaginationHandler(object):
         source_config = pagin_config.get_source_config("room")
 
         with (yield self.pagination_lock.read(room_id)):
-            membership, member_event_id = yield self.auth.check_in_room_or_world_readable(
-                room_id, user_id
-            )
+            (
+                membership,
+                member_event_id,
+            ) = yield self.auth.check_in_room_or_world_readable(room_id, user_id)
 
             if source_config.direction == "b":
                 # if we're going backwards, we might need to backfill. This
@@ -255,7 +258,7 @@ class PaginationHandler(object):
                 events = event_filter.filter(events)
 
             events = yield filter_events_for_client(
-                self.store, user_id, events, is_peeking=(member_event_id is None)
+                self.storage, user_id, events, is_peeking=(member_event_id is None)
             )
 
             if not events:
@@ -274,7 +277,7 @@ class PaginationHandler(object):
                 (EventTypes.Member, event.sender) for event in events
             )
 
-            state_ids = yield self.store.get_state_ids_for_event(
+            state_ids = yield self.state_store.get_state_ids_for_event(
                 events[0].event_id, state_filter=state_filter
             )
 
@@ -295,10 +298,8 @@ class PaginationHandler(object):
         }
 
         if state:
-            chunk["state"] = (
-                yield self._event_serializer.serialize_events(
-                    state, time_now, as_client_event=as_client_event
-                )
+            chunk["state"] = yield self._event_serializer.serialize_events(
+                state, time_now, as_client_event=as_client_event
             )
 
         return chunk
@@ -275,7 +275,7 @@ class BaseProfileHandler(BaseHandler):
                     ratelimit=False,  # Try to hide that these events aren't atomic.
                 )
             except Exception as e:
-                logger.warn(
+                logger.warning(
                     "Failed to update join event for room %s - %s", room_id, str(e)
                 )
 
@@ -396,8 +396,8 @@ class RegistrationHandler(BaseHandler):
             room_id = room_identifier
         elif RoomAlias.is_valid(room_identifier):
             room_alias = RoomAlias.from_string(room_identifier)
-            room_id, remote_room_hosts = (
-                yield room_member_handler.lookup_room_alias(room_alias)
+            room_id, remote_room_hosts = yield room_member_handler.lookup_room_alias(
+                room_alias
             )
             room_id = room_id.to_string()
         else:
@ -147,21 +147,22 @@ class RoomCreationHandler(BaseHandler):
|
||||
|
||||
# we create and auth the tombstone event before properly creating the new
|
||||
# room, to check our user has perms in the old room.
|
||||
tombstone_event, tombstone_context = (
|
||||
yield self.event_creation_handler.create_event(
|
||||
requester,
|
||||
{
|
||||
"type": EventTypes.Tombstone,
|
||||
"state_key": "",
|
||||
"room_id": old_room_id,
|
||||
"sender": user_id,
|
||||
"content": {
|
||||
"body": "This room has been replaced",
|
||||
"replacement_room": new_room_id,
|
||||
},
|
||||
(
|
||||
tombstone_event,
|
||||
tombstone_context,
|
||||
) = yield self.event_creation_handler.create_event(
|
||||
requester,
|
||||
{
|
||||
"type": EventTypes.Tombstone,
|
||||
"state_key": "",
|
||||
"room_id": old_room_id,
|
||||
"sender": user_id,
|
||||
"content": {
|
||||
"body": "This room has been replaced",
|
||||
"replacement_room": new_room_id,
|
||||
},
|
||||
token_id=requester.access_token_id,
|
||||
)
|
||||
},
|
||||
token_id=requester.access_token_id,
|
||||
)
|
||||
old_room_version = yield self.store.get_room_version(old_room_id)
|
||||
yield self.auth.check_from_context(
|
||||
@ -822,6 +823,8 @@ class RoomContextHandler(object):
|
||||
def __init__(self, hs):
|
||||
self.hs = hs
|
||||
self.store = hs.get_datastore()
|
||||
self.storage = hs.get_storage()
|
||||
self.state_store = self.storage.state
|
||||
|
||||
@defer.inlineCallbacks
|
||||
def get_event_context(self, user, room_id, event_id, limit, event_filter):
|
||||
@ -848,7 +851,7 @@ class RoomContextHandler(object):
|
||||
|
||||
def filter_evts(events):
|
||||
return filter_events_for_client(
|
||||
self.store, user.to_string(), events, is_peeking=is_peeking
|
||||
self.storage, user.to_string(), events, is_peeking=is_peeking
|
||||
)
|
||||
|
||||
event = yield self.store.get_event(
|
||||
@ -890,7 +893,7 @@ class RoomContextHandler(object):
|
||||
# first? Shouldn't we be consistent with /sync?
|
||||
# https://github.com/matrix-org/matrix-doc/issues/687
|
||||
|
||||
state = yield self.store.get_state_for_events(
|
||||
state = yield self.state_store.get_state_for_events(
|
||||
[last_event_id], state_filter=state_filter
|
||||
)
|
||||
results["state"] = list(state[last_event_id].values())
|
||||
@ -922,7 +925,7 @@ class RoomEventSource(object):
|
||||
|
||||
from_token = RoomStreamToken.parse(from_key)
|
||||
if from_token.topological:
|
||||
logger.warn("Stream has topological part!!!! %r", from_key)
|
||||
logger.warning("Stream has topological part!!!! %r", from_key)
|
||||
from_key = "s%s" % (from_token.stream,)
|
||||
|
||||
app_service = self.store.get_app_service_by_user_id(user.to_string())
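
The wrapped tuple targets above (and the `(x,) = ...` unpacking seen later in the storage code) are mechanical output of black 19.10b0 (changelog.d/6304.misc), which parenthesises single-element unpacking targets and splits long multi-name targets across lines. A before/after sketch with invented names:

    def fetch_row():
        # Hypothetical stand-in for a database cursor's fetchone().
        return (42,)

    # Old spelling, no longer produced by black 19.10b0:
    # count, = fetch_row()

    # New spelling: the single-element target is parenthesised...
    (count,) = fetch_row()

    # ...and long multi-name targets are wrapped rather than kept on one line:
    (
        token,
        public_keys,
    ) = ("abc123", [])  # hypothetical values standing in for a real call

    print(count, token, public_keys)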

@ -759,22 +759,25 @@ class RoomMemberHandler(object):
if room_avatar_event:
room_avatar_url = room_avatar_event.content.get("url", "")

token, public_keys, fallback_public_key, display_name = (
yield self.identity_handler.ask_id_server_for_third_party_invite(
requester=requester,
id_server=id_server,
medium=medium,
address=address,
room_id=room_id,
inviter_user_id=user.to_string(),
room_alias=canonical_room_alias,
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
)
(
token,
public_keys,
fallback_public_key,
display_name,
) = yield self.identity_handler.ask_id_server_for_third_party_invite(
requester=requester,
id_server=id_server,
medium=medium,
address=address,
room_id=room_id,
inviter_user_id=user.to_string(),
room_alias=canonical_room_alias,
room_avatar_url=room_avatar_url,
room_join_rules=room_join_rules,
room_name=room_name,
inviter_display_name=inviter_display_name,
inviter_avatar_url=inviter_avatar_url,
id_access_token=id_access_token,
)

yield self.event_creation_handler.create_and_send_nonmember_event(

@ -35,6 +35,8 @@ class SearchHandler(BaseHandler):
def __init__(self, hs):
super(SearchHandler, self).__init__(hs)
self._event_serializer = hs.get_event_client_serializer()
self.storage = hs.get_storage()
self.state_store = self.storage.state

@defer.inlineCallbacks
def get_old_rooms_from_upgraded_room(self, room_id):

@ -221,7 +223,7 @@ class SearchHandler(BaseHandler):
filtered_events = search_filter.filter([r["event"] for r in results])

events = yield filter_events_for_client(
self.store, user.to_string(), filtered_events
self.storage, user.to_string(), filtered_events
)

events.sort(key=lambda e: -rank_map[e.event_id])

@ -271,7 +273,7 @@ class SearchHandler(BaseHandler):
filtered_events = search_filter.filter([r["event"] for r in results])

events = yield filter_events_for_client(
self.store, user.to_string(), filtered_events
self.storage, user.to_string(), filtered_events
)

room_events.extend(events)

@ -340,11 +342,11 @@ class SearchHandler(BaseHandler):
)

res["events_before"] = yield filter_events_for_client(
self.store, user.to_string(), res["events_before"]
self.storage, user.to_string(), res["events_before"]
)

res["events_after"] = yield filter_events_for_client(
self.store, user.to_string(), res["events_after"]
self.storage, user.to_string(), res["events_after"]
)

res["start"] = now_token.copy_and_replace(

@ -372,7 +374,7 @@ class SearchHandler(BaseHandler):
[(EventTypes.Member, sender) for sender in senders]
)

state = yield self.store.get_state_for_event(
state = yield self.state_store.get_state_for_event(
last_event_id, state_filter
)

@ -394,15 +396,11 @@ class SearchHandler(BaseHandler):
time_now = self.clock.time_msec()

for context in contexts.values():
context["events_before"] = (
yield self._event_serializer.serialize_events(
context["events_before"], time_now
)
context["events_before"] = yield self._event_serializer.serialize_events(
context["events_before"], time_now
)
context["events_after"] = (
yield self._event_serializer.serialize_events(
context["events_after"], time_now
)
context["events_after"] = yield self._event_serializer.serialize_events(
context["events_after"], time_now
)

state_results = {}
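
Every `filter_events_for_client` call in this diff swaps its first argument from the main data store to the storage facade, matching the state-storage split. A hedged sketch of the new calling shape; everything here except the function name is a stand-in:

    class EventStub(object):
        def __init__(self, event_id):
            self.event_id = event_id


    def filter_events_for_client(storage, user_id, events, is_peeking=False):
        # The real implementation consults storage.main and storage.state
        # to apply visibility rules; this stub passes everything through.
        return events


    class FakeStorage(object):
        main = None  # main data store in Synapse
        state = None  # state group storage in Synapse


    storage = FakeStorage()  # in Synapse: hs.get_storage()
    visible = filter_events_for_client(
        storage, "@alice:example.com", [EventStub("$ev1")], is_peeking=False
    )
    print([e.event_id for e in visible])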

@ -108,7 +108,10 @@ class StatsHandler(StateDeltasHandler):
user_deltas = {}

# Then count deltas for total_events and total_event_bytes.
room_count, user_count = yield self.store.get_changes_room_total_events_and_bytes(
(
room_count,
user_count,
) = yield self.store.get_changes_room_total_events_and_bytes(
self.pos, max_pos
)

@ -230,6 +230,8 @@ class SyncHandler(object):
self.response_cache = ResponseCache(hs, "sync")
self.state = hs.get_state_handler()
self.auth = hs.get_auth()
self.storage = hs.get_storage()
self.state_store = self.storage.state

# ExpiringCache((User, Device)) -> LruCache(state_key => event_id)
self.lazy_loaded_members_cache = ExpiringCache(

@ -417,7 +419,7 @@ class SyncHandler(object):
current_state_ids = frozenset(itervalues(current_state_ids))

recents = yield filter_events_for_client(
self.store,
self.storage,
sync_config.user.to_string(),
recents,
always_include_ids=current_state_ids,

@ -470,7 +472,7 @@ class SyncHandler(object):
current_state_ids = frozenset(itervalues(current_state_ids))

loaded_recents = yield filter_events_for_client(
self.store,
self.storage,
sync_config.user.to_string(),
loaded_recents,
always_include_ids=current_state_ids,

@ -509,7 +511,7 @@ class SyncHandler(object):
Returns:
A Deferred map from ((type, state_key)->Event)
"""
state_ids = yield self.store.get_state_ids_for_event(
state_ids = yield self.state_store.get_state_ids_for_event(
event.event_id, state_filter=state_filter
)
if event.is_state():

@ -580,7 +582,7 @@ class SyncHandler(object):
return None

last_event = last_events[-1]
state_ids = yield self.store.get_state_ids_for_event(
state_ids = yield self.state_store.get_state_ids_for_event(
last_event.event_id,
state_filter=StateFilter.from_types(
[(EventTypes.Name, ""), (EventTypes.CanonicalAlias, "")]

@ -757,11 +759,11 @@ class SyncHandler(object):

if full_state:
if batch:
current_state_ids = yield self.store.get_state_ids_for_event(
current_state_ids = yield self.state_store.get_state_ids_for_event(
batch.events[-1].event_id, state_filter=state_filter
)

state_ids = yield self.store.get_state_ids_for_event(
state_ids = yield self.state_store.get_state_ids_for_event(
batch.events[0].event_id, state_filter=state_filter
)

@ -781,7 +783,7 @@ class SyncHandler(object):
)
elif batch.limited:
if batch:
state_at_timeline_start = yield self.store.get_state_ids_for_event(
state_at_timeline_start = yield self.state_store.get_state_ids_for_event(
batch.events[0].event_id, state_filter=state_filter
)
else:

@ -810,7 +812,7 @@ class SyncHandler(object):
)

if batch:
current_state_ids = yield self.store.get_state_ids_for_event(
current_state_ids = yield self.state_store.get_state_ids_for_event(
batch.events[-1].event_id, state_filter=state_filter
)
else:

@ -841,7 +843,7 @@ class SyncHandler(object):
# So we fish out all the member events corresponding to the
# timeline here, and then dedupe any redundant ones below.

state_ids = yield self.store.get_state_ids_for_event(
state_ids = yield self.state_store.get_state_ids_for_event(
batch.events[0].event_id,
# we only want members!
state_filter=StateFilter.from_types(

@ -1204,10 +1206,11 @@ class SyncHandler(object):
since_token = sync_result_builder.since_token

if since_token and not sync_result_builder.full_state:
account_data, account_data_by_room = (
yield self.store.get_updated_account_data_for_user(
user_id, since_token.account_data_key
)
(
account_data,
account_data_by_room,
) = yield self.store.get_updated_account_data_for_user(
user_id, since_token.account_data_key
)

push_rules_changed = yield self.store.have_push_rules_changed_for_user(

@ -1219,9 +1222,10 @@ class SyncHandler(object):
sync_config.user
)
else:
account_data, account_data_by_room = (
yield self.store.get_account_data_for_user(sync_config.user.to_string())
)
(
account_data,
account_data_by_room,
) = yield self.store.get_account_data_for_user(sync_config.user.to_string())

account_data["m.push_rules"] = yield self.push_rules_for_user(
sync_config.user

@ -535,7 +535,7 @@ class SimpleHttpClient(object):
b"Content-Length" in resp_headers
and int(resp_headers[b"Content-Length"][0]) > max_size
):
logger.warn("Requested URL is too large > %r bytes" % (self.max_size,))
logger.warning("Requested URL is too large > %r bytes" % (self.max_size,))
raise SynapseError(
502,
"Requested file is too large > %r bytes" % (self.max_size,),

@ -543,7 +543,7 @@ class SimpleHttpClient(object):
)

if response.code > 299:
logger.warn("Got %d when downloading %s" % (response.code, url))
logger.warning("Got %d when downloading %s" % (response.code, url))
raise SynapseError(502, "Got error %d" % (response.code,), Codes.UNKNOWN)

# TODO: if our Content-Type is HTML or something, just read the first

@ -148,7 +148,7 @@ class SrvResolver(object):
# Try something in the cache, else rereaise
cache_entry = self._cache.get(service_name, None)
if cache_entry:
logger.warn(
logger.warning(
"Failed to resolve %r, falling back to cache. %r", service_name, e
)
return list(cache_entry)

@ -149,7 +149,7 @@ def _handle_json_response(reactor, timeout_sec, request, response):

body = yield make_deferred_yieldable(d)
except Exception as e:
logger.warn(
logger.warning(
"{%s} [%s] Error reading response: %s",
request.txn_id,
request.destination,

@ -457,7 +457,7 @@ class MatrixFederationHttpClient(object):
except Exception as e:
# Eh, we're already going to raise an exception so lets
# ignore if this fails.
logger.warn(
logger.warning(
"{%s} [%s] Failed to get error response: %s %s: %s",
request.txn_id,
request.destination,

@ -478,7 +478,7 @@ class MatrixFederationHttpClient(object):

break
except RequestSendFailed as e:
logger.warn(
logger.warning(
"{%s} [%s] Request failed: %s %s: %s",
request.txn_id,
request.destination,

@ -513,7 +513,7 @@ class MatrixFederationHttpClient(object):
raise

except Exception as e:
logger.warn(
logger.warning(
"{%s} [%s] Request failed: %s %s: %s",
request.txn_id,
request.destination,

@ -889,7 +889,7 @@ class MatrixFederationHttpClient(object):
d.addTimeout(self.default_timeout, self.reactor)
length = yield make_deferred_yieldable(d)
except Exception as e:
logger.warn(
logger.warning(
"{%s} [%s] Error reading response: %s",
request.txn_id,
request.destination,

@ -170,7 +170,7 @@ class RequestMetrics(object):
tag = context.tag

if context != self.start_context:
logger.warn(
logger.warning(
"Context have unexpectedly changed %r, %r",
context,
self.start_context,

@ -454,7 +454,7 @@ def respond_with_json(
# the Deferred fires, but since the flag is RIGHT THERE it seems like
# a waste.
if request._disconnected:
logger.warn(
logger.warning(
"Not sending response to request %s, already disconnected.", request
)
return

@ -219,13 +219,13 @@ def parse_json_value_from_request(request, allow_empty_body=False):
try:
content_unicode = content_bytes.decode("utf8")
except UnicodeDecodeError:
logger.warn("Unable to decode UTF-8")
logger.warning("Unable to decode UTF-8")
raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)

try:
content = json.loads(content_unicode)
except Exception as e:
logger.warn("Unable to parse JSON: %s", e)
logger.warning("Unable to parse JSON: %s", e)
raise SynapseError(400, "Content not JSON.", errcode=Codes.NOT_JSON)

return content
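
The `parse_json_value_from_request` hunk above shows the two-stage validation Synapse applies to request bodies: decode as UTF-8 first, then parse as JSON, reporting either failure to the client the same way. A standalone sketch of that pattern, with `BadJson` standing in for `SynapseError(400, "Content not JSON.")`:

    import json


    class BadJson(ValueError):
        """Stand-in for SynapseError(400, "Content not JSON.")."""


    def parse_json_bytes(content_bytes):
        # Stage 1: the body must be valid UTF-8.
        try:
            content_unicode = content_bytes.decode("utf8")
        except UnicodeDecodeError:
            raise BadJson("Content not JSON.")
        # Stage 2: the decoded text must be valid JSON.
        try:
            return json.loads(content_unicode)
        except Exception:
            raise BadJson("Content not JSON.")


    print(parse_json_bytes(b'{"msgtype": "m.text"}'))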

@ -199,7 +199,7 @@ class SynapseRequest(Request):
# It's useful to log it here so that we can get an idea of when
# the client disconnects.
with PreserveLoggingContext(self.logcontext):
logger.warn(
logger.warning(
"Error processing request %r: %s %s", self, reason.type, reason.value
)

@ -305,7 +305,7 @@ class SynapseRequest(Request):
try:
self.request_metrics.stop(self.finish_time, self.code, self.sentLength)
except Exception as e:
logger.warn("Failed to stop metrics: %r", e)
logger.warning("Failed to stop metrics: %r", e)


class XForwardedForRequest(SynapseRequest):

@ -185,7 +185,7 @@ DEFAULT_LOGGERS = {"synapse": {"level": "INFO"}}


def parse_drain_configs(
drains: dict
drains: dict,
) -> typing.Generator[DrainConfiguration, None, None]:
"""
Parse the drain configurations.

@ -294,7 +294,7 @@ class LoggingContext(object):
"""Enters this logging context into thread local storage"""
old_context = self.set_current_context(self)
if self.previous_context != old_context:
logger.warn(
logger.warning(
"Expected previous context %r, found %r",
self.previous_context,
old_context,

@ -159,6 +159,7 @@ class Notifier(object):
self.room_to_user_streams = {}

self.hs = hs
self.storage = hs.get_storage()
self.event_sources = hs.get_event_sources()
self.store = hs.get_datastore()
self.pending_new_room_events = []

@ -425,7 +426,10 @@ class Notifier(object):

if name == "room":
new_events = yield filter_events_for_client(
self.store, user.to_string(), new_events, is_peeking=is_peeking
self.storage,
user.to_string(),
new_events,
is_peeking=is_peeking,
)
elif name == "presence":
now = self.clock.time_msec()

@ -149,9 +149,10 @@ class BulkPushRuleEvaluator(object):

room_members = yield self.store.get_joined_users_from_context(event, context)

(power_levels, sender_power_level) = (
yield self._get_power_levels_and_sender_level(event, context)
)
(
power_levels,
sender_power_level,
) = yield self._get_power_levels_and_sender_level(event, context)

evaluator = PushRuleEvaluatorForEvent(
event, len(room_members), sender_power_level, power_levels

@ -234,14 +234,12 @@ class EmailPusher(object):
return

self.last_stream_ordering = last_stream_ordering
pusher_still_exists = (
yield self.store.update_pusher_last_stream_ordering_and_success(
self.app_id,
self.email,
self.user_id,
last_stream_ordering,
self.clock.time_msec(),
)
pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
self.app_id,
self.email,
self.user_id,
last_stream_ordering,
self.clock.time_msec(),
)
if not pusher_still_exists:
# The pusher has been deleted while we were processing, so

@ -64,6 +64,7 @@ class HttpPusher(object):
def __init__(self, hs, pusherdict):
self.hs = hs
self.store = self.hs.get_datastore()
self.storage = self.hs.get_storage()
self.clock = self.hs.get_clock()
self.state_handler = self.hs.get_state_handler()
self.user_id = pusherdict["user_name"]

@ -210,14 +211,12 @@ class HttpPusher(object):
http_push_processed_counter.inc()
self.backoff_delay = HttpPusher.INITIAL_BACKOFF_SEC
self.last_stream_ordering = push_action["stream_ordering"]
pusher_still_exists = (
yield self.store.update_pusher_last_stream_ordering_and_success(
self.app_id,
self.pushkey,
self.user_id,
self.last_stream_ordering,
self.clock.time_msec(),
)
pusher_still_exists = yield self.store.update_pusher_last_stream_ordering_and_success(
self.app_id,
self.pushkey,
self.user_id,
self.last_stream_ordering,
self.clock.time_msec(),
)
if not pusher_still_exists:
# The pusher has been deleted while we were processing, so

@ -246,7 +245,7 @@ class HttpPusher(object):
# we really only give up so that if the URL gets
# fixed, we don't suddenly deliver a load
# of old notifications.
logger.warn(
logger.warning(
"Giving up on a notification to user %s, " "pushkey %s",
self.user_id,
self.pushkey,

@ -299,7 +298,7 @@ class HttpPusher(object):
if pk != self.pushkey:
# for sanity, we only remove the pushkey if it
# was the one we actually sent...
logger.warn(
logger.warning(
("Ignoring rejected pushkey %s because we" " didn't send it"),
pk,
)

@ -329,7 +328,7 @@ class HttpPusher(object):
return d

ctx = yield push_tools.get_context_for_event(
self.store, self.state_handler, event, self.user_id
self.storage, self.state_handler, event, self.user_id
)

d = {

@ -119,6 +119,7 @@ class Mailer(object):
self.store = self.hs.get_datastore()
self.macaroon_gen = self.hs.get_macaroon_generator()
self.state_handler = self.hs.get_state_handler()
self.storage = hs.get_storage()
self.app_name = app_name

logger.info("Created Mailer for app_name %s" % app_name)

@ -389,7 +390,7 @@ class Mailer(object):
}

the_events = yield filter_events_for_client(
self.store, user_id, results["events_before"]
self.storage, user_id, results["events_before"]
)
the_events.append(notif_event)

@ -117,7 +117,7 @@ class PushRuleEvaluatorForEvent(object):
pattern = UserID.from_string(user_id).localpart

if not pattern:
logger.warn("event_match condition with no pattern")
logger.warning("event_match condition with no pattern")
return False

# XXX: optimisation: cache our pattern regexps

@ -173,7 +173,7 @@ def _glob_matches(glob, value, word_boundary=False):
regex_cache[(glob, word_boundary)] = r
return r.search(value)
except re.error:
logger.warn("Failed to parse glob to regex: %r", glob)
logger.warning("Failed to parse glob to regex: %r", glob)
return False


@ -16,6 +16,7 @@
from twisted.internet import defer

from synapse.push.presentable_names import calculate_room_name, name_from_member_event
from synapse.storage import Storage


@defer.inlineCallbacks
@ -43,22 +44,22 @@ def get_badge_count(store, user_id):


@defer.inlineCallbacks
def get_context_for_event(store, state_handler, ev, user_id):
def get_context_for_event(storage: Storage, state_handler, ev, user_id):
ctx = {}

room_state_ids = yield store.get_state_ids_for_event(ev.event_id)
room_state_ids = yield storage.state.get_state_ids_for_event(ev.event_id)

# we no longer bother setting room_alias, and make room_name the
# human-readable name instead, be that m.room.name, an alias or
# a list of people in the room
name = yield calculate_room_name(
store, room_state_ids, user_id, fallback_to_single_member=False
storage.main, room_state_ids, user_id, fallback_to_single_member=False
)
if name:
ctx["name"] = name

sender_state_event_id = room_state_ids[("m.room.member", ev.sender)]
sender_state_event = yield store.get_event(sender_state_event_id)
sender_state_event = yield storage.main.get_event(sender_state_event_id)
ctx["sender_display_name"] = name_from_member_event(sender_state_event)

return ctx
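
`get_context_for_event` above is typical of how helpers now accept the whole `Storage` facade and pick the right sub-store per query: `storage.state` for state lookups, `storage.main` for event fetches. A small stand-alone sketch of that shape (all classes here are made-up stand-ins):

    class FakeMain(object):
        def get_event(self, event_id):
            return {"event_id": event_id, "sender": "@bob:example.com"}


    class FakeState(object):
        def get_state_ids_for_event(self, event_id):
            return {("m.room.member", "@bob:example.com"): "$member_ev"}


    class FakeStorage(object):
        def __init__(self):
            self.main = FakeMain()
            self.state = FakeState()


    def get_context_for_event(storage, ev_id, sender):
        # State queries go via storage.state; event fetches via storage.main.
        state_ids = storage.state.get_state_ids_for_event(ev_id)
        member_ev = storage.main.get_event(state_ids[("m.room.member", sender)])
        return {"sender_display_name": member_ev["sender"]}


    print(get_context_for_event(FakeStorage(), "$ev1", "@bob:example.com"))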

@ -103,9 +103,7 @@ class PusherPool:
# create the pusher setting last_stream_ordering to the current maximum
# stream ordering in event_push_actions, so it will process
# pushes from this point onwards.
last_stream_ordering = (
yield self.store.get_latest_push_action_stream_ordering()
)
last_stream_ordering = yield self.store.get_latest_push_action_stream_ordering()

yield self.store.add_pusher(
user_id=user_id,

@ -180,7 +180,7 @@ class ReplicationEndpoint(object):
if e.code != 504 or not cls.RETRY_ON_TIMEOUT:
raise

logger.warn("%s request timed out", cls.NAME)
logger.warning("%s request timed out", cls.NAME)

# If we timed out we probably don't need to worry about backing
# off too much, but lets just wait a little anyway.

@ -144,7 +144,7 @@ class ReplicationRemoteRejectInviteRestServlet(ReplicationEndpoint):
# The 'except' clause is very broad, but we need to
# capture everything from DNS failures upwards
#
logger.warn("Failed to reject invite: %s", e)
logger.warning("Failed to reject invite: %s", e)

await self.store.locally_reject_invite(user_id, room_id)
ret = {}

@ -168,7 +168,7 @@ class ReplicationClientHandler(object):
if self.connection:
self.connection.send_command(cmd)
else:
logger.warn("Queuing command as not connected: %r", cmd.NAME)
logger.warning("Queuing command as not connected: %r", cmd.NAME)
self.pending_commands.append(cmd)

def send_federation_ack(self, token):

@ -249,7 +249,7 @@ class BaseReplicationStreamProtocol(LineOnlyReceiver):
return handler(cmd)

def close(self):
logger.warn("[%s] Closing connection", self.id())
logger.warning("[%s] Closing connection", self.id())
self.time_we_closed = self.clock.time_msec()
self.transport.loseConnection()
self.on_connection_closed()

@ -286,7 +286,7 @@ class PurgeHistoryRestServlet(RestServlet):
room_id, stream_ordering
)
if not r:
logger.warn(
logger.warning(
"[purge] purging events not possible: No event found "
"(received_ts %i => stream_ordering %i)",
ts,

@ -203,10 +203,11 @@ class LoginRestServlet(RestServlet):
address = address.lower()

# Check for login providers that support 3pid login types
canonical_user_id, callback_3pid = (
yield self.auth_handler.check_password_provider_3pid(
medium, address, login_submission["password"]
)
(
canonical_user_id,
callback_3pid,
) = yield self.auth_handler.check_password_provider_3pid(
medium, address, login_submission["password"]
)
if canonical_user_id:
# Authentication through password provider and 3pid succeeded

@ -221,7 +222,7 @@ class LoginRestServlet(RestServlet):
medium, address
)
if not user_id:
logger.warn(
logger.warning(
"unknown 3pid identifier medium %s, address %r", medium, address
)
raise LoginError(403, "", errcode=Codes.FORBIDDEN)

@ -280,8 +281,8 @@ class LoginRestServlet(RestServlet):
def do_token_login(self, login_submission):
token = login_submission["token"]
auth_handler = self.auth_handler
user_id = (
yield auth_handler.validate_short_term_login_token_and_get_user_id(token)
user_id = yield auth_handler.validate_short_term_login_token_and_get_user_id(
token
)

result = yield self._register_device_with_callback(user_id, login_submission)

@ -71,7 +71,7 @@ class EmailPasswordRequestTokenRestServlet(RestServlet):
def on_POST(self, request):
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
logger.warning(
"User password resets have been disabled due to lack of email config"
)
raise SynapseError(

@ -148,7 +148,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.failure_email_template, = load_jinja2_templates(
(self.failure_email_template,) = load_jinja2_templates(
self.config.email_template_dir,
[self.config.email_password_reset_template_failure_html],
)

@ -162,7 +162,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
)
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
logger.warning(
"Password reset emails have been disabled due to lack of an email config"
)
raise SynapseError(

@ -183,7 +183,7 @@ class PasswordResetSubmitTokenServlet(RestServlet):
# Perform a 302 redirect if next_link is set
if next_link:
if next_link.startswith("file:///"):
logger.warn(
logger.warning(
"Not redirecting to next_link as it is a local file: address"
)
else:

@ -350,7 +350,7 @@ class EmailThreepidRequestTokenRestServlet(RestServlet):
def on_POST(self, request):
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
logger.warning(
"Adding emails have been disabled due to lack of an email config"
)
raise SynapseError(

@ -441,7 +441,7 @@ class MsisdnThreepidRequestTokenRestServlet(RestServlet):
raise SynapseError(400, "MSISDN is already in use", Codes.THREEPID_IN_USE)

if not self.hs.config.account_threepid_delegate_msisdn:
logger.warn(
logger.warning(
"No upstream msisdn account_threepid_delegate configured on the server to "
"handle this request"
)

@ -479,7 +479,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.failure_email_template, = load_jinja2_templates(
(self.failure_email_template,) = load_jinja2_templates(
self.config.email_template_dir,
[self.config.email_add_threepid_template_failure_html],
)

@ -488,7 +488,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
def on_GET(self, request):
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
logger.warning(
"Adding emails have been disabled due to lack of an email config"
)
raise SynapseError(

@ -515,7 +515,7 @@ class AddThreepidEmailSubmitTokenServlet(RestServlet):
# Perform a 302 redirect if next_link is set
if next_link:
if next_link.startswith("file:///"):
logger.warn(
logger.warning(
"Not redirecting to next_link as it is a local file: address"
)
else:

@ -106,7 +106,7 @@ class EmailRegisterRequestTokenRestServlet(RestServlet):
def on_POST(self, request):
if self.hs.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.hs.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
logger.warning(
"Email registration has been disabled due to lack of email config"
)
raise SynapseError(

@ -207,7 +207,7 @@ class MsisdnRegisterRequestTokenRestServlet(RestServlet):
)

if not self.hs.config.account_threepid_delegate_msisdn:
logger.warn(
logger.warning(
"No upstream msisdn account_threepid_delegate configured on the server to "
"handle this request"
)

@ -247,13 +247,13 @@ class RegistrationSubmitTokenServlet(RestServlet):
self.store = hs.get_datastore()

if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.failure_email_template, = load_jinja2_templates(
(self.failure_email_template,) = load_jinja2_templates(
self.config.email_template_dir,
[self.config.email_registration_template_failure_html],
)

if self.config.threepid_behaviour_email == ThreepidBehaviour.LOCAL:
self.failure_email_template, = load_jinja2_templates(
(self.failure_email_template,) = load_jinja2_templates(
self.config.email_template_dir,
[self.config.email_registration_template_failure_html],
)

@ -266,7 +266,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
)
if self.config.threepid_behaviour_email == ThreepidBehaviour.OFF:
if self.config.local_threepid_handling_disabled_due_to_email_config:
logger.warn(
logger.warning(
"User registration via email has been disabled due to lack of email config"
)
raise SynapseError(

@ -287,7 +287,7 @@ class RegistrationSubmitTokenServlet(RestServlet):
# Perform a 302 redirect if next_link is set
if next_link:
if next_link.startswith("file:///"):
logger.warn(
logger.warning(
"Not redirecting to next_link as it is a local file: address"
)
else:

@ -480,7 +480,7 @@ class RegisterRestServlet(RestServlet):
# a password to work around a client bug where it sent
# the 'initial_device_display_name' param alone, wiping out
# the original registration params
logger.warn("Ignoring initial_device_display_name without password")
logger.warning("Ignoring initial_device_display_name without password")
del body["initial_device_display_name"]

session_id = self.auth_handler.get_session_id(body)

@ -394,7 +394,7 @@ class SyncRestServlet(RestServlet):
# We've had bug reports that events were coming down under the
# wrong room.
if event.room_id != room.room_id:
logger.warn(
logger.warning(
"Event %r is under room %r instead of %r",
event.event_id,
room.room_id,

@ -102,7 +102,7 @@ class RemoteKey(DirectServeResource):
@wrap_json_request_handler
async def _async_render_GET(self, request):
if len(request.postpath) == 1:
server, = request.postpath
(server,) = request.postpath
query = {server.decode("ascii"): {}}
elif len(request.postpath) == 2:
server, key_id = request.postpath

@ -363,7 +363,7 @@ class MediaRepository(object):
},
)
except RequestSendFailed as e:
logger.warn(
logger.warning(
"Request failed fetching remote media %s/%s: %r",
server_name,
media_id,

@ -372,7 +372,7 @@ class MediaRepository(object):
raise SynapseError(502, "Failed to fetch remote media")

except HttpResponseException as e:
logger.warn(
logger.warning(
"HTTP error fetching remote media %s/%s: %s",
server_name,
media_id,

@ -383,10 +383,12 @@ class MediaRepository(object):
raise SynapseError(502, "Failed to fetch remote media")

except SynapseError:
logger.warn("Failed to fetch remote media %s/%s", server_name, media_id)
logger.warning(
"Failed to fetch remote media %s/%s", server_name, media_id
)
raise
except NotRetryingDestination:
logger.warn("Not retrying destination %r", server_name)
logger.warning("Not retrying destination %r", server_name)
raise SynapseError(502, "Failed to fetch remote media")
except Exception:
logger.exception(

@ -691,7 +693,7 @@ class MediaRepository(object):
try:
os.remove(full_path)
except OSError as e:
logger.warn("Failed to remove file: %r", full_path)
logger.warning("Failed to remove file: %r", full_path)
if e.errno == errno.ENOENT:
pass
else:

@ -136,7 +136,7 @@ class PreviewUrlResource(DirectServeResource):
match = False
continue
if match:
logger.warn("URL %s blocked by url_blacklist entry %s", url, entry)
logger.warning("URL %s blocked by url_blacklist entry %s", url, entry)
raise SynapseError(
403, "URL blocked by url pattern blacklist entry", Codes.UNKNOWN
)

@ -208,7 +208,7 @@ class PreviewUrlResource(DirectServeResource):
og["og:image:width"] = dims["width"]
og["og:image:height"] = dims["height"]
else:
logger.warn("Couldn't get dims for %s" % url)
logger.warning("Couldn't get dims for %s" % url)

# define our OG response for this media
elif _is_html(media_info["media_type"]):

@ -256,7 +256,7 @@ class PreviewUrlResource(DirectServeResource):
og["og:image:width"] = dims["width"]
og["og:image:height"] = dims["height"]
else:
logger.warn("Couldn't get dims for %s", og["og:image"])
logger.warning("Couldn't get dims for %s", og["og:image"])

og["og:image"] = "mxc://%s/%s" % (
self.server_name,

@ -267,7 +267,7 @@ class PreviewUrlResource(DirectServeResource):
else:
del og["og:image"]
else:
logger.warn("Failed to find any OG data in %s", url)
logger.warning("Failed to find any OG data in %s", url)
og = {}

logger.debug("Calculated OG for %s as %s", url, og)

@ -319,7 +319,7 @@ class PreviewUrlResource(DirectServeResource):
)
except Exception as e:
# FIXME: pass through 404s and other error messages nicely
logger.warn("Error downloading %s: %r", url, e)
logger.warning("Error downloading %s: %r", url, e)

raise SynapseError(
500,

@ -400,7 +400,7 @@ class PreviewUrlResource(DirectServeResource):
except OSError as e:
# If the path doesn't exist, meh
if e.errno != errno.ENOENT:
logger.warn("Failed to remove media: %r: %s", media_id, e)
logger.warning("Failed to remove media: %r: %s", media_id, e)
continue

removed_media.append(media_id)

@ -432,7 +432,7 @@ class PreviewUrlResource(DirectServeResource):
except OSError as e:
# If the path doesn't exist, meh
if e.errno != errno.ENOENT:
logger.warn("Failed to remove media: %r: %s", media_id, e)
logger.warning("Failed to remove media: %r: %s", media_id, e)
continue

try:

@ -448,7 +448,7 @@ class PreviewUrlResource(DirectServeResource):
except OSError as e:
# If the path doesn't exist, meh
if e.errno != errno.ENOENT:
logger.warn("Failed to remove media: %r: %s", media_id, e)
logger.warning("Failed to remove media: %r: %s", media_id, e)
continue

removed_media.append(media_id)

@ -182,7 +182,7 @@ class ThumbnailResource(DirectServeResource):
if file_path:
yield respond_with_file(request, desired_type, file_path)
else:
logger.warn("Failed to generate thumbnail")
logger.warning("Failed to generate thumbnail")
respond_404(request)

@defer.inlineCallbacks

@ -245,7 +245,7 @@ class ThumbnailResource(DirectServeResource):
if file_path:
yield respond_with_file(request, desired_type, file_path)
else:
logger.warn("Failed to generate thumbnail")
logger.warning("Failed to generate thumbnail")
respond_404(request)

@defer.inlineCallbacks

@ -39,7 +39,7 @@ class HomeServer(object):
def get_state_resolution_handler(self) -> synapse.state.StateResolutionHandler:
pass
def get_deactivate_account_handler(
self
self,
) -> synapse.handlers.deactivate_account.DeactivateAccountHandler:
pass
def get_room_creation_handler(self) -> synapse.handlers.room.RoomCreationHandler:

@ -47,32 +47,32 @@ class HomeServer(object):
def get_room_member_handler(self) -> synapse.handlers.room_member.RoomMemberHandler:
pass
def get_event_creation_handler(
self
self,
) -> synapse.handlers.message.EventCreationHandler:
pass
def get_set_password_handler(
self
self,
) -> synapse.handlers.set_password.SetPasswordHandler:
pass
def get_federation_sender(self) -> synapse.federation.sender.FederationSender:
pass
def get_federation_transport_client(
self
self,
) -> synapse.federation.transport.client.TransportLayerClient:
pass
def get_media_repository_resource(
self
self,
) -> synapse.rest.media.v1.media_repository.MediaRepositoryResource:
pass
def get_media_repository(
self
self,
) -> synapse.rest.media.v1.media_repository.MediaRepository:
pass
def get_server_notices_manager(
self
self,
) -> synapse.server_notices.server_notices_manager.ServerNoticesManager:
pass
def get_server_notices_sender(
self
self,
) -> synapse.server_notices.server_notices_sender.ServerNoticesSender:
pass

@ -83,7 +83,7 @@ class ResourceLimitsServerNotices(object):
room_id = yield self._server_notices_manager.get_notice_room_for_user(user_id)

if not room_id:
logger.warn("Failed to get server notices room")
logger.warning("Failed to get server notices room")
return

yield self._check_and_set_tags(user_id, room_id)

51
synapse/spam_checker_api/__init__.py
Normal file
@ -0,0 +1,51 @@
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging

from twisted.internet import defer

from synapse.storage.state import StateFilter

logger = logging.getLogger(__name__)


class SpamCheckerApi(object):
"""A proxy object that gets passed to spam checkers so they can get
access to rooms and other relevant information.
"""

def __init__(self, hs):
self.hs = hs

self._store = hs.get_datastore()

@defer.inlineCallbacks
def get_state_events_in_room(self, room_id, types):
"""Gets state events for the given room.

Args:
room_id (string): The room ID to get state events in.
types (tuple): The event type and state key (using None
to represent 'any') of the room state to acquire.

Returns:
twisted.internet.defer.Deferred[list(synapse.events.FrozenEvent)]:
The filtered state events in the room.
"""
state_ids = yield self._store.get_filtered_current_state_ids(
room_id=room_id, state_filter=StateFilter.from_types(types)
)
state = yield self._store.get_events(state_ids.values())
return state.values()
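
This new `SpamCheckerApi` is what changelog.d/6259.misc ("Expose some homeserver functionality to spam checkers") refers to: spam checker modules can now read room state through the homeserver. A hedged sketch of a module using it — the `(config, api)` constructor and `user_may_invite` hook follow the spam-checker convention of this era, but the topic-based rule itself is invented for illustration:

    from twisted.internet import defer


    class TopicSpamChecker(object):
        """Illustrative spam checker: refuses invites to rooms whose
        topic contains a made-up banned word."""

        def __init__(self, config, api):
            self.config = config
            self.api = api  # a SpamCheckerApi instance

        @defer.inlineCallbacks
        def user_may_invite(self, inviter_userid, invitee_userid, room_id):
            # Fetch only the m.room.topic state event via the new API.
            events = yield self.api.get_state_events_in_room(
                room_id, (("m.room.topic", ""),)
            )
            for ev in events:
                if "spam" in ev.content.get("topic", ""):
                    return False
            return True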

@ -103,6 +103,7 @@ class StateHandler(object):
def __init__(self, hs):
self.clock = hs.get_clock()
self.store = hs.get_datastore()
self.state_store = hs.get_storage().state
self.hs = hs
self._state_resolution_handler = hs.get_state_resolution_handler()

@ -271,7 +272,7 @@ class StateHandler(object):
else:
current_state_ids = prev_state_ids

state_group = yield self.store.store_state_group(
state_group = yield self.state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=None,

@ -321,7 +322,7 @@ class StateHandler(object):
delta_ids = dict(entry.delta_ids)
delta_ids[key] = event.event_id

state_group = yield self.store.store_state_group(
state_group = yield self.state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=prev_group,

@ -334,7 +335,7 @@ class StateHandler(object):
delta_ids = entry.delta_ids

if entry.state_group is None:
entry.state_group = yield self.store.store_state_group(
entry.state_group = yield self.state_store.store_state_group(
event.event_id,
event.room_id,
prev_group=entry.prev_group,

@ -376,14 +377,16 @@ class StateHandler(object):
# map from state group id to the state in that state group (where
# 'state' is a map from state key to event id)
# dict[int, dict[(str, str), str]]
state_groups_ids = yield self.store.get_state_groups_ids(room_id, event_ids)
state_groups_ids = yield self.state_store.get_state_groups_ids(
room_id, event_ids
)

if len(state_groups_ids) == 0:
return _StateCacheEntry(state={}, state_group=None)
elif len(state_groups_ids) == 1:
name, state_list = list(state_groups_ids.items()).pop()

prev_group, delta_ids = yield self.store.get_state_group_delta(name)
prev_group, delta_ids = yield self.state_store.get_state_group_delta(name)

return _StateCacheEntry(
state=state_list,

@ -30,6 +30,7 @@ stored in `synapse.storage.schema`.
from synapse.storage.data_stores import DataStores
from synapse.storage.data_stores.main import DataStore
from synapse.storage.persist_events import EventsPersistenceStorage
from synapse.storage.state import StateGroupStorage

__all__ = ["DataStores", "DataStore"]

@ -45,6 +46,7 @@ class Storage(object):
self.main = stores.main

self.persistence = EventsPersistenceStorage(hs, stores)
self.state = StateGroupStorage(hs, stores)


def are_all_users_on_domain(txn, database_engine, domain):

@ -494,7 +494,7 @@ class SQLBaseStore(object):
exception_callbacks = []

if LoggingContext.current_context() == LoggingContext.sentinel:
logger.warn("Starting db txn '%s' from sentinel context", desc)
logger.warning("Starting db txn '%s' from sentinel context", desc)

try:
result = yield self.runWithConnection(

@ -532,7 +532,7 @@ class SQLBaseStore(object):
"""
parent_context = LoggingContext.current_context()
if parent_context == LoggingContext.sentinel:
logger.warn(
logger.warning(
"Starting db connection from sentinel context: metrics will be lost"
)
parent_context = None

@ -719,7 +719,7 @@ class SQLBaseStore(object):
raise

# presumably we raced with another transaction: let's retry.
logger.warn(
logger.warning(
"IntegrityError when upserting into %s; retrying: %s", table, e
)

@ -320,7 +320,7 @@ class DataStore(
) u
"""
txn.execute(sql, (time_from,))
count, = txn.fetchone()
(count,) = txn.fetchone()
return count

def count_r30_users(self):

@ -399,7 +399,7 @@ class DataStore(

txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))

count, = txn.fetchone()
(count,) = txn.fetchone()
results["all"] = count

return results

@ -863,7 +863,7 @@ class EventPushActionsStore(EventPushActionsWorkerStore):
)
stream_row = txn.fetchone()
if stream_row:
offset_stream_ordering, = stream_row
(offset_stream_ordering,) = stream_row
rotate_to_stream_ordering = min(
self.stream_ordering_day_ago, offset_stream_ordering
)

@ -82,7 +82,7 @@ def _retry_on_integrity_error(func):
@defer.inlineCallbacks
def f(self, *args, **kwargs):
try:
res = yield func(self, *args, **kwargs)
res = yield func(self, *args, delete_existing=False, **kwargs)
except self.database_engine.module.IntegrityError:
logger.exception("IntegrityError, retrying.")
res = yield func(self, *args, delete_existing=True, **kwargs)

@ -1125,7 +1125,7 @@ class EventsStore(
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
(count,) = txn.fetchone()
return count

ret = yield self.runInteraction("count_messages", _count_messages)

@ -1146,7 +1146,7 @@ class EventsStore(
"""

txn.execute(sql, (like_clause, self.stream_ordering_day_ago))
count, = txn.fetchone()
(count,) = txn.fetchone()
return count

ret = yield self.runInteraction("count_daily_sent_messages", _count_messages)

@ -1161,7 +1161,7 @@ class EventsStore(
AND stream_ordering > ?
"""
txn.execute(sql, (self.stream_ordering_day_ago,))
count, = txn.fetchone()
(count,) = txn.fetchone()
return count

ret = yield self.runInteraction("count_daily_active_rooms", _count)

@ -1646,7 +1646,7 @@ class EventsStore(
""",
(room_id,),
)
min_depth, = txn.fetchone()
(min_depth,) = txn.fetchone()

logger.info("[purge] updating room_depth to %d", min_depth)

@ -1838,7 +1838,6 @@ class EventsStore(
"room_stats_earliest_token",
"rooms",
"stream_ordering_to_exterm",
"topics",
"users_in_public_rooms",
"users_who_share_private_rooms",
# no useful index, but let's clear them anyway

@ -438,7 +438,7 @@ class EventsBackgroundUpdatesStore(BackgroundUpdateStore):
if not rows:
return 0

upper_event_id, = rows[-1]
(upper_event_id,) = rows[-1]

# Update the redactions with the received_ts.
#

@ -249,7 +249,7 @@ class GroupServerStore(SQLBaseStore):
WHERE group_id = ? AND category_id = ?
"""
txn.execute(sql, (group_id, category_id))
order, = txn.fetchone()
(order,) = txn.fetchone()

if existing:
to_update = {}

@ -509,7 +509,7 @@ class GroupServerStore(SQLBaseStore):
WHERE group_id = ? AND role_id = ?
"""
txn.execute(sql, (group_id, role_id))
order, = txn.fetchone()
(order,) = txn.fetchone()

if existing:
to_update = {}

Some files were not shown because too many files have changed in this diff