Merge branch 'develop' of github.com:matrix-org/synapse into erikj/faster_typing
Commit: ce846bb620
177 changed files with 2541 additions and 1341 deletions
@@ -895,22 +895,24 @@ class AuthHandler(BaseHandler):
        Args:
            password (unicode): Password to hash.
-            stored_hash (unicode): Expected hash value.
+            stored_hash (bytes): Expected hash value.

        Returns:
            Deferred(bool): Whether self.hash(password) == stored_hash.
        """

        def _do_validate_hash():
            # Normalise the Unicode in the password
            pw = unicodedata.normalize("NFKC", password)

            return bcrypt.checkpw(
                pw.encode('utf8') + self.hs.config.password_pepper.encode("utf8"),
-                stored_hash.encode('utf8')
+                stored_hash
            )

        if stored_hash:
+            if not isinstance(stored_hash, bytes):
+                stored_hash = stored_hash.encode('ascii')
+
            return make_deferred_yieldable(
                threads.deferToThreadPool(
                    self.hs.get_reactor(),

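Note: the fix above reflects that bcrypt.checkpw() compares bytes against bytes, so on Python 3 a hash stored as text must be re-encoded instead of encoding it inside the comparison. A minimal, self-contained sketch of the same pattern (the pepper value and helper names are illustrative, not Synapse's):

    import unicodedata
    import bcrypt

    PEPPER = "example_pepper"  # stands in for hs.config.password_pepper

    def hash_password(password):
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.hashpw(pw.encode("utf8") + PEPPER.encode("utf8"), bcrypt.gensalt())

    def validate_hash(password, stored_hash):
        # the database may hand the hash back as text; checkpw wants bytes
        if not isinstance(stored_hash, bytes):
            stored_hash = stored_hash.encode("ascii")
        pw = unicodedata.normalize("NFKC", password)
        return bcrypt.checkpw(pw.encode("utf8") + PEPPER.encode("utf8"), stored_hash)

    stored = hash_password(u"s3cret").decode("ascii")  # e.g. the value read back from the DB
    assert validate_hash(u"s3cret", stored)
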
@@ -20,7 +20,14 @@ import string
from twisted.internet import defer

from synapse.api.constants import EventTypes
-from synapse.api.errors import AuthError, CodeMessageException, Codes, SynapseError
+from synapse.api.errors import (
+    AuthError,
+    CodeMessageException,
+    Codes,
+    NotFoundError,
+    StoreError,
+    SynapseError,
+)
from synapse.types import RoomAlias, UserID, get_domain_from_id

from ._base import BaseHandler

@@ -109,7 +116,13 @@ class DirectoryHandler(BaseHandler):
    def delete_association(self, requester, user_id, room_alias):
        # association deletion for human users

-        can_delete = yield self._user_can_delete_alias(room_alias, user_id)
+        try:
+            can_delete = yield self._user_can_delete_alias(room_alias, user_id)
+        except StoreError as e:
+            if e.code == 404:
+                raise NotFoundError("Unknown room alias")
+            raise
+
        if not can_delete:
            raise AuthError(
                403, "You don't have permission to delete the alias.",

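Note: the alias-deletion hunk turns a storage-layer 404 (a StoreError) into a proper NotFoundError API error rather than letting it bubble up as an internal server error. A rough sketch of the control flow with stand-in exception classes (not Synapse's own):

    class StoreError(Exception):
        def __init__(self, code, msg):
            super(StoreError, self).__init__(msg)
            self.code = code

    class NotFoundError(Exception):
        """Stands in for synapse.api.errors.NotFoundError (M_NOT_FOUND)."""

    def get_room_alias_creator(aliases, alias):
        if alias not in aliases:
            raise StoreError(404, "Alias not found")
        return aliases[alias]

    def user_can_delete_alias(aliases, alias, user_id):
        try:
            creator = get_room_alias_creator(aliases, alias)
        except StoreError as e:
            if e.code == 404:
                raise NotFoundError("Unknown room alias")  # clean 404 for the client
            raise  # any other storage error is still unexpected
        return creator is not None and creator == user_id
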
@@ -320,7 +333,7 @@ class DirectoryHandler(BaseHandler):
    def _user_can_delete_alias(self, alias, user_id):
        creator = yield self.store.get_room_alias_creator(alias.to_string())

-        if creator and creator == user_id:
+        if creator is not None and creator == user_id:
            defer.returnValue(True)

        is_admin = yield self.auth.is_server_admin(UserID.from_string(user_id))

@@ -330,7 +330,8 @@ class E2eKeysHandler(object):
                    (algorithm, key_id, ex_json, key)
                )
            else:
-                new_keys.append((algorithm, key_id, encode_canonical_json(key)))
+                new_keys.append((
+                    algorithm, key_id, encode_canonical_json(key).decode('ascii')))

        yield self.store.add_e2e_one_time_keys(
            user_id, device_id, time_now, new_keys

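Note: encode_canonical_json() (from the canonicaljson package) returns bytes, which the JSON encoder for the response cannot serialise on Python 3; canonical JSON is ASCII-only, so decoding with 'ascii' is safe. For instance:

    from canonicaljson import encode_canonical_json

    key = {"key": "c29tZSBrZXkgbWF0ZXJpYWw", "signatures": {}}
    encoded = encode_canonical_json(key)   # bytes, e.g. b'{"key":"...","signatures":{}}'
    stored = encoded.decode("ascii")       # text form suitable for JSON responses
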
@@ -358,7 +359,7 @@ def _exception_to_failure(e):
    # Note that some Exceptions (notably twisted's ResponseFailed etc) don't
    # give a string for e.message, which json then fails to serialize.
    return {
-        "status": 503, "message": str(e.message),
+        "status": 503, "message": str(e),
    }

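Note: the e.message attribute no longer exists on Python 3 exceptions (and some Twisted failures never provided it), whereas str(e) works on both interpreters. For example:

    try:
        raise ValueError("request failed")
    except ValueError as e:
        failure = {"status": 503, "message": str(e)}  # "request failed"
        # e.message would raise AttributeError on Python 3
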
@@ -360,6 +360,35 @@ class FederationHandler(BaseHandler):
            # apparently.
            #
            # see https://github.com/matrix-org/synapse/pull/1744
+            #
+            # ----
+            #
+            # Update richvdh 2018/09/18: There are a number of problems with timing this
+            # request out agressively on the client side:
+            #
+            # - it plays badly with the server-side rate-limiter, which starts tarpitting you
+            #   if you send too many requests at once, so you end up with the server carefully
+            #   working through the backlog of your requests, which you have already timed
+            #   out.
+            #
+            # - for this request in particular, we now (as of
+            #   https://github.com/matrix-org/synapse/pull/3456) reject any PDUs where the
+            #   server can't produce a plausible-looking set of prev_events - so we becone
+            #   much more likely to reject the event.
+            #
+            # - contrary to what it says above, we do *not* fall back to fetching fresh state
+            #   for the room if get_missing_events times out. Rather, we give up processing
+            #   the PDU whose prevs we are missing, which then makes it much more likely that
+            #   we'll end up back here for the *next* PDU in the list, which exacerbates the
+            #   problem.
+            #
+            # - the agressive 10s timeout was introduced to deal with incoming federation
+            #   requests taking 8 hours to process. It's not entirely clear why that was going
+            #   on; certainly there were other issues causing traffic storms which are now
+            #   resolved, and I think in any case we may be more sensible about our locking
+            #   now. We're *certainly* more sensible about our logging.
+            #
+            # All that said: Let's try increasing the timout to 60s and see what happens.

            missing_events = yield self.federation_client.get_missing_events(
                origin,

@@ -368,7 +397,7 @@ class FederationHandler(BaseHandler):
                latest_events=[pdu],
                limit=10,
                min_depth=min_depth,
-                timeout=10000,
+                timeout=60000,
            )

            logger.info(

@@ -594,7 +623,7 @@ class FederationHandler(BaseHandler):

        required_auth = set(
            a_id
-            for event in events + state_events.values() + auth_events.values()
+            for event in events + list(state_events.values()) + list(auth_events.values())
            for a_id, _ in event.auth_events
        )
        auth_events.update({

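Note: on Python 3, dict.values() returns a view object and list + dict_values raises TypeError, so both views must be wrapped in list() before being concatenated with the events list. For example:

    events = ["$event1"]
    state_events = {("m.room.create", ""): "$event2"}
    auth_events = {("m.room.member", "@a:hs"): "$event3"}

    # events + state_events.values()  -> TypeError on Python 3
    combined = events + list(state_events.values()) + list(auth_events.values())
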
@@ -802,7 +831,7 @@ class FederationHandler(BaseHandler):
                )
                continue
            except NotRetryingDestination as e:
-                logger.info(e.message)
+                logger.info(str(e))
                continue
            except FederationDeniedError as e:
                logger.info(e)

@@ -1358,7 +1387,7 @@ class FederationHandler(BaseHandler):
        )

        if state_groups:
-            _, state = state_groups.items().pop()
+            _, state = list(state_groups.items()).pop()
            results = state

            if event.is_state():

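Note: similarly, dict.items() is a view on Python 3 and has no pop(); wrapping it in list() restores the old behaviour of pulling out an arbitrary (group, state) pair. For example:

    state_groups = {42: ["state", "for", "group", "42"]}

    # state_groups.items().pop()  -> AttributeError on Python 3
    group_id, state = list(state_groups.items()).pop()
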
@@ -269,14 +269,7 @@ class PaginationHandler(object):

        if state_ids:
            state = yield self.store.get_events(list(state_ids.values()))
-
-            if state:
-                state = yield filter_events_for_client(
-                    self.store,
-                    user_id,
-                    state.values(),
-                    is_peeking=(member_event_id is None),
-                )
+            state = state.values()

        time_now = self.clock.time_msec()

@@ -534,4 +534,5 @@ class RegistrationHandler(BaseHandler):
            room_id=room_id,
            remote_room_hosts=remote_room_hosts,
            action="join",
+            ratelimit=False,
        )

@@ -162,7 +162,7 @@ class RoomListHandler(BaseHandler):
        # Filter out rooms that we don't want to return
        rooms_to_scan = [
            r for r in sorted_rooms
-            if r not in newly_unpublished and rooms_to_num_joined[room_id] > 0
+            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
        ]

        total_room_count = len(rooms_to_scan)

@@ -583,6 +583,11 @@ class RoomMemberHandler(object):
            room_id = mapping["room_id"]
            servers = mapping["servers"]

+            # put the server which owns the alias at the front of the server list.
+            if room_alias.domain in servers:
+                servers.remove(room_alias.domain)
+                servers.insert(0, room_alias.domain)
+
            defer.returnValue((RoomID.from_string(room_id), servers))

        @defer.inlineCallbacks

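Note: the added lines in the RoomMemberHandler hunk simply move the domain that owns the alias to the front of the candidate server list, so a remote join is attempted via that server first. Roughly:

    servers = ["one.example.org", "matrix.org", "two.example.org"]
    alias_domain = "matrix.org"  # e.g. room_alias.domain for #room:matrix.org

    if alias_domain in servers:
        servers.remove(alias_domain)
        servers.insert(0, alias_domain)

    # servers == ["matrix.org", "one.example.org", "two.example.org"]
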
@@ -54,7 +54,7 @@ class SearchHandler(BaseHandler):
        batch_token = None
        if batch:
            try:
-                b = decode_base64(batch)
+                b = decode_base64(batch).decode('ascii')
                batch_group, batch_group_key, batch_token = b.split("\n")

                assert batch_group is not None

@@ -258,18 +258,18 @@ class SearchHandler(BaseHandler):
            # it returns more from the same group (if applicable) rather
            # than reverting to searching all results again.
            if batch_group and batch_group_key:
-                global_next_batch = encode_base64("%s\n%s\n%s" % (
+                global_next_batch = encode_base64(("%s\n%s\n%s" % (
                    batch_group, batch_group_key, pagination_token
-                ))
+                )).encode('ascii'))
            else:
-                global_next_batch = encode_base64("%s\n%s\n%s" % (
+                global_next_batch = encode_base64(("%s\n%s\n%s" % (
                    "all", "", pagination_token
-                ))
+                )).encode('ascii'))

            for room_id, group in room_groups.items():
-                group["next_batch"] = encode_base64("%s\n%s\n%s" % (
+                group["next_batch"] = encode_base64(("%s\n%s\n%s" % (
                    "room_id", room_id, pagination_token
-                ))
+                )).encode('ascii'))

            allowed_events.extend(room_events)

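Note: the search-pagination changes follow from the unpaddedbase64 helpers working on bytes: decode_base64() returns bytes that must be decoded to text before split("\n"), and encode_base64() needs the "\n"-joined token encoded to bytes first. A round-trip sketch using unpaddedbase64 directly (the token values here are made up):

    from unpaddedbase64 import decode_base64, encode_base64

    token = "room_id\n!abc:example.org\nt123-456"
    batch = encode_base64(token.encode("ascii"))  # text-safe next_batch for the client

    group, key, pagination_token = decode_base64(batch).decode("ascii").split("\n")
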
@@ -24,6 +24,7 @@ from twisted.internet import defer

from synapse.api.constants import EventTypes, Membership
from synapse.push.clientformat import format_push_rules_for_user
+from synapse.storage.roommember import MemberSummary
from synapse.types import RoomStreamToken
from synapse.util.async_helpers import concurrently_execute
from synapse.util.caches.expiringcache import ExpiringCache

@@ -525,6 +526,8 @@ class SyncHandler(object):
            A deferred dict describing the room summary
        """

+        # FIXME: we could/should get this from room_stats when matthew/stats lands
+
        # FIXME: this promulgates https://github.com/matrix-org/synapse/issues/3305
        last_events, _ = yield self.store.get_recent_event_ids_for_room(
            room_id, end_token=now_token.room_key, limit=1,

@@ -537,44 +540,67 @@ class SyncHandler(object):
        last_event = last_events[-1]
        state_ids = yield self.store.get_state_ids_for_event(
            last_event.event_id, [
-                (EventTypes.Member, None),
                (EventTypes.Name, ''),
                (EventTypes.CanonicalAlias, ''),
            ]
        )

-        member_ids = {
-            state_key: event_id
-            for (t, state_key), event_id in state_ids.iteritems()
-            if t == EventTypes.Member
-        }
+        # this is heavily cached, thus: fast.
+        details = yield self.store.get_room_summary(room_id)

        name_id = state_ids.get((EventTypes.Name, ''))
        canonical_alias_id = state_ids.get((EventTypes.CanonicalAlias, ''))

        summary = {}

-        # FIXME: it feels very heavy to load up every single membership event
-        # just to calculate the counts.
-        member_events = yield self.store.get_events(member_ids.values())
-
-        joined_user_ids = []
-        invited_user_ids = []
-
-        for ev in member_events.values():
-            if ev.content.get("membership") == Membership.JOIN:
-                joined_user_ids.append(ev.state_key)
-            elif ev.content.get("membership") == Membership.INVITE:
-                invited_user_ids.append(ev.state_key)
+        empty_ms = MemberSummary([], 0)

        # TODO: only send these when they change.
-        summary["m.joined_member_count"] = len(joined_user_ids)
-        summary["m.invited_member_count"] = len(invited_user_ids)
+        summary["m.joined_member_count"] = (
+            details.get(Membership.JOIN, empty_ms).count
+        )
+        summary["m.invited_member_count"] = (
+            details.get(Membership.INVITE, empty_ms).count
+        )

-        if name_id or canonical_alias_id:
-            defer.returnValue(summary)
+        # if the room has a name or canonical_alias set, we can skip
+        # calculating heroes. we assume that if the event has contents, it'll
+        # be a valid name or canonical_alias - i.e. we're checking that they
+        # haven't been "deleted" by blatting {} over the top.
+        if name_id:
+            name = yield self.store.get_event(name_id, allow_none=False)
+            if name and name.content:
+                defer.returnValue(summary)

-        # FIXME: order by stream ordering, not alphabetic
+        if canonical_alias_id:
+            canonical_alias = yield self.store.get_event(
+                canonical_alias_id, allow_none=False,
+            )
+            if canonical_alias and canonical_alias.content:
+                defer.returnValue(summary)
+
+        joined_user_ids = [
+            r[0] for r in details.get(Membership.JOIN, empty_ms).members
+        ]
+        invited_user_ids = [
+            r[0] for r in details.get(Membership.INVITE, empty_ms).members
+        ]
+        gone_user_ids = (
+            [r[0] for r in details.get(Membership.LEAVE, empty_ms).members] +
+            [r[0] for r in details.get(Membership.BAN, empty_ms).members]
+        )
+
+        # FIXME: only build up a member_ids list for our heroes
+        member_ids = {}
+        for membership in (
+            Membership.JOIN,
+            Membership.INVITE,
+            Membership.LEAVE,
+            Membership.BAN
+        ):
+            for user_id, event_id in details.get(membership, empty_ms).members:
+                member_ids[user_id] = event_id
+
+        # FIXME: order by stream ordering rather than as returned by SQL
        me = sync_config.user.to_string()
        if (joined_user_ids or invited_user_ids):
            summary['m.heroes'] = sorted(

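Note: the rewritten summary path reads per-membership (members, count) data from the heavily cached room summary table instead of loading every membership event, and only computes m.heroes when the room lacks a usable name or canonical alias. A simplified, self-contained sketch of the counting and hero selection (MemberSummary here is a stand-in namedtuple, not the real synapse.storage.roommember.MemberSummary):

    from collections import namedtuple

    MemberSummary = namedtuple("MemberSummary", ["members", "count"])
    JOIN, INVITE = "join", "invite"

    details = {
        JOIN: MemberSummary([("@alice:hs", "$e1"), ("@me:hs", "$e2")], 2),
        INVITE: MemberSummary([("@bob:hs", "$e3")], 1),
    }
    empty_ms = MemberSummary([], 0)

    summary = {
        "m.joined_member_count": details.get(JOIN, empty_ms).count,
        "m.invited_member_count": details.get(INVITE, empty_ms).count,
    }

    me = "@me:hs"
    joined = [user_id for user_id, _ in details.get(JOIN, empty_ms).members]
    invited = [user_id for user_id, _ in details.get(INVITE, empty_ms).members]

    # heroes: up to five other members, preferring joined/invited users
    # (the diff's FIXME notes this should eventually use stream ordering)
    summary["m.heroes"] = sorted(u for u in joined + invited if u != me)[0:5]

    # -> {'m.joined_member_count': 2, 'm.invited_member_count': 1,
    #     'm.heroes': ['@alice:hs', '@bob:hs']}
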
@@ -586,7 +612,11 @@ class SyncHandler(object):
            )[0:5]
        else:
            summary['m.heroes'] = sorted(
-                [user_id for user_id in member_ids.keys() if user_id != me]
+                [
+                    user_id
+                    for user_id in gone_user_ids
+                    if user_id != me
+                ]
            )[0:5]

        if not sync_config.filter_collection.lazy_load_members():

@@ -719,6 +749,26 @@ class SyncHandler(object):
                lazy_load_members=lazy_load_members,
            )
        elif batch.limited:
+            state_at_timeline_start = yield self.store.get_state_ids_for_event(
+                batch.events[0].event_id, types=types,
+                filtered_types=filtered_types,
+            )
+
+            # for now, we disable LL for gappy syncs - see
+            # https://github.com/vector-im/riot-web/issues/7211#issuecomment-419976346
+            # N.B. this slows down incr syncs as we are now processing way
+            # more state in the server than if we were LLing.
+            #
+            # We still have to filter timeline_start to LL entries (above) in order
+            # for _calculate_state's LL logic to work, as we have to include LL
+            # members for timeline senders in case they weren't loaded in the initial
+            # sync. We do this by (counterintuitively) by filtering timeline_start
+            # members to just be ones which were timeline senders, which then ensures
+            # all of the rest get included in the state block (if we need to know
+            # about them).
+            types = None
+            filtered_types = None
+
            state_at_previous_sync = yield self.get_state_at(
                room_id, stream_position=since_token, types=types,
                filtered_types=filtered_types,

@@ -729,24 +779,21 @@ class SyncHandler(object):
                filtered_types=filtered_types,
            )

-            state_at_timeline_start = yield self.store.get_state_ids_for_event(
-                batch.events[0].event_id, types=types,
-                filtered_types=filtered_types,
-            )
-
            state_ids = _calculate_state(
                timeline_contains=timeline_state,
                timeline_start=state_at_timeline_start,
                previous=state_at_previous_sync,
                current=current_state_ids,
+                # we have to include LL members in case LL initial sync missed them
                lazy_load_members=lazy_load_members,
            )
        else:
            state_ids = {}
            if lazy_load_members:
                if types:
-                    # We're returning an incremental sync, with no "gap" since
-                    # the previous sync, so normally there would be no state to return
+                    # We're returning an incremental sync, with no
+                    # "gap" since the previous sync, so normally there would be
+                    # no state to return.
                    # But we're lazy-loading, so the client might need some more
                    # member events to understand the events in this timeline.
                    # So we fish out all the member events corresponding to the

@@ -774,7 +821,7 @@ class SyncHandler(object):
        logger.debug("filtering state from %r...", state_ids)
        state_ids = {
            t: event_id
-            for t, event_id in state_ids.iteritems()
+            for t, event_id in iteritems(state_ids)
            if cache.get(t[1]) != event_id
        }
        logger.debug("...to %r", state_ids)

@@ -1575,6 +1622,19 @@ class SyncHandler(object):
                newly_joined_room=newly_joined,
            )

+            # When we join the room (or the client requests full_state), we should
+            # send down any existing tags. Usually the user won't have tags in a
+            # newly joined room, unless either a) they've joined before or b) the
+            # tag was added by synapse e.g. for server notice rooms.
+            if full_state:
+                user_id = sync_result_builder.sync_config.user.to_string()
+                tags = yield self.store.get_tags_for_room(user_id, room_id)
+
+                # If there aren't any tags, don't send the empty tags list down
+                # sync
+                if not tags:
+                    tags = None
+
            account_data_events = []
            if tags is not None:
                account_data_events.append({

@@ -1603,10 +1663,24 @@ class SyncHandler(object):
                )

            summary = {}

+            # we include a summary in room responses when we're lazy loading
+            # members (as the client otherwise doesn't have enough info to form
+            # the name itself).
            if (
                sync_config.filter_collection.lazy_load_members() and
                (
+                    # we recalulate the summary:
+                    #   if there are membership changes in the timeline, or
+                    #   if membership has changed during a gappy sync, or
+                    #   if this is an initial sync.
                    any(ev.type == EventTypes.Member for ev in batch.events) or
+                    (
+                        # XXX: this may include false positives in the form of LL
+                        # members which have snuck into state
+                        batch.limited and
+                        any(t == EventTypes.Member for (t, k) in state)
+                    ) or
                    since_token is None
                )
            ):

@@ -1636,6 +1710,16 @@ class SyncHandler(object):
                unread_notifications["highlight_count"] = notifs["highlight_count"]

            sync_result_builder.joined.append(room_sync)
+
+            if batch.limited and since_token:
+                user_id = sync_result_builder.sync_config.user.to_string()
+                logger.info(
+                    "Incremental gappy sync of %s for user %s with %d state events" % (
+                        room_id,
+                        user_id,
+                        len(state),
+                    )
+                )
        elif room_builder.rtype == "archived":
            room_sync = ArchivedSyncResult(
                room_id=room_id,

@@ -1729,17 +1813,17 @@ def _calculate_state(
    event_id_to_key = {
        e: key
        for key, e in itertools.chain(
-            timeline_contains.items(),
-            previous.items(),
-            timeline_start.items(),
-            current.items(),
+            iteritems(timeline_contains),
+            iteritems(previous),
+            iteritems(timeline_start),
+            iteritems(current),
        )
    }

-    c_ids = set(e for e in current.values())
-    ts_ids = set(e for e in timeline_start.values())
-    p_ids = set(e for e in previous.values())
-    tc_ids = set(e for e in timeline_contains.values())
+    c_ids = set(e for e in itervalues(current))
+    ts_ids = set(e for e in itervalues(timeline_start))
+    p_ids = set(e for e in itervalues(previous))
+    tc_ids = set(e for e in itervalues(timeline_contains))

    # If we are lazyloading room members, we explicitly add the membership events
    # for the senders in the timeline into the state block returned by /sync,

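Note: dict.iteritems()/itervalues() only exist on Python 2; the hunks above switch to six.iteritems/six.itervalues, which return the memory-efficient iterator on either interpreter. For example:

    from six import iteritems, itervalues

    timeline_start = {
        ("m.room.member", "@alice:hs"): "$e1",
        ("m.room.name", ""): "$e2",
    }

    member_event_ids = set(
        e for t, e in iteritems(timeline_start) if t[0] == "m.room.member"
    )
    all_event_ids = set(itervalues(timeline_start))
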
@@ -1753,7 +1837,7 @@ def _calculate_state(

    if lazy_load_members:
        p_ids.difference_update(
-            e for t, e in timeline_start.iteritems()
+            e for t, e in iteritems(timeline_start)
            if t[0] == EventTypes.Member
        )
