Mirror of https://git.anonymousland.org/anonymousland/synapse.git (synced 2025-02-05 07:15:20 -05:00)

Commit 9f7dc2bef7: Merge branch 'develop' into matthew/preview_urls
@@ -54,3 +54,6 @@ Mads Robin Christensen <mads at v42 dot dk>
 
 Florent Violleau <floviolleau at gmail dot com>
 * Add Raspberry Pi installation instructions and general troubleshooting items
+
+Niklas Riekenbrauck <nikriek at gmail dot.com>
+* Add JWT support for registration and login
CHANGES.rst (65 lines changed)
@@ -1,3 +1,68 @@
+Changes in synapse v0.14.0 (2016-03-30)
+=======================================
+
+No changes from v0.14.0-rc2
+
+Changes in synapse v0.14.0-rc2 (2016-03-23)
+===========================================
+
+Features:
+
+* Add published room list API (PR #657)
+
+Changes:
+
+* Change various caches to consume less memory (PR #656, #658, #660, #662,
+  #663, #665)
+* Allow rooms to be published without requiring an alias (PR #664)
+* Intern common strings in caches to reduce memory footprint (#666)
+
+Bug fixes:
+
+* Fix reject invites over federation (PR #646)
+* Fix bug where registration was not idempotent (PR #649)
+* Update aliases event after deleting aliases (PR #652)
+* Fix unread notification count, which was sometimes wrong (PR #661)
+
+Changes in synapse v0.14.0-rc1 (2016-03-14)
+===========================================
+
+Features:
+
+* Add event_id to response to state event PUT (PR #581)
+* Allow guest users access to messages in rooms they have joined (PR #587)
+* Add config for what state is included in a room invite (PR #598)
+* Send the inviter's member event in room invite state (PR #607)
+* Add error codes for malformed/bad JSON in /login (PR #608)
+* Add support for changing the actions for default rules (PR #609)
+* Add environment variable SYNAPSE_CACHE_FACTOR, default it to 0.1 (PR #612)
+* Add ability for alias creators to delete aliases (PR #614)
+* Add profile information to invites (PR #624)
+
+Changes:
+
+* Enforce user_id exclusivity for AS registrations (PR #572)
+* Make adding push rules idempotent (PR #587)
+* Improve presence performance (PR #582, #586)
+* Change presence semantics for ``last_active_ago`` (PR #582, #586)
+* Don't allow ``m.room.create`` to be changed (PR #596)
+* Add 800x600 to default list of valid thumbnail sizes (PR #616)
+* Always include kicks and bans in full /sync (PR #625)
+* Send history visibility on boundary changes (PR #626)
+* Register endpoint now returns a refresh_token (PR #637)
+
+Bug fixes:
+
+* Fix bug where we returned incorrect state in /sync (PR #573)
+* Always return a JSON object from push rule API (PR #606)
+* Fix bug where registering without a user id sometimes failed (PR #610)
+* Report size of ExpiringCache in cache size metrics (PR #611)
+* Fix rejection of invites to empty rooms (PR #615)
+* Fix usage of ``bcrypt`` to not use ``checkpw`` (PR #619)
+* Pin ``pysaml2`` dependency (PR #634)
+* Fix bug in ``/sync`` where timeline order was incorrect for backfilled events
+  (PR #635)
+
 Changes in synapse v0.13.3 (2016-02-11)
 =======================================
 
@@ -17,3 +17,6 @@ ignore =
 [flake8]
 max-line-length = 90
 ignore = W503 ; W503 requires that binary operators be at the end, not start, of lines. Erik doesn't like it.
+
+[pep8]
+max-line-length = 90
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.13.3"
+__version__ = "0.14.0"
@@ -29,13 +29,14 @@ from .key import KeyConfig
 from .saml2 import SAML2Config
 from .cas import CasConfig
 from .password import PasswordConfig
+from .jwt import JWTConfig
 
 
 class HomeServerConfig(TlsConfig, ServerConfig, DatabaseConfig, LoggingConfig,
                        RatelimitConfig, ContentRepositoryConfig, CaptchaConfig,
                        VoipConfig, RegistrationConfig, MetricsConfig, ApiConfig,
                        AppServiceConfig, KeyConfig, SAML2Config, CasConfig,
-                       PasswordConfig,):
+                       JWTConfig, PasswordConfig,):
     pass
 
 
synapse/config/jwt.py (new file, 37 lines)
@@ -0,0 +1,37 @@
+# -*- coding: utf-8 -*-
+# Copyright 2015 Niklas Riekenbrauck
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from ._base import Config
+
+
+class JWTConfig(Config):
+    def read_config(self, config):
+        jwt_config = config.get("jwt_config", None)
+        if jwt_config:
+            self.jwt_enabled = jwt_config.get("enabled", False)
+            self.jwt_secret = jwt_config["secret"]
+            self.jwt_algorithm = jwt_config["algorithm"]
+        else:
+            self.jwt_enabled = False
+            self.jwt_secret = None
+            self.jwt_algorithm = None
+
+    def default_config(self, **kwargs):
+        return """\
+        # jwt_config:
+        #    enabled: true
+        #    secret: "a secret"
+        #    algorithm: "HS256"
+        """
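For illustration, a minimal, self-contained sketch of how this new config section is consumed. A plain object stands in for the ``Config`` base class, the dict mirrors the commented-out ``jwt_config`` sample from ``default_config`` above, and the ``_JWTConfigSketch`` name is hypothetical rather than part of the commit:

class _JWTConfigSketch(object):
    # Stand-in reusing the read_config body from synapse/config/jwt.py above.
    def read_config(self, config):
        jwt_config = config.get("jwt_config", None)
        if jwt_config:
            self.jwt_enabled = jwt_config.get("enabled", False)
            self.jwt_secret = jwt_config["secret"]
            self.jwt_algorithm = jwt_config["algorithm"]
        else:
            self.jwt_enabled = False
            self.jwt_secret = None
            self.jwt_algorithm = None


conf = _JWTConfigSketch()

# Parsed homeserver.yaml with the sample block from default_config uncommented.
conf.read_config({
    "jwt_config": {
        "enabled": True,
        "secret": "a secret",
        "algorithm": "HS256",
    }
})
assert (conf.jwt_enabled, conf.jwt_secret, conf.jwt_algorithm) == (True, "a secret", "HS256")

# With no jwt_config section at all, JWT login stays disabled.
conf.read_config({})
assert conf.jwt_enabled is False and conf.jwt_secret is None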
@@ -31,7 +31,10 @@ class _EventInternalMetadata(object):
         return dict(self.__dict__)
 
     def is_outlier(self):
-        return hasattr(self, "outlier") and self.outlier
+        return getattr(self, "outlier", False)
+
+    def is_invite_from_remote(self):
+        return getattr(self, "invite_from_remote", False)
 
 
 def _event_dict_property(key):
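To see what the two accessors above do in practice, a small hedged sketch with a toy stand-in class (``_Meta`` is illustrative only; the real flags are set by the handlers further down in this commit):

class _Meta(object):
    # Toy stand-in for _EventInternalMetadata, using the same getattr idiom as above.
    def is_outlier(self):
        return getattr(self, "outlier", False)

    def is_invite_from_remote(self):
        return getattr(self, "invite_from_remote", False)


meta = _Meta()
# Flags that were never set simply read as False, without an AttributeError.
assert not meta.is_outlier() and not meta.is_invite_from_remote()

# Mirrors what the federation handler does below when it accepts a remote invite.
meta.outlier = True
meta.invite_from_remote = True
assert meta.is_outlier() and meta.is_invite_from_remote()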
@@ -17,8 +17,9 @@ from synapse.appservice.scheduler import AppServiceScheduler
 from synapse.appservice.api import ApplicationServiceApi
 from .register import RegistrationHandler
 from .room import (
-    RoomCreationHandler, RoomMemberHandler, RoomListHandler, RoomContextHandler,
+    RoomCreationHandler, RoomListHandler, RoomContextHandler,
 )
+from .room_member import RoomMemberHandler
 from .message import MessageHandler
 from .events import EventStreamHandler, EventHandler
 from .federation import FederationHandler
@@ -41,8 +41,9 @@ class BaseHandler(object):
     """
     Common base class for the event handlers.
 
-    :type store: synapse.storage.events.StateStore
-    :type state_handler: synapse.state.StateHandler
+    Attributes:
+        store (synapse.storage.events.StateStore):
+        state_handler (synapse.state.StateHandler):
     """
 
     def __init__(self, hs):
@@ -65,11 +66,12 @@ class BaseHandler(object):
         """ Returns dict of user_id -> list of events that user is allowed to
         see.
 
-        :param (str, bool) user_tuples: (user id, is_peeking) for each
-            user to be checked. is_peeking should be true if:
-            * the user is not currently a member of the room, and:
-            * the user has not been a member of the room since the given
-              events
+        Args:
+            user_tuples (str, bool): (user id, is_peeking) for each user to be
+                checked. is_peeking should be true if:
+                * the user is not currently a member of the room, and:
+                * the user has not been a member of the room since the
+                  given events
         """
         forgotten = yield defer.gatherResults([
             self.store.who_forgot_in_room(
@@ -165,13 +167,16 @@ class BaseHandler(object):
         """
         Check which events a user is allowed to see
 
-        :param str user_id: user id to be checked
-        :param [synapse.events.EventBase] events: list of events to be checked
-        :param bool is_peeking should be True if:
+        Args:
+            user_id(str): user id to be checked
+            events([synapse.events.EventBase]): list of events to be checked
+            is_peeking(bool): should be True if:
           * the user is not currently a member of the room, and:
          * the user has not been a member of the room since the given
            events
-        :rtype [synapse.events.EventBase]
+
+        Returns:
+            [synapse.events.EventBase]
         """
         types = (
             (EventTypes.RoomHistoryVisibility, ""),
@@ -261,8 +266,7 @@ class BaseHandler(object):
 
         context = yield state_handler.compute_event_context(
             builder,
-            old_state=(prev_member_event,),
-            outlier=True
+            old_state=(prev_member_event,)
         )
 
         if builder.is_state():
@@ -163,9 +163,13 @@ class AuthHandler(BaseHandler):
     def get_session_id(self, clientdict):
         """
         Gets the session ID for a client given the client dictionary
-        :param clientdict: The dictionary sent by the client in the request
-        :return: The string session ID the client sent. If the client did not
-            send a session ID, returns None.
+
+        Args:
+            clientdict: The dictionary sent by the client in the request
+
+        Returns:
+            str|None: The string session ID the client sent. If the client did
+                not send a session ID, returns None.
         """
         sid = None
         if clientdict and 'auth' in clientdict:
@@ -179,9 +183,11 @@ class AuthHandler(BaseHandler):
         Store a key-value pair into the sessions data associated with this
         request. This data is stored server-side and cannot be modified by
         the client.
-        :param session_id: (string) The ID of this session as returned from check_auth
-        :param key: (string) The key to store the data under
-        :param value: (any) The data to store
+
+        Args:
+            session_id (string): The ID of this session as returned from check_auth
+            key (string): The key to store the data under
+            value (any): The data to store
         """
         sess = self._get_session_info(session_id)
         sess.setdefault('serverdict', {})[key] = value
@@ -190,9 +196,11 @@ class AuthHandler(BaseHandler):
     def get_session_data(self, session_id, key, default=None):
         """
         Retrieve data stored with set_session_data
-        :param session_id: (string) The ID of this session as returned from check_auth
-        :param key: (string) The key to store the data under
-        :param default: (any) Value to return if the key has not been set
+
+        Args:
+            session_id (string): The ID of this session as returned from check_auth
+            key (string): The key to store the data under
+            default (any): Value to return if the key has not been set
         """
         sess = self._get_session_info(session_id)
         return sess.setdefault('serverdict', {}).get(key, default)
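A short hedged usage sketch of the session helpers documented above. ``auth_handler`` is assumed to be an ``AuthHandler`` instance and ``session_id`` a value previously returned by ``check_auth``; the key name and user id are placeholder examples:

# Stash server-side data against the user-interactive-auth session;
# clients cannot read or modify it.
auth_handler.set_session_data(session_id, "registered_user_id", "@alice:example.com")

# Later in the same flow, read it back; the default is returned if the key was never set.
user_id = auth_handler.get_session_data(session_id, "registered_user_id", default=None)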
@@ -102,8 +102,7 @@ class FederationHandler(BaseHandler):
 
     @log_function
     @defer.inlineCallbacks
-    def on_receive_pdu(self, origin, pdu, state=None,
-                       auth_chain=None):
+    def on_receive_pdu(self, origin, pdu, state=None, auth_chain=None):
         """ Called by the ReplicationLayer when we have a new pdu. We need to
         do auth checks and put it through the StateHandler.
         """
@@ -174,11 +173,7 @@ class FederationHandler(BaseHandler):
                 })
                 seen_ids.add(e.event_id)
 
-            yield self._handle_new_events(
-                origin,
-                event_infos,
-                outliers=True
-            )
+            yield self._handle_new_events(origin, event_infos)
 
         try:
             context, event_stream_id, max_stream_id = yield self._handle_new_event(
@@ -761,6 +756,7 @@ class FederationHandler(BaseHandler):
         event = pdu
 
         event.internal_metadata.outlier = True
+        event.internal_metadata.invite_from_remote = True
 
         event.signatures.update(
             compute_event_signature(
@@ -1069,9 +1065,6 @@ class FederationHandler(BaseHandler):
     @defer.inlineCallbacks
     @log_function
     def _handle_new_event(self, origin, event, state=None, auth_events=None):
-
-        outlier = event.internal_metadata.is_outlier()
-
         context = yield self._prep_event(
             origin, event,
             state=state,
@@ -1087,14 +1080,12 @@ class FederationHandler(BaseHandler):
         event_stream_id, max_stream_id = yield self.store.persist_event(
             event,
             context=context,
-            is_new_state=not outlier,
         )
 
         defer.returnValue((context, event_stream_id, max_stream_id))
 
     @defer.inlineCallbacks
-    def _handle_new_events(self, origin, event_infos, backfilled=False,
-                           outliers=False):
+    def _handle_new_events(self, origin, event_infos, backfilled=False):
         contexts = yield defer.gatherResults(
             [
                 self._prep_event(
@@ -1113,7 +1104,6 @@ class FederationHandler(BaseHandler):
                 for ev_info, context in itertools.izip(event_infos, contexts)
             ],
             backfilled=backfilled,
-            is_new_state=(not outliers and not backfilled),
         )
 
     @defer.inlineCallbacks
@@ -1128,11 +1118,9 @@ class FederationHandler(BaseHandler):
         """
         events_to_context = {}
         for e in itertools.chain(auth_events, state):
-            ctx = yield self.state_handler.compute_event_context(
-                e, outlier=True,
-            )
-            events_to_context[e.event_id] = ctx
             e.internal_metadata.outlier = True
+            ctx = yield self.state_handler.compute_event_context(e)
+            events_to_context[e.event_id] = ctx
 
         event_map = {
             e.event_id: e
@@ -1176,16 +1164,14 @@ class FederationHandler(BaseHandler):
                 (e, events_to_context[e.event_id])
                 for e in itertools.chain(auth_events, state)
             ],
-            is_new_state=False,
         )
 
         new_event_context = yield self.state_handler.compute_event_context(
-            event, old_state=state, outlier=False,
+            event, old_state=state
         )
 
         event_stream_id, max_stream_id = yield self.store.persist_event(
             event, new_event_context,
-            is_new_state=True,
             current_state=state,
         )
 
@@ -1193,10 +1179,9 @@ class FederationHandler(BaseHandler):
 
     @defer.inlineCallbacks
     def _prep_event(self, origin, event, state=None, auth_events=None):
-        outlier = event.internal_metadata.is_outlier()
 
         context = yield self.state_handler.compute_event_context(
-            event, old_state=state, outlier=outlier,
+            event, old_state=state,
         )
 
         if not auth_events:
@@ -1718,13 +1703,15 @@ class FederationHandler(BaseHandler):
     def _check_signature(self, event, auth_events):
         """
         Checks that the signature in the event is consistent with its invite.
-        :param event (Event): The m.room.member event to check
-        :param auth_events (dict<(event type, state_key), event>)
+
+        Args:
+            event (Event): The m.room.member event to check
+            auth_events (dict<(event type, state_key), event>):
 
-        :raises
-            AuthError if signature didn't match any keys, or key has been
+        Raises:
+            AuthError: if signature didn't match any keys, or key has been
                 revoked,
-            SynapseError if a transient error meant a key couldn't be checked
+            SynapseError: if a transient error meant a key couldn't be checked
                 for revocation.
         """
         signed = event.content["third_party_invite"]["signed"]
@@ -1766,12 +1753,13 @@ class FederationHandler(BaseHandler):
         """
         Checks whether public_key has been revoked.
 
-        :param public_key (str): base-64 encoded public key.
-        :param url (str): Key revocation URL.
+        Args:
+            public_key (str): base-64 encoded public key.
+            url (str): Key revocation URL.
 
-        :raises
-            AuthError if they key has been revoked.
-            SynapseError if a transient error meant a key couldn't be checked
+        Raises:
+            AuthError: if they key has been revoked.
+            SynapseError: if a transient error meant a key couldn't be checked
                 for revocation.
         """
         try:
@@ -21,6 +21,7 @@ from synapse.streams.config import PaginationConfig
 from synapse.events.utils import serialize_event
 from synapse.events.validator import EventValidator
 from synapse.util import unwrapFirstError
+from synapse.util.async import concurrently_execute
 from synapse.util.caches.snapshot_cache import SnapshotCache
 from synapse.types import UserID, RoomStreamToken, StreamToken
 
@@ -556,14 +557,7 @@ class MessageHandler(BaseHandler):
             except:
                 logger.exception("Failed to get snapshot")
 
-        # Only do N rooms at once
-        n = 5
-        d_list = [handle_room(e) for e in room_list]
-        for i in range(0, len(d_list), n):
-            yield defer.gatherResults(
-                d_list[i:i + n],
-                consumeErrors=True
-            ).addErrback(unwrapFirstError)
+        yield concurrently_execute(handle_room, room_list, 10)
 
         account_data_events = []
         for account_data_type, content in account_data.items():
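The hand-rolled batching loop removed above is replaced by the newly imported ``concurrently_execute(handle_room, room_list, 10)``. As a hedged sketch of what a bounded-concurrency helper of that shape can look like on top of Twisted (the real ``synapse.util.async.concurrently_execute`` may differ in details such as error handling):

from twisted.internet import defer


def concurrently_execute_sketch(func, args, limit):
    # Run func(arg) for every arg, with at most `limit` calls in flight at once.
    # A shared iterator is drained by `limit` worker coroutines, so a new call
    # only starts once a previous one has finished.
    it = iter(args)

    @defer.inlineCallbacks
    def _worker():
        for arg in it:
            yield func(arg)

    return defer.gatherResults(
        [_worker() for _ in range(limit)],
        consumeErrors=True,
    )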
@@ -18,20 +18,17 @@ from twisted.internet import defer
 
 from ._base import BaseHandler
 
-from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken, Requester
+from synapse.types import UserID, RoomAlias, RoomID, RoomStreamToken
 from synapse.api.constants import (
-    EventTypes, Membership, JoinRules, RoomCreationPreset,
+    EventTypes, JoinRules, RoomCreationPreset,
 )
-from synapse.api.errors import AuthError, StoreError, SynapseError, Codes
-from synapse.util import stringutils, unwrapFirstError
+from synapse.api.errors import AuthError, StoreError, SynapseError
+from synapse.util import stringutils
+from synapse.util.async import concurrently_execute
 from synapse.util.logcontext import preserve_context_over_fn
 from synapse.util.caches.response_cache import ResponseCache
 
-from signedjson.sign import verify_signed_json
-from signedjson.key import decode_verify_key_bytes
-
 from collections import OrderedDict
-from unpaddedbase64 import decode_base64
 
 import logging
 import math
@@ -357,588 +354,6 @@ class RoomCreationHandler(BaseHandler):
         )
|
|
||||||
|
|
||||||
class RoomMemberHandler(BaseHandler):
|
|
||||||
# TODO(paul): This handler currently contains a messy conflation of
|
|
||||||
# low-level API that works on UserID objects and so on, and REST-level
|
|
||||||
# API that takes ID strings and returns pagination chunks. These concerns
|
|
||||||
# ought to be separated out a lot better.
|
|
||||||
|
|
||||||
def __init__(self, hs):
|
|
||||||
super(RoomMemberHandler, self).__init__(hs)
|
|
||||||
|
|
||||||
self.clock = hs.get_clock()
|
|
||||||
|
|
||||||
self.distributor = hs.get_distributor()
|
|
||||||
self.distributor.declare("user_joined_room")
|
|
||||||
self.distributor.declare("user_left_room")
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def get_room_members(self, room_id):
|
|
||||||
users = yield self.store.get_users_in_room(room_id)
|
|
||||||
|
|
||||||
defer.returnValue([UserID.from_string(u) for u in users])
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def fetch_room_distributions_into(self, room_id, localusers=None,
|
|
||||||
remotedomains=None, ignore_user=None):
|
|
||||||
"""Fetch the distribution of a room, adding elements to either
|
|
||||||
'localusers' or 'remotedomains', which should be a set() if supplied.
|
|
||||||
If ignore_user is set, ignore that user.
|
|
||||||
|
|
||||||
This function returns nothing; its result is performed by the
|
|
||||||
side-effect on the two passed sets. This allows easy accumulation of
|
|
||||||
member lists of multiple rooms at once if required.
|
|
||||||
"""
|
|
||||||
members = yield self.get_room_members(room_id)
|
|
||||||
for member in members:
|
|
||||||
if ignore_user is not None and member == ignore_user:
|
|
||||||
continue
|
|
||||||
|
|
||||||
if self.hs.is_mine(member):
|
|
||||||
if localusers is not None:
|
|
||||||
localusers.add(member)
|
|
||||||
else:
|
|
||||||
if remotedomains is not None:
|
|
||||||
remotedomains.add(member.domain)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def update_membership(
|
|
||||||
self,
|
|
||||||
requester,
|
|
||||||
target,
|
|
||||||
room_id,
|
|
||||||
action,
|
|
||||||
txn_id=None,
|
|
||||||
remote_room_hosts=None,
|
|
||||||
third_party_signed=None,
|
|
||||||
ratelimit=True,
|
|
||||||
):
|
|
||||||
effective_membership_state = action
|
|
||||||
if action in ["kick", "unban"]:
|
|
||||||
effective_membership_state = "leave"
|
|
||||||
elif action == "forget":
|
|
||||||
effective_membership_state = "leave"
|
|
||||||
|
|
||||||
if third_party_signed is not None:
|
|
||||||
replication = self.hs.get_replication_layer()
|
|
||||||
yield replication.exchange_third_party_invite(
|
|
||||||
third_party_signed["sender"],
|
|
||||||
target.to_string(),
|
|
||||||
room_id,
|
|
||||||
third_party_signed,
|
|
||||||
)
|
|
||||||
|
|
||||||
msg_handler = self.hs.get_handlers().message_handler
|
|
||||||
|
|
||||||
content = {"membership": effective_membership_state}
|
|
||||||
if requester.is_guest:
|
|
||||||
content["kind"] = "guest"
|
|
||||||
|
|
||||||
event, context = yield msg_handler.create_event(
|
|
||||||
{
|
|
||||||
"type": EventTypes.Member,
|
|
||||||
"content": content,
|
|
||||||
"room_id": room_id,
|
|
||||||
"sender": requester.user.to_string(),
|
|
||||||
"state_key": target.to_string(),
|
|
||||||
|
|
||||||
# For backwards compatibility:
|
|
||||||
"membership": effective_membership_state,
|
|
||||||
},
|
|
||||||
token_id=requester.access_token_id,
|
|
||||||
txn_id=txn_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
old_state = context.current_state.get((EventTypes.Member, event.state_key))
|
|
||||||
old_membership = old_state.content.get("membership") if old_state else None
|
|
||||||
if action == "unban" and old_membership != "ban":
|
|
||||||
raise SynapseError(
|
|
||||||
403,
|
|
||||||
"Cannot unban user who was not banned (membership=%s)" % old_membership,
|
|
||||||
errcode=Codes.BAD_STATE
|
|
||||||
)
|
|
||||||
if old_membership == "ban" and action != "unban":
|
|
||||||
raise SynapseError(
|
|
||||||
403,
|
|
||||||
"Cannot %s user who was is banned" % (action,),
|
|
||||||
errcode=Codes.BAD_STATE
|
|
||||||
)
|
|
||||||
|
|
||||||
member_handler = self.hs.get_handlers().room_member_handler
|
|
||||||
yield member_handler.send_membership_event(
|
|
||||||
requester,
|
|
||||||
event,
|
|
||||||
context,
|
|
||||||
ratelimit=ratelimit,
|
|
||||||
remote_room_hosts=remote_room_hosts,
|
|
||||||
)
|
|
||||||
|
|
||||||
if action == "forget":
|
|
||||||
yield self.forget(requester.user, room_id)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def send_membership_event(
|
|
||||||
self,
|
|
||||||
requester,
|
|
||||||
event,
|
|
||||||
context,
|
|
||||||
remote_room_hosts=None,
|
|
||||||
ratelimit=True,
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Change the membership status of a user in a room.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
requester (Requester): The local user who requested the membership
|
|
||||||
event. If None, certain checks, like whether this homeserver can
|
|
||||||
act as the sender, will be skipped.
|
|
||||||
event (SynapseEvent): The membership event.
|
|
||||||
context: The context of the event.
|
|
||||||
is_guest (bool): Whether the sender is a guest.
|
|
||||||
room_hosts ([str]): Homeservers which are likely to already be in
|
|
||||||
the room, and could be danced with in order to join this
|
|
||||||
homeserver for the first time.
|
|
||||||
ratelimit (bool): Whether to rate limit this request.
|
|
||||||
Raises:
|
|
||||||
SynapseError if there was a problem changing the membership.
|
|
||||||
"""
|
|
||||||
remote_room_hosts = remote_room_hosts or []
|
|
||||||
|
|
||||||
target_user = UserID.from_string(event.state_key)
|
|
||||||
room_id = event.room_id
|
|
||||||
|
|
||||||
if requester is not None:
|
|
||||||
sender = UserID.from_string(event.sender)
|
|
||||||
assert sender == requester.user, (
|
|
||||||
"Sender (%s) must be same as requester (%s)" %
|
|
||||||
(sender, requester.user)
|
|
||||||
)
|
|
||||||
assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
|
|
||||||
else:
|
|
||||||
requester = Requester(target_user, None, False)
|
|
||||||
|
|
||||||
message_handler = self.hs.get_handlers().message_handler
|
|
||||||
prev_event = message_handler.deduplicate_state_event(event, context)
|
|
||||||
if prev_event is not None:
|
|
||||||
return
|
|
||||||
|
|
||||||
action = "send"
|
|
||||||
|
|
||||||
if event.membership == Membership.JOIN:
|
|
||||||
if requester.is_guest and not self._can_guest_join(context.current_state):
|
|
||||||
# This should be an auth check, but guests are a local concept,
|
|
||||||
# so don't really fit into the general auth process.
|
|
||||||
raise AuthError(403, "Guest access not allowed")
|
|
||||||
do_remote_join_dance, remote_room_hosts = self._should_do_dance(
|
|
||||||
context,
|
|
||||||
(self.get_inviter(event.state_key, context.current_state)),
|
|
||||||
remote_room_hosts,
|
|
||||||
)
|
|
||||||
if do_remote_join_dance:
|
|
||||||
action = "remote_join"
|
|
||||||
elif event.membership == Membership.LEAVE:
|
|
||||||
is_host_in_room = self.is_host_in_room(context.current_state)
|
|
||||||
|
|
||||||
if not is_host_in_room:
|
|
||||||
# perhaps we've been invited
|
|
||||||
inviter = self.get_inviter(target_user.to_string(), context.current_state)
|
|
||||||
if not inviter:
|
|
||||||
raise SynapseError(404, "Not a known room")
|
|
||||||
|
|
||||||
if self.hs.is_mine(inviter):
|
|
||||||
# the inviter was on our server, but has now left. Carry on
|
|
||||||
# with the normal rejection codepath.
|
|
||||||
#
|
|
||||||
# This is a bit of a hack, because the room might still be
|
|
||||||
# active on other servers.
|
|
||||||
pass
|
|
||||||
else:
|
|
||||||
# send the rejection to the inviter's HS.
|
|
||||||
remote_room_hosts = remote_room_hosts + [inviter.domain]
|
|
||||||
action = "remote_reject"
|
|
||||||
|
|
||||||
federation_handler = self.hs.get_handlers().federation_handler
|
|
||||||
|
|
||||||
if action == "remote_join":
|
|
||||||
if len(remote_room_hosts) == 0:
|
|
||||||
raise SynapseError(404, "No known servers")
|
|
||||||
|
|
||||||
# We don't do an auth check if we are doing an invite
|
|
||||||
# join dance for now, since we're kinda implicitly checking
|
|
||||||
# that we are allowed to join when we decide whether or not we
|
|
||||||
# need to do the invite/join dance.
|
|
||||||
yield federation_handler.do_invite_join(
|
|
||||||
remote_room_hosts,
|
|
||||||
event.room_id,
|
|
||||||
event.user_id,
|
|
||||||
event.content,
|
|
||||||
)
|
|
||||||
elif action == "remote_reject":
|
|
||||||
yield federation_handler.do_remotely_reject_invite(
|
|
||||||
remote_room_hosts,
|
|
||||||
room_id,
|
|
||||||
event.user_id
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
yield self.handle_new_client_event(
|
|
||||||
requester,
|
|
||||||
event,
|
|
||||||
context,
|
|
||||||
extra_users=[target_user],
|
|
||||||
ratelimit=ratelimit,
|
|
||||||
)
|
|
||||||
|
|
||||||
prev_member_event = context.current_state.get(
|
|
||||||
(EventTypes.Member, target_user.to_string()),
|
|
||||||
None
|
|
||||||
)
|
|
||||||
|
|
||||||
if event.membership == Membership.JOIN:
|
|
||||||
if not prev_member_event or prev_member_event.membership != Membership.JOIN:
|
|
||||||
# Only fire user_joined_room if the user has acutally joined the
|
|
||||||
# room. Don't bother if the user is just changing their profile
|
|
||||||
# info.
|
|
||||||
yield user_joined_room(self.distributor, target_user, room_id)
|
|
||||||
elif event.membership == Membership.LEAVE:
|
|
||||||
if prev_member_event and prev_member_event.membership == Membership.JOIN:
|
|
||||||
user_left_room(self.distributor, target_user, room_id)
|
|
||||||
|
|
||||||
def _can_guest_join(self, current_state):
|
|
||||||
"""
|
|
||||||
Returns whether a guest can join a room based on its current state.
|
|
||||||
"""
|
|
||||||
guest_access = current_state.get((EventTypes.GuestAccess, ""), None)
|
|
||||||
return (
|
|
||||||
guest_access
|
|
||||||
and guest_access.content
|
|
||||||
and "guest_access" in guest_access.content
|
|
||||||
and guest_access.content["guest_access"] == "can_join"
|
|
||||||
)
|
|
||||||
|
|
||||||
def _should_do_dance(self, context, inviter, room_hosts=None):
|
|
||||||
# TODO: Shouldn't this be remote_room_host?
|
|
||||||
room_hosts = room_hosts or []
|
|
||||||
|
|
||||||
is_host_in_room = self.is_host_in_room(context.current_state)
|
|
||||||
if is_host_in_room:
|
|
||||||
return False, room_hosts
|
|
||||||
|
|
||||||
if inviter and not self.hs.is_mine(inviter):
|
|
||||||
room_hosts.append(inviter.domain)
|
|
||||||
|
|
||||||
return True, room_hosts
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def lookup_room_alias(self, room_alias):
|
|
||||||
"""
|
|
||||||
Get the room ID associated with a room alias.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
room_alias (RoomAlias): The alias to look up.
|
|
||||||
Returns:
|
|
||||||
A tuple of:
|
|
||||||
The room ID as a RoomID object.
|
|
||||||
Hosts likely to be participating in the room ([str]).
|
|
||||||
Raises:
|
|
||||||
SynapseError if room alias could not be found.
|
|
||||||
"""
|
|
||||||
directory_handler = self.hs.get_handlers().directory_handler
|
|
||||||
mapping = yield directory_handler.get_association(room_alias)
|
|
||||||
|
|
||||||
if not mapping:
|
|
||||||
raise SynapseError(404, "No such room alias")
|
|
||||||
|
|
||||||
room_id = mapping["room_id"]
|
|
||||||
servers = mapping["servers"]
|
|
||||||
|
|
||||||
defer.returnValue((RoomID.from_string(room_id), servers))
|
|
||||||
|
|
||||||
def get_inviter(self, user_id, current_state):
|
|
||||||
prev_state = current_state.get((EventTypes.Member, user_id))
|
|
||||||
if prev_state and prev_state.membership == Membership.INVITE:
|
|
||||||
return UserID.from_string(prev_state.user_id)
|
|
||||||
return None
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def get_joined_rooms_for_user(self, user):
|
|
||||||
"""Returns a list of roomids that the user has any of the given
|
|
||||||
membership states in."""
|
|
||||||
|
|
||||||
rooms = yield self.store.get_rooms_for_user(
|
|
||||||
user.to_string(),
|
|
||||||
)
|
|
||||||
|
|
||||||
# For some reason the list of events contains duplicates
|
|
||||||
# TODO(paul): work out why because I really don't think it should
|
|
||||||
room_ids = set(r.room_id for r in rooms)
|
|
||||||
|
|
||||||
defer.returnValue(room_ids)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def do_3pid_invite(
|
|
||||||
self,
|
|
||||||
room_id,
|
|
||||||
inviter,
|
|
||||||
medium,
|
|
||||||
address,
|
|
||||||
id_server,
|
|
||||||
requester,
|
|
||||||
txn_id
|
|
||||||
):
|
|
||||||
invitee = yield self._lookup_3pid(
|
|
||||||
id_server, medium, address
|
|
||||||
)
|
|
||||||
|
|
||||||
if invitee:
|
|
||||||
handler = self.hs.get_handlers().room_member_handler
|
|
||||||
yield handler.update_membership(
|
|
||||||
requester,
|
|
||||||
UserID.from_string(invitee),
|
|
||||||
room_id,
|
|
||||||
"invite",
|
|
||||||
txn_id=txn_id,
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
yield self._make_and_store_3pid_invite(
|
|
||||||
requester,
|
|
||||||
id_server,
|
|
||||||
medium,
|
|
||||||
address,
|
|
||||||
room_id,
|
|
||||||
inviter,
|
|
||||||
txn_id=txn_id
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def _lookup_3pid(self, id_server, medium, address):
|
|
||||||
"""Looks up a 3pid in the passed identity server.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
id_server (str): The server name (including port, if required)
|
|
||||||
of the identity server to use.
|
|
||||||
medium (str): The type of the third party identifier (e.g. "email").
|
|
||||||
address (str): The third party identifier (e.g. "foo@example.com").
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
(str) the matrix ID of the 3pid, or None if it is not recognized.
|
|
||||||
"""
|
|
||||||
try:
|
|
||||||
data = yield self.hs.get_simple_http_client().get_json(
|
|
||||||
"%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server,),
|
|
||||||
{
|
|
||||||
"medium": medium,
|
|
||||||
"address": address,
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
if "mxid" in data:
|
|
||||||
if "signatures" not in data:
|
|
||||||
raise AuthError(401, "No signatures on 3pid binding")
|
|
||||||
self.verify_any_signature(data, id_server)
|
|
||||||
defer.returnValue(data["mxid"])
|
|
||||||
|
|
||||||
except IOError as e:
|
|
||||||
logger.warn("Error from identity server lookup: %s" % (e,))
|
|
||||||
defer.returnValue(None)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def verify_any_signature(self, data, server_hostname):
|
|
||||||
if server_hostname not in data["signatures"]:
|
|
||||||
raise AuthError(401, "No signature from server %s" % (server_hostname,))
|
|
||||||
for key_name, signature in data["signatures"][server_hostname].items():
|
|
||||||
key_data = yield self.hs.get_simple_http_client().get_json(
|
|
||||||
"%s%s/_matrix/identity/api/v1/pubkey/%s" %
|
|
||||||
(id_server_scheme, server_hostname, key_name,),
|
|
||||||
)
|
|
||||||
if "public_key" not in key_data:
|
|
||||||
raise AuthError(401, "No public key named %s from %s" %
|
|
||||||
(key_name, server_hostname,))
|
|
||||||
verify_signed_json(
|
|
||||||
data,
|
|
||||||
server_hostname,
|
|
||||||
decode_verify_key_bytes(key_name, decode_base64(key_data["public_key"]))
|
|
||||||
)
|
|
||||||
return
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def _make_and_store_3pid_invite(
|
|
||||||
self,
|
|
||||||
requester,
|
|
||||||
id_server,
|
|
||||||
medium,
|
|
||||||
address,
|
|
||||||
room_id,
|
|
||||||
user,
|
|
||||||
txn_id
|
|
||||||
):
|
|
||||||
room_state = yield self.hs.get_state_handler().get_current_state(room_id)
|
|
||||||
|
|
||||||
inviter_display_name = ""
|
|
||||||
inviter_avatar_url = ""
|
|
||||||
member_event = room_state.get((EventTypes.Member, user.to_string()))
|
|
||||||
if member_event:
|
|
||||||
inviter_display_name = member_event.content.get("displayname", "")
|
|
||||||
inviter_avatar_url = member_event.content.get("avatar_url", "")
|
|
||||||
|
|
||||||
canonical_room_alias = ""
|
|
||||||
canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
|
|
||||||
if canonical_alias_event:
|
|
||||||
canonical_room_alias = canonical_alias_event.content.get("alias", "")
|
|
||||||
|
|
||||||
room_name = ""
|
|
||||||
room_name_event = room_state.get((EventTypes.Name, ""))
|
|
||||||
if room_name_event:
|
|
||||||
room_name = room_name_event.content.get("name", "")
|
|
||||||
|
|
||||||
room_join_rules = ""
|
|
||||||
join_rules_event = room_state.get((EventTypes.JoinRules, ""))
|
|
||||||
if join_rules_event:
|
|
||||||
room_join_rules = join_rules_event.content.get("join_rule", "")
|
|
||||||
|
|
||||||
room_avatar_url = ""
|
|
||||||
room_avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
|
|
||||||
if room_avatar_event:
|
|
||||||
room_avatar_url = room_avatar_event.content.get("url", "")
|
|
||||||
|
|
||||||
token, public_keys, fallback_public_key, display_name = (
|
|
||||||
yield self._ask_id_server_for_third_party_invite(
|
|
||||||
id_server=id_server,
|
|
||||||
medium=medium,
|
|
||||||
address=address,
|
|
||||||
room_id=room_id,
|
|
||||||
inviter_user_id=user.to_string(),
|
|
||||||
room_alias=canonical_room_alias,
|
|
||||||
room_avatar_url=room_avatar_url,
|
|
||||||
room_join_rules=room_join_rules,
|
|
||||||
room_name=room_name,
|
|
||||||
inviter_display_name=inviter_display_name,
|
|
||||||
inviter_avatar_url=inviter_avatar_url
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
msg_handler = self.hs.get_handlers().message_handler
|
|
||||||
yield msg_handler.create_and_send_nonmember_event(
|
|
||||||
requester,
|
|
||||||
{
|
|
||||||
"type": EventTypes.ThirdPartyInvite,
|
|
||||||
"content": {
|
|
||||||
"display_name": display_name,
|
|
||||||
"public_keys": public_keys,
|
|
||||||
|
|
||||||
# For backwards compatibility:
|
|
||||||
"key_validity_url": fallback_public_key["key_validity_url"],
|
|
||||||
"public_key": fallback_public_key["public_key"],
|
|
||||||
},
|
|
||||||
"room_id": room_id,
|
|
||||||
"sender": user.to_string(),
|
|
||||||
"state_key": token,
|
|
||||||
},
|
|
||||||
txn_id=txn_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
@defer.inlineCallbacks
|
|
||||||
def _ask_id_server_for_third_party_invite(
|
|
||||||
self,
|
|
||||||
id_server,
|
|
||||||
medium,
|
|
||||||
address,
|
|
||||||
room_id,
|
|
||||||
inviter_user_id,
|
|
||||||
room_alias,
|
|
||||||
room_avatar_url,
|
|
||||||
room_join_rules,
|
|
||||||
room_name,
|
|
||||||
inviter_display_name,
|
|
||||||
inviter_avatar_url
|
|
||||||
):
|
|
||||||
"""
|
|
||||||
Asks an identity server for a third party invite.
|
|
||||||
|
|
||||||
:param id_server (str): hostname + optional port for the identity server.
|
|
||||||
:param medium (str): The literal string "email".
|
|
||||||
:param address (str): The third party address being invited.
|
|
||||||
:param room_id (str): The ID of the room to which the user is invited.
|
|
||||||
:param inviter_user_id (str): The user ID of the inviter.
|
|
||||||
:param room_alias (str): An alias for the room, for cosmetic
|
|
||||||
notifications.
|
|
||||||
:param room_avatar_url (str): The URL of the room's avatar, for cosmetic
|
|
||||||
notifications.
|
|
||||||
:param room_join_rules (str): The join rules of the email
|
|
||||||
(e.g. "public").
|
|
||||||
:param room_name (str): The m.room.name of the room.
|
|
||||||
:param inviter_display_name (str): The current display name of the
|
|
||||||
inviter.
|
|
||||||
:param inviter_avatar_url (str): The URL of the inviter's avatar.
|
|
||||||
|
|
||||||
:return: A deferred tuple containing:
|
|
||||||
token (str): The token which must be signed to prove authenticity.
|
|
||||||
public_keys ([{"public_key": str, "key_validity_url": str}]):
|
|
||||||
public_key is a base64-encoded ed25519 public key.
|
|
||||||
fallback_public_key: One element from public_keys.
|
|
||||||
display_name (str): A user-friendly name to represent the invited
|
|
||||||
user.
|
|
||||||
"""
|
|
||||||
|
|
||||||
is_url = "%s%s/_matrix/identity/api/v1/store-invite" % (
|
|
||||||
id_server_scheme, id_server,
|
|
||||||
)
|
|
||||||
|
|
||||||
invite_config = {
|
|
||||||
"medium": medium,
|
|
||||||
"address": address,
|
|
||||||
"room_id": room_id,
|
|
||||||
"room_alias": room_alias,
|
|
||||||
"room_avatar_url": room_avatar_url,
|
|
||||||
"room_join_rules": room_join_rules,
|
|
||||||
"room_name": room_name,
|
|
||||||
"sender": inviter_user_id,
|
|
||||||
"sender_display_name": inviter_display_name,
|
|
||||||
"sender_avatar_url": inviter_avatar_url,
|
|
||||||
}
|
|
||||||
|
|
||||||
if self.hs.config.invite_3pid_guest:
|
|
||||||
registration_handler = self.hs.get_handlers().registration_handler
|
|
||||||
guest_access_token = yield registration_handler.guest_access_token_for(
|
|
||||||
medium=medium,
|
|
||||||
address=address,
|
|
||||||
inviter_user_id=inviter_user_id,
|
|
||||||
)
|
|
||||||
|
|
||||||
guest_user_info = yield self.hs.get_auth().get_user_by_access_token(
|
|
||||||
guest_access_token
|
|
||||||
)
|
|
||||||
|
|
||||||
invite_config.update({
|
|
||||||
"guest_access_token": guest_access_token,
|
|
||||||
"guest_user_id": guest_user_info["user"].to_string(),
|
|
||||||
})
|
|
||||||
|
|
||||||
data = yield self.hs.get_simple_http_client().post_urlencoded_get_json(
|
|
||||||
is_url,
|
|
||||||
invite_config
|
|
||||||
)
|
|
||||||
# TODO: Check for success
|
|
||||||
token = data["token"]
|
|
||||||
public_keys = data.get("public_keys", [])
|
|
||||||
if "public_key" in data:
|
|
||||||
fallback_public_key = {
|
|
||||||
"public_key": data["public_key"],
|
|
||||||
"key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
|
|
||||||
id_server_scheme, id_server,
|
|
||||||
),
|
|
||||||
}
|
|
||||||
else:
|
|
||||||
fallback_public_key = public_keys[0]
|
|
||||||
|
|
||||||
if not public_keys:
|
|
||||||
public_keys.append(fallback_public_key)
|
|
||||||
display_name = data["display_name"]
|
|
||||||
defer.returnValue((token, public_keys, fallback_public_key, display_name))
|
|
||||||
|
|
||||||
def forget(self, user, room_id):
|
|
||||||
return self.store.forget(user.to_string(), room_id)
|
|
||||||
|
|
||||||
|
|
||||||
 class RoomListHandler(BaseHandler):
     def __init__(self, hs):
         super(RoomListHandler, self).__init__(hs)
@@ -954,6 +369,8 @@ class RoomListHandler(BaseHandler):
     def _get_public_room_list(self):
         room_ids = yield self.store.get_public_room_ids()
 
+        results = []
+
         @defer.inlineCallbacks
         def handle_room(room_id):
             aliases = yield self.store.get_aliases_for_room(room_id)
@@ -1014,18 +431,12 @@ class RoomListHandler(BaseHandler):
             joined_users = yield self.store.get_users_in_room(room_id)
             result["num_joined_members"] = len(joined_users)
 
-            defer.returnValue(result)
+            results.append(result)
 
-        result = []
-        for chunk in (room_ids[i:i + 10] for i in xrange(0, len(room_ids), 10)):
-            chunk_result = yield defer.gatherResults([
-                handle_room(room_id)
-                for room_id in chunk
-            ], consumeErrors=True).addErrback(unwrapFirstError)
-            result.extend(v for v in chunk_result if v)
+        yield concurrently_execute(handle_room, room_ids, 10)
 
         # FIXME (erikj): START is no longer a valid value
-        defer.returnValue({"start": "START", "end": "END", "chunk": result})
+        defer.returnValue({"start": "START", "end": "END", "chunk": results})
 
 
 class RoomContextHandler(BaseHandler):
synapse/handlers/room_member.py (new file, 646 lines)
@@ -0,0 +1,646 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016 OpenMarket Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from twisted.internet import defer
+
+from ._base import BaseHandler
+
+from synapse.types import UserID, RoomID, Requester
+from synapse.api.constants import (
+    EventTypes, Membership,
+)
+from synapse.api.errors import AuthError, SynapseError, Codes
+from synapse.util.logcontext import preserve_context_over_fn
+
+from signedjson.sign import verify_signed_json
+from signedjson.key import decode_verify_key_bytes
+
+from unpaddedbase64 import decode_base64
+
+import logging
+
+logger = logging.getLogger(__name__)
+
+id_server_scheme = "https://"
+
+
+def user_left_room(distributor, user, room_id):
+    return preserve_context_over_fn(
+        distributor.fire,
+        "user_left_room", user=user, room_id=room_id
+    )
+
+
+def user_joined_room(distributor, user, room_id):
+    return preserve_context_over_fn(
+        distributor.fire,
+        "user_joined_room", user=user, room_id=room_id
+    )
+
+
class RoomMemberHandler(BaseHandler):
|
||||||
|
# TODO(paul): This handler currently contains a messy conflation of
|
||||||
|
# low-level API that works on UserID objects and so on, and REST-level
|
||||||
|
# API that takes ID strings and returns pagination chunks. These concerns
|
||||||
|
# ought to be separated out a lot better.
|
||||||
|
|
||||||
|
def __init__(self, hs):
|
||||||
|
super(RoomMemberHandler, self).__init__(hs)
|
||||||
|
|
||||||
|
self.clock = hs.get_clock()
|
||||||
|
|
||||||
|
self.distributor = hs.get_distributor()
|
||||||
|
self.distributor.declare("user_joined_room")
|
||||||
|
self.distributor.declare("user_left_room")
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def get_room_members(self, room_id):
|
||||||
|
users = yield self.store.get_users_in_room(room_id)
|
||||||
|
|
||||||
|
defer.returnValue([UserID.from_string(u) for u in users])
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def fetch_room_distributions_into(self, room_id, localusers=None,
|
||||||
|
remotedomains=None, ignore_user=None):
|
||||||
|
"""Fetch the distribution of a room, adding elements to either
|
||||||
|
'localusers' or 'remotedomains', which should be a set() if supplied.
|
||||||
|
If ignore_user is set, ignore that user.
|
||||||
|
|
||||||
|
This function returns nothing; its result is performed by the
|
||||||
|
side-effect on the two passed sets. This allows easy accumulation of
|
||||||
|
member lists of multiple rooms at once if required.
|
||||||
|
"""
|
||||||
|
members = yield self.get_room_members(room_id)
|
||||||
|
for member in members:
|
||||||
|
if ignore_user is not None and member == ignore_user:
|
||||||
|
continue
|
||||||
|
|
||||||
|
if self.hs.is_mine(member):
|
||||||
|
if localusers is not None:
|
||||||
|
localusers.add(member)
|
||||||
|
else:
|
||||||
|
if remotedomains is not None:
|
||||||
|
remotedomains.add(member.domain)
|
||||||
|
|
||||||
|
@defer.inlineCallbacks
|
||||||
|
def update_membership(
|
||||||
|
self,
|
||||||
|
requester,
|
||||||
|
target,
|
||||||
|
room_id,
|
||||||
|
action,
|
||||||
|
txn_id=None,
|
||||||
|
remote_room_hosts=None,
|
||||||
|
third_party_signed=None,
|
||||||
|
ratelimit=True,
|
||||||
|
):
|
||||||
|
effective_membership_state = action
|
||||||
|
if action in ["kick", "unban"]:
|
||||||
|
effective_membership_state = "leave"
|
||||||
|
|
||||||
|
if third_party_signed is not None:
|
||||||
|
replication = self.hs.get_replication_layer()
|
||||||
|
yield replication.exchange_third_party_invite(
|
||||||
|
third_party_signed["sender"],
|
||||||
|
target.to_string(),
|
||||||
|
room_id,
|
||||||
|
third_party_signed,
|
||||||
|
)
|
||||||
|
|
||||||
|
msg_handler = self.hs.get_handlers().message_handler
|
||||||
|
|
||||||
|
content = {"membership": effective_membership_state}
|
||||||
|
if requester.is_guest:
|
||||||
|
content["kind"] = "guest"
|
||||||
|
|
||||||
|
event, context = yield msg_handler.create_event(
|
||||||
|
{
|
||||||
|
"type": EventTypes.Member,
|
||||||
|
"content": content,
|
||||||
|
"room_id": room_id,
|
||||||
|
"sender": requester.user.to_string(),
|
||||||
|
"state_key": target.to_string(),
|
||||||
|
|
||||||
|
# For backwards compatibility:
|
||||||
|
"membership": effective_membership_state,
|
||||||
|
},
|
||||||
|
token_id=requester.access_token_id,
|
||||||
|
txn_id=txn_id,
|
||||||
|
)
|
||||||
|
|
||||||
|
old_state = context.current_state.get((EventTypes.Member, event.state_key))
|
||||||
|
old_membership = old_state.content.get("membership") if old_state else None
|
||||||
|
if action == "unban" and old_membership != "ban":
|
||||||
|
raise SynapseError(
|
||||||
|
403,
|
||||||
|
"Cannot unban user who was not banned (membership=%s)" % old_membership,
|
||||||
|
errcode=Codes.BAD_STATE
|
||||||
|
)
|
||||||
|
if old_membership == "ban" and action != "unban":
|
||||||
|
raise SynapseError(
|
||||||
|
403,
|
||||||
|
"Cannot %s user who was is banned" % (action,),
|
||||||
|
errcode=Codes.BAD_STATE
|
||||||
|
)
|
||||||
|
|
||||||
|
member_handler = self.hs.get_handlers().room_member_handler
|
||||||
|
yield member_handler.send_membership_event(
|
||||||
|
requester,
|
||||||
|
event,
|
||||||
|
context,
|
||||||
|
ratelimit=ratelimit,
|
||||||
|
remote_room_hosts=remote_room_hosts,
|
||||||
|
)
|
||||||
|
|
||||||
|
    @defer.inlineCallbacks
    def send_membership_event(
            self,
            requester,
            event,
            context,
            remote_room_hosts=None,
            ratelimit=True,
    ):
        """
        Change the membership status of a user in a room.

        Args:
            requester (Requester): The local user who requested the membership
                event. If None, certain checks, like whether this homeserver can
                act as the sender, will be skipped.
            event (SynapseEvent): The membership event.
            context: The context of the event.
            remote_room_hosts ([str]): Homeservers which are likely to already be
                in the room, and could be danced with in order to join this
                homeserver for the first time.
            ratelimit (bool): Whether to rate limit this request.
        Raises:
            SynapseError if there was a problem changing the membership.
        """
        remote_room_hosts = remote_room_hosts or []

        target_user = UserID.from_string(event.state_key)
        room_id = event.room_id

        if requester is not None:
            sender = UserID.from_string(event.sender)
            assert sender == requester.user, (
                "Sender (%s) must be same as requester (%s)" %
                (sender, requester.user)
            )
            assert self.hs.is_mine(sender), "Sender must be our own: %s" % (sender,)
        else:
            requester = Requester(target_user, None, False)

        message_handler = self.hs.get_handlers().message_handler
        prev_event = message_handler.deduplicate_state_event(event, context)
        if prev_event is not None:
            return

        action = "send"

        if event.membership == Membership.JOIN:
            if requester.is_guest and not self._can_guest_join(context.current_state):
                # This should be an auth check, but guests are a local concept,
                # so don't really fit into the general auth process.
                raise AuthError(403, "Guest access not allowed")
            do_remote_join_dance, remote_room_hosts = self._should_do_dance(
                context,
                (self.get_inviter(event.state_key, context.current_state)),
                remote_room_hosts,
            )
            if do_remote_join_dance:
                action = "remote_join"
        elif event.membership == Membership.LEAVE:
            is_host_in_room = self.is_host_in_room(context.current_state)

            if not is_host_in_room:
                # perhaps we've been invited
                inviter = self.get_inviter(
                    target_user.to_string(), context.current_state
                )
                if not inviter:
                    raise SynapseError(404, "Not a known room")

                if self.hs.is_mine(inviter):
                    # the inviter was on our server, but has now left. Carry on
                    # with the normal rejection codepath.
                    #
                    # This is a bit of a hack, because the room might still be
                    # active on other servers.
                    pass
                else:
                    # send the rejection to the inviter's HS.
                    remote_room_hosts = remote_room_hosts + [inviter.domain]
                    action = "remote_reject"

        federation_handler = self.hs.get_handlers().federation_handler

        if action == "remote_join":
            if len(remote_room_hosts) == 0:
                raise SynapseError(404, "No known servers")

            # We don't do an auth check if we are doing an invite
            # join dance for now, since we're kinda implicitly checking
            # that we are allowed to join when we decide whether or not we
            # need to do the invite/join dance.
            yield federation_handler.do_invite_join(
                remote_room_hosts,
                event.room_id,
                event.user_id,
                event.content,
            )
        elif action == "remote_reject":
            yield federation_handler.do_remotely_reject_invite(
                remote_room_hosts,
                room_id,
                event.user_id
            )
        else:
            yield self.handle_new_client_event(
                requester,
                event,
                context,
                extra_users=[target_user],
                ratelimit=ratelimit,
            )

        prev_member_event = context.current_state.get(
            (EventTypes.Member, target_user.to_string()),
            None
        )

        if event.membership == Membership.JOIN:
            if not prev_member_event or prev_member_event.membership != Membership.JOIN:
                # Only fire user_joined_room if the user has actually joined the
                # room. Don't bother if the user is just changing their profile
                # info.
                yield user_joined_room(self.distributor, target_user, room_id)
        elif event.membership == Membership.LEAVE:
            if prev_member_event and prev_member_event.membership == Membership.JOIN:
                user_left_room(self.distributor, target_user, room_id)
    def _can_guest_join(self, current_state):
        """
        Returns whether a guest can join a room based on its current state.
        """
        guest_access = current_state.get((EventTypes.GuestAccess, ""), None)
        return (
            guest_access
            and guest_access.content
            and "guest_access" in guest_access.content
            and guest_access.content["guest_access"] == "can_join"
        )

    def _should_do_dance(self, context, inviter, room_hosts=None):
        # TODO: Shouldn't this be remote_room_host?
        room_hosts = room_hosts or []

        is_host_in_room = self.is_host_in_room(context.current_state)
        if is_host_in_room:
            return False, room_hosts

        if inviter and not self.hs.is_mine(inviter):
            room_hosts.append(inviter.domain)

        return True, room_hosts
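For reference, _can_guest_join above is inspecting the room's m.room.guest_access state event; the event it expects looks like the following (illustrative content, not part of the diff):

    # The state event _can_guest_join checks before admitting a guest.
    {
        "type": "m.room.guest_access",
        "state_key": "",
        "content": {"guest_access": "can_join"},
    }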
    @defer.inlineCallbacks
    def lookup_room_alias(self, room_alias):
        """
        Get the room ID associated with a room alias.

        Args:
            room_alias (RoomAlias): The alias to look up.
        Returns:
            A tuple of:
                The room ID as a RoomID object.
                Hosts likely to be participating in the room ([str]).
        Raises:
            SynapseError if room alias could not be found.
        """
        directory_handler = self.hs.get_handlers().directory_handler
        mapping = yield directory_handler.get_association(room_alias)

        if not mapping:
            raise SynapseError(404, "No such room alias")

        room_id = mapping["room_id"]
        servers = mapping["servers"]

        defer.returnValue((RoomID.from_string(room_id), servers))

    def get_inviter(self, user_id, current_state):
        prev_state = current_state.get((EventTypes.Member, user_id))
        if prev_state and prev_state.membership == Membership.INVITE:
            return UserID.from_string(prev_state.user_id)
        return None
    @defer.inlineCallbacks
    def get_joined_rooms_for_user(self, user):
        """Returns a set of room IDs that the user is currently joined to."""

        rooms = yield self.store.get_rooms_for_user(
            user.to_string(),
        )

        # For some reason the list of events contains duplicates
        # TODO(paul): work out why because I really don't think it should
        room_ids = set(r.room_id for r in rooms)

        defer.returnValue(room_ids)
    @defer.inlineCallbacks
    def do_3pid_invite(
            self,
            room_id,
            inviter,
            medium,
            address,
            id_server,
            requester,
            txn_id
    ):
        invitee = yield self._lookup_3pid(
            id_server, medium, address
        )

        if invitee:
            handler = self.hs.get_handlers().room_member_handler
            yield handler.update_membership(
                requester,
                UserID.from_string(invitee),
                room_id,
                "invite",
                txn_id=txn_id,
            )
        else:
            yield self._make_and_store_3pid_invite(
                requester,
                id_server,
                medium,
                address,
                room_id,
                inviter,
                txn_id=txn_id
            )
    @defer.inlineCallbacks
    def _lookup_3pid(self, id_server, medium, address):
        """Looks up a 3pid in the passed identity server.

        Args:
            id_server (str): The server name (including port, if required)
                of the identity server to use.
            medium (str): The type of the third party identifier (e.g. "email").
            address (str): The third party identifier (e.g. "foo@example.com").

        Returns:
            str: the matrix ID of the 3pid, or None if it is not recognized.
        """
        try:
            data = yield self.hs.get_simple_http_client().get_json(
                "%s%s/_matrix/identity/api/v1/lookup" % (id_server_scheme, id_server,),
                {
                    "medium": medium,
                    "address": address,
                }
            )

            if "mxid" in data:
                if "signatures" not in data:
                    raise AuthError(401, "No signatures on 3pid binding")
                self.verify_any_signature(data, id_server)
                defer.returnValue(data["mxid"])

        except IOError as e:
            logger.warn("Error from identity server lookup: %s" % (e,))
            defer.returnValue(None)

    @defer.inlineCallbacks
    def verify_any_signature(self, data, server_hostname):
        if server_hostname not in data["signatures"]:
            raise AuthError(401, "No signature from server %s" % (server_hostname,))
        for key_name, signature in data["signatures"][server_hostname].items():
            key_data = yield self.hs.get_simple_http_client().get_json(
                "%s%s/_matrix/identity/api/v1/pubkey/%s" %
                (id_server_scheme, server_hostname, key_name,),
            )
            if "public_key" not in key_data:
                raise AuthError(401, "No public key named %s from %s" %
                                (key_name, server_hostname,))
            verify_signed_json(
                data,
                server_hostname,
                decode_verify_key_bytes(key_name, decode_base64(key_data["public_key"]))
            )
            return
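For context, a successful identity-server lookup response that _lookup_3pid and verify_any_signature expect has roughly this shape; the mxid, server name and signature below are made up:

    # Illustrative /_matrix/identity/api/v1/lookup response (not from the diff).
    {
        "medium": "email",
        "address": "foo@example.com",
        "mxid": "@foo:example.com",
        "signatures": {
            "matrix.org": {"ed25519:0": "<base64 signature>"},
        },
    }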
    @defer.inlineCallbacks
    def _make_and_store_3pid_invite(
            self,
            requester,
            id_server,
            medium,
            address,
            room_id,
            user,
            txn_id
    ):
        room_state = yield self.hs.get_state_handler().get_current_state(room_id)

        inviter_display_name = ""
        inviter_avatar_url = ""
        member_event = room_state.get((EventTypes.Member, user.to_string()))
        if member_event:
            inviter_display_name = member_event.content.get("displayname", "")
            inviter_avatar_url = member_event.content.get("avatar_url", "")

        canonical_room_alias = ""
        canonical_alias_event = room_state.get((EventTypes.CanonicalAlias, ""))
        if canonical_alias_event:
            canonical_room_alias = canonical_alias_event.content.get("alias", "")

        room_name = ""
        room_name_event = room_state.get((EventTypes.Name, ""))
        if room_name_event:
            room_name = room_name_event.content.get("name", "")

        room_join_rules = ""
        join_rules_event = room_state.get((EventTypes.JoinRules, ""))
        if join_rules_event:
            room_join_rules = join_rules_event.content.get("join_rule", "")

        room_avatar_url = ""
        room_avatar_event = room_state.get((EventTypes.RoomAvatar, ""))
        if room_avatar_event:
            room_avatar_url = room_avatar_event.content.get("url", "")

        token, public_keys, fallback_public_key, display_name = (
            yield self._ask_id_server_for_third_party_invite(
                id_server=id_server,
                medium=medium,
                address=address,
                room_id=room_id,
                inviter_user_id=user.to_string(),
                room_alias=canonical_room_alias,
                room_avatar_url=room_avatar_url,
                room_join_rules=room_join_rules,
                room_name=room_name,
                inviter_display_name=inviter_display_name,
                inviter_avatar_url=inviter_avatar_url
            )
        )

        msg_handler = self.hs.get_handlers().message_handler
        yield msg_handler.create_and_send_nonmember_event(
            requester,
            {
                "type": EventTypes.ThirdPartyInvite,
                "content": {
                    "display_name": display_name,
                    "public_keys": public_keys,

                    # For backwards compatibility:
                    "key_validity_url": fallback_public_key["key_validity_url"],
                    "public_key": fallback_public_key["public_key"],
                },
                "room_id": room_id,
                "sender": user.to_string(),
                "state_key": token,
            },
            txn_id=txn_id,
        )
    @defer.inlineCallbacks
    def _ask_id_server_for_third_party_invite(
            self,
            id_server,
            medium,
            address,
            room_id,
            inviter_user_id,
            room_alias,
            room_avatar_url,
            room_join_rules,
            room_name,
            inviter_display_name,
            inviter_avatar_url
    ):
        """
        Asks an identity server for a third party invite.

        Args:
            id_server (str): hostname + optional port for the identity server.
            medium (str): The literal string "email".
            address (str): The third party address being invited.
            room_id (str): The ID of the room to which the user is invited.
            inviter_user_id (str): The user ID of the inviter.
            room_alias (str): An alias for the room, for cosmetic notifications.
            room_avatar_url (str): The URL of the room's avatar, for cosmetic
                notifications.
            room_join_rules (str): The join rules of the room (e.g. "public").
            room_name (str): The m.room.name of the room.
            inviter_display_name (str): The current display name of the
                inviter.
            inviter_avatar_url (str): The URL of the inviter's avatar.

        Returns:
            A deferred tuple containing:
                token (str): The token which must be signed to prove authenticity.
                public_keys ([{"public_key": str, "key_validity_url": str}]):
                    public_key is a base64-encoded ed25519 public key.
                fallback_public_key: One element from public_keys.
                display_name (str): A user-friendly name to represent the invited
                    user.
        """

        is_url = "%s%s/_matrix/identity/api/v1/store-invite" % (
            id_server_scheme, id_server,
        )

        invite_config = {
            "medium": medium,
            "address": address,
            "room_id": room_id,
            "room_alias": room_alias,
            "room_avatar_url": room_avatar_url,
            "room_join_rules": room_join_rules,
            "room_name": room_name,
            "sender": inviter_user_id,
            "sender_display_name": inviter_display_name,
            "sender_avatar_url": inviter_avatar_url,
        }

        if self.hs.config.invite_3pid_guest:
            registration_handler = self.hs.get_handlers().registration_handler
            guest_access_token = yield registration_handler.guest_access_token_for(
                medium=medium,
                address=address,
                inviter_user_id=inviter_user_id,
            )

            guest_user_info = yield self.hs.get_auth().get_user_by_access_token(
                guest_access_token
            )

            invite_config.update({
                "guest_access_token": guest_access_token,
                "guest_user_id": guest_user_info["user"].to_string(),
            })

        data = yield self.hs.get_simple_http_client().post_urlencoded_get_json(
            is_url,
            invite_config
        )
        # TODO: Check for success
        token = data["token"]
        public_keys = data.get("public_keys", [])
        if "public_key" in data:
            fallback_public_key = {
                "public_key": data["public_key"],
                "key_validity_url": "%s%s/_matrix/identity/api/v1/pubkey/isvalid" % (
                    id_server_scheme, id_server,
                ),
            }
        else:
            fallback_public_key = public_keys[0]

        if not public_keys:
            public_keys.append(fallback_public_key)
        display_name = data["display_name"]
        defer.returnValue((token, public_keys, fallback_public_key, display_name))
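The store-invite response the code above unpacks looks roughly like this; only the keys matter, the values here are placeholders and not taken from the diff:

    # Illustrative store-invite response shape.
    {
        "token": "abc123",
        "public_key": "<base64 ed25519 key>",
        "public_keys": [
            {
                "public_key": "<base64 ed25519 key>",
                "key_validity_url": "https://id.example.com/_matrix/identity/api/v1/pubkey/isvalid",
            },
        ],
        "display_name": "f...o@e...com",
    }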
    @defer.inlineCallbacks
    def forget(self, user, room_id):
        user_id = user.to_string()

        member = yield self.state_handler.get_current_state(
            room_id=room_id,
            event_type=EventTypes.Member,
            state_key=user_id
        )
        membership = member.membership if member else None

        if membership is not None and membership != Membership.LEAVE:
            raise SynapseError(400, "User %s in room %s" % (
                user_id, room_id
            ))

        if membership:
            yield self.store.forget(user_id, room_id)
@@ -17,8 +17,8 @@ from ._base import BaseHandler

 from synapse.streams.config import PaginationConfig
 from synapse.api.constants import Membership, EventTypes
-from synapse.util import unwrapFirstError
-from synapse.util.logcontext import LoggingContext, preserve_fn
+from synapse.util.async import concurrently_execute
+from synapse.util.logcontext import LoggingContext
 from synapse.util.metrics import Measure
 from synapse.util.caches.response_cache import ResponseCache
 from synapse.push.clientformat import format_push_rules_for_user
@@ -250,58 +250,50 @@ class SyncHandler(BaseHandler):
         joined = []
         invited = []
         archived = []
-        deferreds = []

-        room_list_chunks = [room_list[i:i + 10] for i in xrange(0, len(room_list), 10)]
-        for room_list_chunk in room_list_chunks:
-            for event in room_list_chunk:
-                if event.membership == Membership.JOIN:
-                    room_sync_deferred = preserve_fn(
-                        self.full_state_sync_for_joined_room
-                    )(
-                        room_id=event.room_id,
-                        sync_config=sync_config,
-                        now_token=now_token,
-                        timeline_since_token=timeline_since_token,
-                        ephemeral_by_room=ephemeral_by_room,
-                        tags_by_room=tags_by_room,
-                        account_data_by_room=account_data_by_room,
-                    )
-                    room_sync_deferred.addCallback(joined.append)
-                    deferreds.append(room_sync_deferred)
-                elif event.membership == Membership.INVITE:
-                    invite = yield self.store.get_event(event.event_id)
-                    invited.append(InvitedSyncResult(
-                        room_id=event.room_id,
-                        invite=invite,
-                    ))
-                elif event.membership in (Membership.LEAVE, Membership.BAN):
-                    # Always send down rooms we were banned or kicked from.
-                    if not sync_config.filter_collection.include_leave:
-                        if event.membership == Membership.LEAVE:
-                            if sync_config.user.to_string() == event.sender:
-                                continue
-
-                    leave_token = now_token.copy_and_replace(
-                        "room_key", "s%d" % (event.stream_ordering,)
-                    )
-                    room_sync_deferred = preserve_fn(
-                        self.full_state_sync_for_archived_room
-                    )(
-                        sync_config=sync_config,
-                        room_id=event.room_id,
-                        leave_event_id=event.event_id,
-                        leave_token=leave_token,
-                        timeline_since_token=timeline_since_token,
-                        tags_by_room=tags_by_room,
-                        account_data_by_room=account_data_by_room,
-                    )
-                    room_sync_deferred.addCallback(archived.append)
-                    deferreds.append(room_sync_deferred)
-
-        yield defer.gatherResults(
-            deferreds, consumeErrors=True
-        ).addErrback(unwrapFirstError)
+        user_id = sync_config.user.to_string()
+
+        @defer.inlineCallbacks
+        def _generate_room_entry(event):
+            if event.membership == Membership.JOIN:
+                room_result = yield self.full_state_sync_for_joined_room(
+                    room_id=event.room_id,
+                    sync_config=sync_config,
+                    now_token=now_token,
+                    timeline_since_token=timeline_since_token,
+                    ephemeral_by_room=ephemeral_by_room,
+                    tags_by_room=tags_by_room,
+                    account_data_by_room=account_data_by_room,
+                )
+                joined.append(room_result)
+            elif event.membership == Membership.INVITE:
+                invite = yield self.store.get_event(event.event_id)
+                invited.append(InvitedSyncResult(
+                    room_id=event.room_id,
+                    invite=invite,
+                ))
+            elif event.membership in (Membership.LEAVE, Membership.BAN):
+                # Always send down rooms we were banned or kicked from.
+                if not sync_config.filter_collection.include_leave:
+                    if event.membership == Membership.LEAVE:
+                        if user_id == event.sender:
+                            return
+
+                leave_token = now_token.copy_and_replace(
+                    "room_key", "s%d" % (event.stream_ordering,)
+                )
+                room_result = yield self.full_state_sync_for_archived_room(
+                    sync_config=sync_config,
+                    room_id=event.room_id,
+                    leave_event_id=event.event_id,
+                    leave_token=leave_token,
+                    timeline_since_token=timeline_since_token,
+                    tags_by_room=tags_by_room,
+                    account_data_by_room=account_data_by_room,
+                )
+                archived.append(room_result)
+
+        yield concurrently_execute(_generate_room_entry, room_list, 10)

         account_data_for_user = sync_config.filter_collection.filter_account_data(
             self.account_data_for_user(account_data)
@@ -671,7 +663,8 @@ class SyncHandler(BaseHandler):
     def load_filtered_recents(self, room_id, sync_config, now_token,
                               since_token=None, recents=None, newly_joined_room=False):
         """
-        :returns a Deferred TimelineBatch
+        Returns:
+            a Deferred TimelineBatch
         """
         with Measure(self.clock, "load_filtered_recents"):
             filtering_factor = 2
@@ -838,8 +831,11 @@ class SyncHandler(BaseHandler):
         """
         Get the room state after the given event

-        :param synapse.events.EventBase event: event of interest
-        :return: A Deferred map from ((type, state_key)->Event)
+        Args:
+            event(synapse.events.EventBase): event of interest
+
+        Returns:
+            A Deferred map from ((type, state_key)->Event)
         """
         state = yield self.store.get_state_for_event(event.event_id)
         if event.is_state():
@@ -850,9 +846,13 @@ class SyncHandler(BaseHandler):
     @defer.inlineCallbacks
     def get_state_at(self, room_id, stream_position):
         """ Get the room state at a particular stream position
-        :param str room_id: room for which to get state
-        :param StreamToken stream_position: point at which to get state
-        :returns: A Deferred map from ((type, state_key)->Event)
+
+        Args:
+            room_id(str): room for which to get state
+            stream_position(StreamToken): point at which to get state
+
+        Returns:
+            A Deferred map from ((type, state_key)->Event)
         """
         last_events, token = yield self.store.get_recent_events_for_room(
             room_id, end_token=stream_position.room_key, limit=1,
@@ -873,15 +873,18 @@ class SyncHandler(BaseHandler):
         """ Works out the differnce in state between the start of the timeline
         and the previous sync.

-        :param str room_id
-        :param TimelineBatch batch: The timeline batch for the room that will
-            be sent to the user.
-        :param sync_config
-        :param str since_token: Token of the end of the previous batch. May be None.
-        :param str now_token: Token of the end of the current batch.
-        :param bool full_state: Whether to force returning the full state.
-
-        :returns A new event dictionary
+        Args:
+            room_id(str):
+            batch(synapse.handlers.sync.TimelineBatch): The timeline batch for
+                the room that will be sent to the user.
+            sync_config(synapse.handlers.sync.SyncConfig):
+            since_token(str|None): Token of the end of the previous batch. May
+                be None.
+            now_token(str): Token of the end of the current batch.
+            full_state(bool): Whether to force returning the full state.
+
+        Returns:
+            A deferred new event dictionary
         """
         # TODO(mjark) Check if the state events were received by the server
         # after the previous sync, since we need to include those state
@@ -953,11 +956,13 @@ class SyncHandler(BaseHandler):
         Check if the user has just joined the given room (so should
         be given the full state)

-        :param sync_config:
-        :param dict[(str,str), synapse.events.FrozenEvent] state_delta: the
-           difference in state since the last sync
-
-        :returns A deferred Tuple (state_delta, limited)
+        Args:
+            sync_config(synapse.handlers.sync.SyncConfig):
+            state_delta(dict[(str,str), synapse.events.FrozenEvent]): the
+                difference in state since the last sync
+
+        Returns:
+            A deferred Tuple (state_delta, limited)
         """
         join_event = state_delta.get((
             EventTypes.Member, sync_config.user.to_string()), None)
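The reworked sync code above leans on concurrently_execute from synapse.util.async to run _generate_room_entry over the room list with bounded parallelism. A minimal sketch of that helper's behaviour, an illustration under stated assumptions rather than the code from this commit, looks like:

    # Sketch only: run `func` over `args` with at most `limit` calls in flight,
    # using Twisted deferreds; this is the behaviour the sync handler relies on.
    from twisted.internet import defer

    @defer.inlineCallbacks
    def concurrently_execute(func, args, limit):
        it = iter(args)

        @defer.inlineCallbacks
        def _worker():
            # each worker pulls items from the shared iterator until exhausted
            for arg in it:
                yield func(arg)

        yield defer.gatherResults(
            [_worker() for _ in range(limit)],
            consumeErrors=True,
        )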
@@ -26,14 +26,19 @@ logger = logging.getLogger(__name__)
 def parse_integer(request, name, default=None, required=False):
     """Parse an integer parameter from the request string

-    :param request: the twisted HTTP request.
-    :param name (str): the name of the query parameter.
-    :param default: value to use if the parameter is absent, defaults to None.
-    :param required (bool): whether to raise a 400 SynapseError if the
-        parameter is absent, defaults to False.
-    :return: An int value or the default.
-    :raises
-        SynapseError if the parameter is absent and required, or if the
+    Args:
+        request: the twisted HTTP request.
+        name (str): the name of the query parameter.
+        default (int|None): value to use if the parameter is absent, defaults
+            to None.
+        required (bool): whether to raise a 400 SynapseError if the
+            parameter is absent, defaults to False.
+
+    Returns:
+        int|None: An int value or the default.
+
+    Raises:
+        SynapseError: if the parameter is absent and required, or if the
             parameter is present and not an integer.
     """
     if name in request.args:
@@ -53,14 +58,19 @@ def parse_integer(request, name, default=None, required=False):
 def parse_boolean(request, name, default=None, required=False):
     """Parse a boolean parameter from the request query string

-    :param request: the twisted HTTP request.
-    :param name (str): the name of the query parameter.
-    :param default: value to use if the parameter is absent, defaults to None.
-    :param required (bool): whether to raise a 400 SynapseError if the
-        parameter is absent, defaults to False.
-    :return: A bool value or the default.
-    :raises
-        SynapseError if the parameter is absent and required, or if the
+    Args:
+        request: the twisted HTTP request.
+        name (str): the name of the query parameter.
+        default (bool|None): value to use if the parameter is absent, defaults
+            to None.
+        required (bool): whether to raise a 400 SynapseError if the
+            parameter is absent, defaults to False.
+
+    Returns:
+        bool|None: A bool value or the default.
+
+    Raises:
+        SynapseError: if the parameter is absent and required, or if the
             parameter is present and not one of "true" or "false".
     """

@@ -88,15 +98,20 @@ def parse_string(request, name, default=None, required=False,
                  allowed_values=None, param_type="string"):
     """Parse a string parameter from the request query string.

-    :param request: the twisted HTTP request.
-    :param name (str): the name of the query parameter.
-    :param default: value to use if the parameter is absent, defaults to None.
-    :param required (bool): whether to raise a 400 SynapseError if the
-        parameter is absent, defaults to False.
-    :param allowed_values (list): List of allowed values for the string,
-        or None if any value is allowed, defaults to None
-    :return: A string value or the default.
-    :raises
+    Args:
+        request: the twisted HTTP request.
+        name (str): the name of the query parameter.
+        default (str|None): value to use if the parameter is absent, defaults
+            to None.
+        required (bool): whether to raise a 400 SynapseError if the
+            parameter is absent, defaults to False.
+        allowed_values (list[str]): List of allowed values for the string,
+            or None if any value is allowed, defaults to None
+
+    Returns:
+        str|None: A string value or the default.
+
+    Raises:
         SynapseError if the parameter is absent and required, or if the
             parameter is present, must be one of a list of allowed values and
             is not one of those allowed values.
@@ -122,9 +137,13 @@ def parse_string(request, name, default=None, required=False,
 def parse_json_value_from_request(request):
     """Parse a JSON value from the body of a twisted HTTP request.

-    :param request: the twisted HTTP request.
-    :returns: The JSON value.
-    :raises
+    Args:
+        request: the twisted HTTP request.
+
+    Returns:
+        The JSON value.
+
+    Raises:
         SynapseError if the request body couldn't be decoded as JSON.
     """
     try:
@@ -143,8 +162,10 @@ def parse_json_value_from_request(request):
 def parse_json_object_from_request(request):
     """Parse a JSON object from the body of a twisted HTTP request.

-    :param request: the twisted HTTP request.
-    :raises
+    Args:
+        request: the twisted HTTP request.
+
+    Raises:
         SynapseError if the request body couldn't be decoded as JSON or
             if it wasn't a JSON object.
     """
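Typical usage of the helpers documented above, inside a hypothetical servlet's on_GET handler (the parameter names here are made up for illustration):

    # Hypothetical handler showing the parse_* helpers; `request` is the
    # twisted HTTP request passed to every servlet method.
    def on_GET(self, request):
        limit = parse_integer(request, "limit", default=10)
        include_all = parse_boolean(request, "include_all", default=False)
        order = parse_string(
            request, "order", default="forward",
            allowed_values=["forward", "backward"],
        )
        return (200, {"limit": limit, "include_all": include_all, "order": order})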
@@ -503,13 +503,14 @@ class Notifier(object):
     def wait_for_replication(self, callback, timeout):
         """Wait for an event to happen.

-        :param callback:
-            Gets called whenever an event happens. If this returns a truthy
-            value then ``wait_for_replication`` returns, otherwise it waits
-            for another event.
-        :param int timeout:
-            How many milliseconds to wait for callback return a truthy value.
-        :returns:
+        Args:
+            callback: Gets called whenever an event happens. If this returns a
+                truthy value then ``wait_for_replication`` returns, otherwise
+                it waits for another event.
+            timeout: How many milliseconds to wait for callback return a truthy
+                value.
+
+        Returns:
             A deferred that resolves with the value returned by the callback.
         """
         listener = _NotificationListener(None)
@@ -19,9 +19,11 @@ import copy
 def list_with_base_rules(rawrules):
     """Combine the list of rules set by the user with the default push rules

-    :param list rawrules: The rules the user has modified or set.
-    :returns: A new list with the rules set by the user combined with the
-        defaults.
+    Args:
+        rawrules(list): The rules the user has modified or set.
+
+    Returns:
+        A new list with the rules set by the user combined with the defaults.
     """
     ruleslist = []

@@ -133,8 +133,9 @@ class PushRuleEvaluator:
             enabled = self.enabled_map.get(r['rule_id'], None)
             if enabled is not None and not enabled:
                 continue
-
-            if not r.get("enabled", True):
+            elif enabled is None and not r.get("enabled", True):
+                # if no override, check enabled on the rule itself
+                # (may have come from a base rule)
                 continue

             conditions = r['conditions']
@@ -37,6 +37,7 @@ REQUIREMENTS = {
     "pysaml2>=3.0.0,<4.0.0": ["saml2>=3.0.0,<4.0.0"],
     "pymacaroons-pynacl": ["pymacaroons"],
     "lxml>=3.6.0": ["lxml"],
+    "pyjwt": ["jwt"],
 }
 CONDITIONAL_REQUIREMENTS = {
     "web_client": {
@@ -38,6 +38,7 @@ STREAM_NAMES = (
     ("backfill",),
     ("push_rules",),
     ("pushers",),
+    ("state",),
 )


@@ -123,6 +124,7 @@ class ReplicationResource(Resource):
         backfill_token = yield self.store.get_current_backfill_token()
         push_rules_token, room_stream_token = self.store.get_push_rules_stream_token()
         pushers_token = self.store.get_pushers_stream_token()
+        state_token = self.store.get_state_stream_token()

         defer.returnValue(_ReplicationToken(
             room_stream_token,
@@ -133,6 +135,7 @@ class ReplicationResource(Resource):
             backfill_token,
             push_rules_token,
             pushers_token,
+            state_token,
         ))

     @request_handler
@@ -156,6 +159,7 @@ class ReplicationResource(Resource):
         yield self.receipts(writer, current_token, limit)
         yield self.push_rules(writer, current_token, limit)
         yield self.pushers(writer, current_token, limit)
+        yield self.state(writer, current_token, limit)
         self.streams(writer, current_token)

         logger.info("Replicated %d rows", writer.total)
@@ -200,16 +204,27 @@ class ReplicationResource(Resource):
             request_events = current_token.events
         if request_backfill is None:
             request_backfill = current_token.backfill
-        events_rows, backfill_rows = yield self.store.get_all_new_events(
+        res = yield self.store.get_all_new_events(
             request_backfill, request_events,
             current_token.backfill, current_token.events,
             limit
         )
-        writer.write_header_and_rows(
-            "events", events_rows, ("position", "internal", "json")
-        )
-        writer.write_header_and_rows(
-            "backfill", backfill_rows, ("position", "internal", "json")
-        )
+        writer.write_header_and_rows("events", res.new_forward_events, (
+            "position", "internal", "json", "state_group"
+        ))
+        writer.write_header_and_rows("backfill", res.new_backfill_events, (
+            "position", "internal", "json", "state_group"
+        ))
+        writer.write_header_and_rows(
+            "forward_ex_outliers", res.forward_ex_outliers,
+            ("position", "event_id", "state_group")
+        )
+        writer.write_header_and_rows(
+            "backward_ex_outliers", res.backward_ex_outliers,
+            ("position", "event_id", "state_group")
+        )
+        writer.write_header_and_rows(
+            "state_resets", res.state_resets, ("position",)
+        )

     @defer.inlineCallbacks
@@ -320,6 +335,24 @@ class ReplicationResource(Resource):
             "position", "user_id", "app_id", "pushkey"
         ))

+    @defer.inlineCallbacks
+    def state(self, writer, current_token, limit):
+        current_position = current_token.state
+
+        state = parse_integer(writer.request, "state")
+        if state is not None:
+            state_groups, state_group_state = (
+                yield self.store.get_all_new_state_groups(
+                    state, current_position, limit
+                )
+            )
+            writer.write_header_and_rows("state_groups", state_groups, (
+                "position", "room_id", "event_id"
+            ))
+            writer.write_header_and_rows("state_group_state", state_group_state, (
+                "position", "type", "state_key", "event_id"
+            ))
+

 class _Writer(object):
     """Writes the streams as a JSON object as the response to the request"""
@@ -350,7 +383,7 @@ class _Writer(object):

 class _ReplicationToken(collections.namedtuple("_ReplicationToken", (
     "events", "presence", "typing", "receipts", "account_data", "backfill",
-    "push_rules", "pushers"
+    "push_rules", "pushers", "state"
 ))):
     __slots__ = []

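A hypothetical poll of the new state stream might look like the following; the mount point and response keys are assumptions based on how the replication resource names its streams, not something this diff specifies:

    # Illustration only: poll the replication resource for the new streams.
    import requests  # plain HTTP client, used purely for illustration

    resp = requests.get(
        "http://localhost:8008/_synapse/replication",
        params={"state": 0, "events": 0, "limit": 100},
    )
    # each stream comes back as {"headers": [...], "rows": [...]}
    print(resp.json().get("state_groups"))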
@@ -33,6 +33,9 @@ from saml2.client import Saml2Client

 import xml.etree.ElementTree as ET

+import jwt
+from jwt.exceptions import InvalidTokenError
+

 logger = logging.getLogger(__name__)

@@ -43,12 +46,16 @@ class LoginRestServlet(ClientV1RestServlet):
     SAML2_TYPE = "m.login.saml2"
     CAS_TYPE = "m.login.cas"
     TOKEN_TYPE = "m.login.token"
+    JWT_TYPE = "m.login.jwt"

     def __init__(self, hs):
         super(LoginRestServlet, self).__init__(hs)
         self.idp_redirect_url = hs.config.saml2_idp_redirect_url
         self.password_enabled = hs.config.password_enabled
         self.saml2_enabled = hs.config.saml2_enabled
+        self.jwt_enabled = hs.config.jwt_enabled
+        self.jwt_secret = hs.config.jwt_secret
+        self.jwt_algorithm = hs.config.jwt_algorithm
         self.cas_enabled = hs.config.cas_enabled
         self.cas_server_url = hs.config.cas_server_url
         self.cas_required_attributes = hs.config.cas_required_attributes
@@ -57,6 +64,8 @@ class LoginRestServlet(ClientV1RestServlet):

     def on_GET(self, request):
         flows = []
+        if self.jwt_enabled:
+            flows.append({"type": LoginRestServlet.JWT_TYPE})
         if self.saml2_enabled:
             flows.append({"type": LoginRestServlet.SAML2_TYPE})
         if self.cas_enabled:
@@ -98,6 +107,10 @@ class LoginRestServlet(ClientV1RestServlet):
                 "uri": "%s%s" % (self.idp_redirect_url, relay_state)
             }
             defer.returnValue((200, result))
+        elif self.jwt_enabled and (login_submission["type"] ==
+                                   LoginRestServlet.JWT_TYPE):
+            result = yield self.do_jwt_login(login_submission)
+            defer.returnValue(result)
         # TODO Delete this after all CAS clients switch to token login instead
         elif self.cas_enabled and (login_submission["type"] ==
                                    LoginRestServlet.CAS_TYPE):
@@ -209,6 +222,46 @@ class LoginRestServlet(ClientV1RestServlet):

         defer.returnValue((200, result))

+    @defer.inlineCallbacks
+    def do_jwt_login(self, login_submission):
+        token = login_submission['token']
+        if token is None:
+            raise LoginError(401, "Unauthorized", errcode=Codes.UNAUTHORIZED)
+
+        try:
+            payload = jwt.decode(token, self.jwt_secret, algorithms=[self.jwt_algorithm])
+        except InvalidTokenError:
+            raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED)
+
+        user = payload['user']
+        if user is None:
+            raise LoginError(401, "Invalid JWT", errcode=Codes.UNAUTHORIZED)
+
+        user_id = UserID.create(user, self.hs.hostname).to_string()
+        auth_handler = self.handlers.auth_handler
+        user_exists = yield auth_handler.does_user_exist(user_id)
+        if user_exists:
+            user_id, access_token, refresh_token = (
+                yield auth_handler.get_login_tuple_for_user_id(user_id)
+            )
+            result = {
+                "user_id": user_id,  # may have changed
+                "access_token": access_token,
+                "refresh_token": refresh_token,
+                "home_server": self.hs.hostname,
+            }
+        else:
+            user_id, access_token = (
+                yield self.handlers.registration_handler.register(localpart=user)
+            )
+            result = {
+                "user_id": user_id,  # may have changed
+                "access_token": access_token,
+                "home_server": self.hs.hostname,
+            }
+
+        defer.returnValue((200, result))
+
     # TODO Delete this after all CAS clients switch to token login instead
     def parse_cas_response(self, cas_response_body):
         root = ET.fromstring(cas_response_body)
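For context, a client can exercise the new m.login.jwt flow roughly as follows; this is an illustrative sketch (the server URL, shared secret and localpart are made up) rather than part of the change itself:

    # Mint a token with the same secret/algorithm the homeserver is configured
    # with (jwt_secret / jwt_algorithm), then post it to the v1 login endpoint.
    import json
    import urllib2

    import jwt  # pyjwt, newly added to REQUIREMENTS above

    token = jwt.encode({"user": "frank"}, "shared-secret", algorithm="HS256")
    body = json.dumps({"type": "m.login.jwt", "token": token})
    req = urllib2.Request(
        "https://localhost:8448/_matrix/client/api/v1/login",
        body,
        {"Content-Type": "application/json"},
    )
    print(json.loads(urllib2.urlopen(req).read())["access_token"])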
@@ -405,6 +405,42 @@ class RoomEventContext(ClientV1RestServlet):
         defer.returnValue((200, results))


+class RoomForgetRestServlet(ClientV1RestServlet):
+    def register(self, http_server):
+        PATTERNS = ("/rooms/(?P<room_id>[^/]*)/forget")
+        register_txn_path(self, PATTERNS, http_server)
+
+    @defer.inlineCallbacks
+    def on_POST(self, request, room_id, txn_id=None):
+        requester = yield self.auth.get_user_by_req(
+            request,
+            allow_guest=False,
+        )
+
+        yield self.handlers.room_member_handler.forget(
+            user=requester.user,
+            room_id=room_id,
+        )
+
+        defer.returnValue((200, {}))
+
+    @defer.inlineCallbacks
+    def on_PUT(self, request, room_id, txn_id):
+        try:
+            defer.returnValue(
+                self.txns.get_client_transaction(request, txn_id)
+            )
+        except KeyError:
+            pass
+
+        response = yield self.on_POST(
+            request, room_id, txn_id
+        )
+
+        self.txns.store_client_transaction(request, txn_id, response)
+        defer.returnValue(response)
+
+
 # TODO: Needs unit testing
 class RoomMembershipRestServlet(ClientV1RestServlet):

@@ -624,6 +660,7 @@ def register_servlets(hs, http_server):
     RoomMemberListRestServlet(hs).register(http_server)
     RoomMessageListRestServlet(hs).register(http_server)
     JoinRoomAliasServlet(hs).register(http_server)
+    RoomForgetRestServlet(hs).register(http_server)
     RoomMembershipRestServlet(hs).register(http_server)
    RoomSendEventRestServlet(hs).register(http_server)
     PublicRoomListRestServlet(hs).register(http_server)
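A hypothetical client call against the new forget endpoint (IDs and access token are placeholders; the handler only allows it once the user has left the room):

    # Illustration only: POST to the servlet registered above.
    import json
    import urllib2

    url = (
        "https://localhost:8448/_matrix/client/api/v1"
        "/rooms/%21abc123%3Alocalhost/forget?access_token=MDAx..."
    )
    req = urllib2.Request(url, json.dumps({}), {"Content-Type": "application/json"})
    print(urllib2.urlopen(req).read())  # "{}" on success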
@@ -199,15 +199,17 @@ class SyncRestServlet(RestServlet):
         """
         Encode the joined rooms in a sync result

-        :param list[synapse.handlers.sync.JoinedSyncResult] rooms: list of sync
-            results for rooms this user is joined to
-        :param int time_now: current time - used as a baseline for age
-            calculations
-        :param int token_id: ID of the user's auth token - used for namespacing
-            of transaction IDs
-
-        :return: the joined rooms list, in our response format
-        :rtype: dict[str, dict[str, object]]
+        Args:
+            rooms(list[synapse.handlers.sync.JoinedSyncResult]): list of sync
+                results for rooms this user is joined to
+            time_now(int): current time - used as a baseline for age
+                calculations
+            token_id(int): ID of the user's auth token - used for namespacing
+                of transaction IDs
+
+        Returns:
+            dict[str, dict[str, object]]: the joined rooms list, in our
+                response format
         """
         joined = {}
         for room in rooms:
@@ -221,15 +223,17 @@ class SyncRestServlet(RestServlet):
         """
         Encode the invited rooms in a sync result

-        :param list[synapse.handlers.sync.InvitedSyncResult] rooms: list of
-            sync results for rooms this user is joined to
-        :param int time_now: current time - used as a baseline for age
-            calculations
-        :param int token_id: ID of the user's auth token - used for namespacing
-            of transaction IDs
-
-        :return: the invited rooms list, in our response format
-        :rtype: dict[str, dict[str, object]]
+        Args:
+            rooms(list[synapse.handlers.sync.InvitedSyncResult]): list of
+                sync results for rooms this user is joined to
+            time_now(int): current time - used as a baseline for age
+                calculations
+            token_id(int): ID of the user's auth token - used for namespacing
+                of transaction IDs
+
+        Returns:
+            dict[str, dict[str, object]]: the invited rooms list, in our
+                response format
         """
         invited = {}
         for room in rooms:
@@ -251,15 +255,17 @@ class SyncRestServlet(RestServlet):
         """
         Encode the archived rooms in a sync result

-        :param list[synapse.handlers.sync.ArchivedSyncResult] rooms: list of
-            sync results for rooms this user is joined to
-        :param int time_now: current time - used as a baseline for age
-            calculations
-        :param int token_id: ID of the user's auth token - used for namespacing
-            of transaction IDs
-
-        :return: the invited rooms list, in our response format
-        :rtype: dict[str, dict[str, object]]
+        Args:
+            rooms (list[synapse.handlers.sync.ArchivedSyncResult]): list of
+                sync results for rooms this user is joined to
+            time_now(int): current time - used as a baseline for age
+                calculations
+            token_id(int): ID of the user's auth token - used for namespacing
+                of transaction IDs
+
+        Returns:
+            dict[str, dict[str, object]]: The archived rooms list, in our
+                response format
         """
         joined = {}
         for room in rooms:
@@ -272,17 +278,18 @@ class SyncRestServlet(RestServlet):
     @staticmethod
     def encode_room(room, time_now, token_id, joined=True):
         """
-        :param JoinedSyncResult|ArchivedSyncResult room: sync result for a
-            single room
-        :param int time_now: current time - used as a baseline for age
-            calculations
-        :param int token_id: ID of the user's auth token - used for namespacing
-            of transaction IDs
-        :param joined: True if the user is joined to this room - will mean
-            we handle ephemeral events
-
-        :return: the room, encoded in our response format
-        :rtype: dict[str, object]
+        Args:
+            room (JoinedSyncResult|ArchivedSyncResult): sync result for a
+                single room
+            time_now (int): current time - used as a baseline for age
+                calculations
+            token_id (int): ID of the user's auth token - used for namespacing
+                of transaction IDs
+            joined (bool): True if the user is joined to this room - will mean
+                we handle ephemeral events
+
+        Returns:
+            dict[str, object]: the room, encoded in our response format
         """
         def serialize(event):
             # TODO(mjark): Respect formatting requirements in the filter.
@@ -86,7 +86,8 @@ class StateHandler(object):
         If `event_type` is specified, then the method returns only the one
         event (or None) with that `event_type` and `state_key`.

-        :returns map from (type, state_key) to event
+        Returns:
+            map from (type, state_key) to event
         """
         event_ids = yield self.store.get_latest_event_ids_in_room(room_id)

@@ -100,7 +101,7 @@ class StateHandler(object):
         defer.returnValue(state)

     @defer.inlineCallbacks
-    def compute_event_context(self, event, old_state=None, outlier=False):
+    def compute_event_context(self, event, old_state=None):
         """ Fills out the context with the `current state` of the graph. The
         `current state` here is defined to be the state of the event graph
         just before the event - i.e. it never includes `event`
@@ -115,7 +116,7 @@ class StateHandler(object):
         """
         context = EventContext()

-        if outlier:
+        if event.internal_metadata.is_outlier():
             # If this is an outlier, then we know it shouldn't have any current
             # state. Certainly store.get_current_state won't return any, and
             # persisting the event won't store the state group.
@@ -176,10 +177,11 @@ class StateHandler(object):
         """ Given a list of event_ids this method fetches the state at each
         event, resolves conflicts between them and returns them.

-        :returns a Deferred tuple of (`state_group`, `state`, `prev_state`).
-            `state_group` is the name of a state group if one and only one is
-            involved. `state` is a map from (type, state_key) to event, and
-            `prev_state` is a list of event ids.
+        Returns:
+            a Deferred tuple of (`state_group`, `state`, `prev_state`).
+            `state_group` is the name of a state group if one and only one is
+            involved. `state` is a map from (type, state_key) to event, and
+            `prev_state` is a list of event ids.
         """
         logger.debug("resolve_state_groups event_ids %s", event_ids)

@@ -251,9 +253,10 @@ class StateHandler(object):

     def _resolve_events(self, state_sets, event_type=None, state_key=""):
         """
-        :returns a tuple (new_state, prev_states). new_state is a map
-        from (type, state_key) to event. prev_states is a list of event_ids.
-        :rtype: (dict[(str, str), synapse.events.FrozenEvent], list[str])
+        Returns
+            (dict[(str, str), synapse.events.FrozenEvent], list[str]): a tuple
+            (new_state, prev_states). new_state is a map from (type, state_key)
+            to event. prev_states is a list of event_ids.
         """
         with Measure(self.clock, "state._resolve_events"):
             state = {}
@ -88,15 +88,6 @@ class DataStore(RoomMemberStore, RoomStore,
self.hs = hs
self.database_engine = hs.database_engine

- cur = db_conn.cursor()
- try:
-     cur.execute("SELECT MIN(stream_ordering) FROM events",)
-     rows = cur.fetchall()
-     self.min_stream_token = rows[0][0] if rows and rows[0] and rows[0][0] else -1
-     self.min_stream_token = min(self.min_stream_token, -1)
- finally:
-     cur.close()
-
self.client_ip_last_seen = Cache(
name="client_ip_last_seen",
keylen=4,
@ -105,6 +96,9 @@ class DataStore(RoomMemberStore, RoomStore,
self._stream_id_gen = StreamIdGenerator(
db_conn, "events", "stream_ordering"
)
+ self._backfill_id_gen = StreamIdGenerator(
+     db_conn, "events", "stream_ordering", step=-1
+ )
self._receipts_id_gen = StreamIdGenerator(
db_conn, "receipts_linearized", "stream_id"
)
@ -116,7 +110,7 @@ class DataStore(RoomMemberStore, RoomStore,
)

self._transaction_id_gen = IdGenerator(db_conn, "sent_transactions", "id")
- self._state_groups_id_gen = IdGenerator(db_conn, "state_groups", "id")
+ self._state_groups_id_gen = StreamIdGenerator(db_conn, "state_groups", "id")
self._access_tokens_id_gen = IdGenerator(db_conn, "access_tokens", "id")
self._refresh_tokens_id_gen = IdGenerator(db_conn, "refresh_tokens", "id")
self._push_rule_id_gen = IdGenerator(db_conn, "push_rules", "id")
@ -129,7 +123,7 @@ class DataStore(RoomMemberStore, RoomStore,
extra_tables=[("deleted_pushers", "stream_id")],
)

- events_max = self._stream_id_gen.get_max_token()
+ events_max = self._stream_id_gen.get_current_token()
event_cache_prefill, min_event_val = self._get_cache_dict(
db_conn, "events",
entity_column="room_id",
@ -145,7 +139,7 @@ class DataStore(RoomMemberStore, RoomStore,
"MembershipStreamChangeCache", events_max,
)

- account_max = self._account_data_id_gen.get_max_token()
+ account_max = self._account_data_id_gen.get_current_token()
self._account_data_stream_cache = StreamChangeCache(
"AccountDataAndTagsChangeCache", account_max,
)
@ -156,7 +150,7 @@ class DataStore(RoomMemberStore, RoomStore,
db_conn, "presence_stream",
entity_column="user_id",
stream_column="stream_id",
- max_value=self._presence_id_gen.get_max_token(),
+ max_value=self._presence_id_gen.get_current_token(),
)
self.presence_stream_cache = StreamChangeCache(
"PresenceStreamChangeCache", min_presence_val,
@ -167,7 +161,7 @@ class DataStore(RoomMemberStore, RoomStore,
db_conn, "push_rules_stream",
entity_column="user_id",
stream_column="stream_id",
- max_value=self._push_rules_stream_id_gen.get_max_token()[0],
+ max_value=self._push_rules_stream_id_gen.get_current_token()[0],
)

self.push_rules_stream_cache = StreamChangeCache(
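The DataStore hunks above drop the hand-rolled ``min_stream_token`` bookkeeping in favour of a second ``StreamIdGenerator`` constructed with ``step=-1``, so backfilled events take negative stream orderings while live events keep counting upwards. A minimal standalone sketch of that idea (a toy class for illustration only, not Synapse's implementation)::

    from collections import deque
    from contextlib import contextmanager

    class ToyStreamIdGenerator(object):
        """Hands out ids that grow in the direction given by `step`."""

        def __init__(self, start=0, step=1):
            self._current = start
            self._step = step
            self._unfinished = deque()

        @contextmanager
        def get_next(self):
            self._current += self._step
            next_id = self._current
            self._unfinished.append(next_id)
            try:
                yield next_id
            finally:
                self._unfinished.remove(next_id)

        def get_current_token(self):
            # The most recent id for which everything before it has completed.
            if self._unfinished:
                return self._unfinished[0] - self._step
            return self._current

    forward = ToyStreamIdGenerator(step=1)    # live events: 1, 2, 3, ...
    backfill = ToyStreamIdGenerator(step=-1)  # backfilled events: -1, -2, -3, ...

    with forward.get_next() as stream_ordering:
        print(stream_ordering)   # 1
    with backfill.get_next() as stream_ordering:
        print(stream_ordering)   # -1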
@ -200,7 +200,7 @@ class AccountDataStore(SQLBaseStore):
"add_room_account_data", add_account_data_txn, next_id
)

- result = self._account_data_id_gen.get_max_token()
+ result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)

@defer.inlineCallbacks
@ -239,7 +239,7 @@ class AccountDataStore(SQLBaseStore):
"add_user_account_data", add_account_data_txn, next_id
)

- result = self._account_data_id_gen.get_max_token()
+ result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)

def _update_max_stream_id(self, txn, next_id):
@ -26,8 +26,9 @@ logger = logging.getLogger(__name__)
class EventPushActionsStore(SQLBaseStore):
def _set_push_actions_for_event_and_users_txn(self, txn, event, tuples):
"""
- :param event: the event set actions for
- :param tuples: list of tuples of (user_id, actions)
+ Args:
+     event: the event set actions for
+     tuples: list of tuples of (user_id, actions)
"""
values = []
for uid, actions in tuples:
@ -24,7 +24,7 @@ from synapse.util.logutils import log_function
from synapse.api.constants import EventTypes

from canonicaljson import encode_canonical_json
- from contextlib import contextmanager
+ from collections import namedtuple

import logging
import math
@ -60,64 +60,71 @@ class EventsStore(SQLBaseStore):
)

@defer.inlineCallbacks
- def persist_events(self, events_and_contexts, backfilled=False,
-                    is_new_state=True):
+ def persist_events(self, events_and_contexts, backfilled=False):
if not events_and_contexts:
return

if backfilled:
-     start = self.min_stream_token - 1
-     self.min_stream_token -= len(events_and_contexts) + 1
-     stream_orderings = range(start, self.min_stream_token, -1)
-
-     @contextmanager
-     def stream_ordering_manager():
-         yield stream_orderings
-     stream_ordering_manager = stream_ordering_manager()
+     stream_ordering_manager = self._backfill_id_gen.get_next_mult(
+         len(events_and_contexts)
+     )
else:
stream_ordering_manager = self._stream_id_gen.get_next_mult(
len(events_and_contexts)
)

+ state_group_id_manager = self._state_groups_id_gen.get_next_mult(
+     len(events_and_contexts)
+ )
with stream_ordering_manager as stream_orderings:
- for (event, _), stream in zip(events_and_contexts, stream_orderings):
-     event.internal_metadata.stream_ordering = stream
+ with state_group_id_manager as state_group_ids:
+     for (event, context), stream, state_group_id in zip(
+         events_and_contexts, stream_orderings, state_group_ids
+     ):
+         event.internal_metadata.stream_ordering = stream
+         # Assign a state group_id in case a new id is needed for
+         # this context. In theory we only need to assign this
+         # for contexts that have current_state and aren't outliers
+         # but that make the code more complicated. Assigning an ID
+         # per event only causes the state_group_ids to grow as fast
+         # as the stream_ordering so in practise shouldn't be a problem.
+         context.new_state_group_id = state_group_id

chunks = [
events_and_contexts[x:x + 100]
for x in xrange(0, len(events_and_contexts), 100)
]

for chunk in chunks:
# We can't easily parallelize these since different chunks
# might contain the same event. :(
yield self.runInteraction(
"persist_events",
self._persist_events_txn,
events_and_contexts=chunk,
backfilled=backfilled,
- is_new_state=is_new_state,
- )
+ )

@defer.inlineCallbacks
@log_function
- def persist_event(self, event, context,
-                   is_new_state=True, current_state=None):
+ def persist_event(self, event, context, current_state=None):
try:
with self._stream_id_gen.get_next() as stream_ordering:
- event.internal_metadata.stream_ordering = stream_ordering
- yield self.runInteraction(
-     "persist_event",
-     self._persist_event_txn,
-     event=event,
-     context=context,
-     is_new_state=is_new_state,
-     current_state=current_state,
- )
+ with self._state_groups_id_gen.get_next() as state_group_id:
+     event.internal_metadata.stream_ordering = stream_ordering
+     context.new_state_group_id = state_group_id
+     yield self.runInteraction(
+         "persist_event",
+         self._persist_event_txn,
+         event=event,
+         context=context,
+         current_state=current_state,
+     )
except _RollbackButIsFineException:
pass

- max_persisted_id = yield self._stream_id_gen.get_max_token()
+ max_persisted_id = yield self._stream_id_gen.get_current_token()
defer.returnValue((stream_ordering, max_persisted_id))

@defer.inlineCallbacks
@ -177,8 +184,7 @@ class EventsStore(SQLBaseStore):
defer.returnValue({e.event_id: e for e in events})

@log_function
- def _persist_event_txn(self, txn, event, context,
-                        is_new_state=True, current_state=None):
+ def _persist_event_txn(self, txn, event, context, current_state):
# We purposefully do this first since if we include a `current_state`
# key, we *want* to update the `current_state_events` table
if current_state:
@ -186,7 +192,16 @@ class EventsStore(SQLBaseStore):
txn.call_after(self.get_rooms_for_user.invalidate_all)
txn.call_after(self.get_users_in_room.invalidate, (event.room_id,))
txn.call_after(self.get_joined_hosts_for_room.invalidate, (event.room_id,))
- txn.call_after(self.get_room_name_and_aliases, event.room_id)
+ txn.call_after(self.get_room_name_and_aliases.invalidate, (event.room_id,))

+ # Add an entry to the current_state_resets table to record the point
+ # where we clobbered the current state
+ stream_order = event.internal_metadata.stream_ordering
+ self._simple_insert_txn(
+     txn,
+     table="current_state_resets",
+     values={"event_stream_ordering": stream_order}
+ )
+
self._simple_delete_txn(
txn,
@ -210,12 +225,10 @@ class EventsStore(SQLBaseStore):
txn,
[(event, context)],
backfilled=False,
- is_new_state=is_new_state,
)

@log_function
- def _persist_events_txn(self, txn, events_and_contexts, backfilled,
-                         is_new_state=True):
+ def _persist_events_txn(self, txn, events_and_contexts, backfilled):
depth_updates = {}
for event, context in events_and_contexts:
# Remove the any existing cache entries for the event_ids
@ -282,9 +295,7 @@ class EventsStore(SQLBaseStore):

outlier_persisted = have_persisted[event.event_id]
if not event.internal_metadata.is_outlier() and outlier_persisted:
- self._store_state_groups_txn(
-     txn, event, context,
- )
+ self._store_mult_state_groups_txn(txn, ((event, context),))

metadata_json = encode_json(
event.internal_metadata.get_dict()
@ -299,6 +310,18 @@ class EventsStore(SQLBaseStore):
(metadata_json, event.event_id,)
)

+ stream_order = event.internal_metadata.stream_ordering
+ state_group_id = context.state_group or context.new_state_group_id
+ self._simple_insert_txn(
+     txn,
+     table="ex_outlier_stream",
+     values={
+         "event_stream_ordering": stream_order,
+         "event_id": event.event_id,
+         "state_group": state_group_id,
+     }
+ )
+
sql = (
"UPDATE events SET outlier = ?"
" WHERE event_id = ?"
@ -310,19 +333,14 @@ class EventsStore(SQLBaseStore):

self._update_extremeties(txn, [event])

- events_and_contexts = filter(
-     lambda ec: ec[0] not in to_remove,
-     events_and_contexts
- )
+ events_and_contexts = [
+     ec for ec in events_and_contexts if ec[0] not in to_remove
+ ]

if not events_and_contexts:
return

- self._store_mult_state_groups_txn(txn, [
-     (event, context)
-     for event, context in events_and_contexts
-     if not event.internal_metadata.is_outlier()
- ])
+ self._store_mult_state_groups_txn(txn, events_and_contexts)

self._handle_mult_prev_events(
txn,
@ -421,10 +439,9 @@ class EventsStore(SQLBaseStore):
txn, [event for event, _ in events_and_contexts]
)

- state_events_and_contexts = filter(
-     lambda i: i[0].is_state(),
-     events_and_contexts,
- )
+ state_events_and_contexts = [
+     ec for ec in events_and_contexts if ec[0].is_state()
+ ]

state_values = []
for event, context in state_events_and_contexts:
@ -462,32 +479,50 @@ class EventsStore(SQLBaseStore):
],
)

- if is_new_state:
-     for event, _ in state_events_and_contexts:
-         if not context.rejected:
-             txn.call_after(
-                 self._get_current_state_for_key.invalidate,
-                 (event.room_id, event.type, event.state_key,)
-             )
-
-             if event.type in [EventTypes.Name, EventTypes.Aliases]:
-                 txn.call_after(
-                     self.get_room_name_and_aliases.invalidate,
-                     (event.room_id,)
-                 )
-
-             self._simple_upsert_txn(
-                 txn,
-                 "current_state_events",
-                 keyvalues={
-                     "room_id": event.room_id,
-                     "type": event.type,
-                     "state_key": event.state_key,
-                 },
-                 values={
-                     "event_id": event.event_id,
-                 }
-             )
+ if backfilled:
+     # Backfilled events come before the current state so we don't need
+     # to update the current state table
+     return
+
+ for event, _ in state_events_and_contexts:
+     if (not event.internal_metadata.is_invite_from_remote()
+             and event.internal_metadata.is_outlier()):
+         # Outlier events generally shouldn't clobber the current state.
+         # However invites from remote severs for rooms we aren't in
+         # are a bit special: they don't come with any associated
+         # state so are technically an outlier, however all the
+         # client-facing code assumes that they are in the current
+         # state table so we insert the event anyway.
+         continue
+
+     if context.rejected:
+         # If the event failed it's auth checks then it shouldn't
+         # clobbler the current state.
+         continue
+
+     txn.call_after(
+         self._get_current_state_for_key.invalidate,
+         (event.room_id, event.type, event.state_key,)
+     )
+
+     if event.type in [EventTypes.Name, EventTypes.Aliases]:
+         txn.call_after(
+             self.get_room_name_and_aliases.invalidate,
+             (event.room_id,)
+         )
+
+     self._simple_upsert_txn(
+         txn,
+         "current_state_events",
+         keyvalues={
+             "room_id": event.room_id,
+             "type": event.type,
+             "state_key": event.state_key,
+         },
+         values={
+             "event_id": event.event_id,
+         }
+     )

return

@ -1076,10 +1111,7 @@ class EventsStore(SQLBaseStore):

def get_current_backfill_token(self):
"""The current minimum token that backfilled events have reached"""
- # TODO: Fix race with the persit_event txn by using one of the
- # stream id managers
- return -self.min_stream_token
+ return -self._backfill_id_gen.get_current_token()

def get_all_new_events(self, last_backfill_id, last_forward_id,
current_backfill_id, current_forward_id, limit):
@ -1087,10 +1119,12 @@ class EventsStore(SQLBaseStore):
new events or as backfilled events"""
def get_all_new_events_txn(txn):
sql = (
- "SELECT e.stream_ordering, ej.internal_metadata, ej.json"
+ "SELECT e.stream_ordering, ej.internal_metadata, ej.json, eg.state_group"
" FROM events as e"
" JOIN event_json as ej"
" ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+ " LEFT JOIN event_to_state_groups as eg"
+ " ON e.event_id = eg.event_id"
" WHERE ? < e.stream_ordering AND e.stream_ordering <= ?"
" ORDER BY e.stream_ordering ASC"
" LIMIT ?"
@ -1098,14 +1132,43 @@ class EventsStore(SQLBaseStore):
if last_forward_id != current_forward_id:
txn.execute(sql, (last_forward_id, current_forward_id, limit))
new_forward_events = txn.fetchall()

+ if len(new_forward_events) == limit:
+     upper_bound = new_forward_events[-1][0]
+ else:
+     upper_bound = current_forward_id
+
+ sql = (
+     "SELECT -event_stream_ordering FROM current_state_resets"
+     " WHERE ? < event_stream_ordering"
+     " AND event_stream_ordering <= ?"
+     " ORDER BY event_stream_ordering ASC"
+ )
+ txn.execute(sql, (last_forward_id, upper_bound))
+ state_resets = txn.fetchall()
+
+ sql = (
+     "SELECT -event_stream_ordering, event_id, state_group"
+     " FROM ex_outlier_stream"
+     " WHERE ? > event_stream_ordering"
+     " AND event_stream_ordering >= ?"
+     " ORDER BY event_stream_ordering DESC"
+ )
+ txn.execute(sql, (last_forward_id, upper_bound))
+ forward_ex_outliers = txn.fetchall()
else:
new_forward_events = []
+ state_resets = []
+ forward_ex_outliers = []

sql = (
- "SELECT -e.stream_ordering, ej.internal_metadata, ej.json"
+ "SELECT -e.stream_ordering, ej.internal_metadata, ej.json,"
+ " eg.state_group"
" FROM events as e"
" JOIN event_json as ej"
" ON e.event_id = ej.event_id AND e.room_id = ej.room_id"
+ " LEFT JOIN event_to_state_groups as eg"
+ " ON e.event_id = eg.event_id"
" WHERE ? > e.stream_ordering AND e.stream_ordering >= ?"
" ORDER BY e.stream_ordering DESC"
" LIMIT ?"
@ -1113,8 +1176,35 @@ class EventsStore(SQLBaseStore):
if last_backfill_id != current_backfill_id:
txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
new_backfill_events = txn.fetchall()

+ if len(new_backfill_events) == limit:
+     upper_bound = new_backfill_events[-1][0]
+ else:
+     upper_bound = current_backfill_id
+
+ sql = (
+     "SELECT -event_stream_ordering, event_id, state_group"
+     " FROM ex_outlier_stream"
+     " WHERE ? > event_stream_ordering"
+     " AND event_stream_ordering >= ?"
+     " ORDER BY event_stream_ordering DESC"
+ )
+ txn.execute(sql, (-last_backfill_id, -upper_bound))
+ backward_ex_outliers = txn.fetchall()
else:
new_backfill_events = []
+ backward_ex_outliers = []

- return (new_forward_events, new_backfill_events)
+ return AllNewEventsResult(
+     new_forward_events, new_backfill_events,
+     forward_ex_outliers, backward_ex_outliers,
+     state_resets,
+ )
return self.runInteraction("get_all_new_events", get_all_new_events_txn)


+ AllNewEventsResult = namedtuple("AllNewEventsResult", [
+     "new_forward_events", "new_backfill_events",
+     "forward_ex_outliers", "backward_ex_outliers",
+     "state_resets"
+ ])
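``get_all_new_events`` now hands back an ``AllNewEventsResult`` namedtuple instead of a bare pair, so the replication code can pull out the extra streams by name. A rough sketch of the calling side, with made-up row values::

    from collections import namedtuple

    AllNewEventsResult = namedtuple("AllNewEventsResult", [
        "new_forward_events", "new_backfill_events",
        "forward_ex_outliers", "backward_ex_outliers",
        "state_resets",
    ])

    # Pretend result of get_all_new_events(); rows are (stream_ordering, ...) tuples.
    res = AllNewEventsResult(
        new_forward_events=[(101, "{}", "{}", 7)],
        new_backfill_events=[],
        forward_ex_outliers=[(-5, "$event:example.com", 7)],
        backward_ex_outliers=[],
        state_resets=[(101,)],
    )

    # Named access replaces positional unpacking of the old 2-tuple.
    for stream_ordering, internal, json, state_group in res.new_forward_events:
        print(stream_ordering, state_group)
    if res.state_resets:
        print("current state was reset at", res.state_resets[-1][0])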
@ -68,7 +68,9 @@ class PresenceStore(SQLBaseStore):
self._update_presence_txn, stream_orderings, presence_states,
)

- defer.returnValue((stream_orderings[-1], self._presence_id_gen.get_max_token()))
+ defer.returnValue((
+     stream_orderings[-1], self._presence_id_gen.get_current_token()
+ ))

def _update_presence_txn(self, txn, stream_orderings, presence_states):
for stream_id, state in zip(stream_orderings, presence_states):
@ -155,7 +157,7 @@ class PresenceStore(SQLBaseStore):
defer.returnValue([UserPresenceState(**row) for row in rows])

def get_current_presence_token(self):
- return self._presence_id_gen.get_max_token()
+ return self._presence_id_gen.get_current_token()

def allow_presence_visible(self, observed_localpart, observer_userid):
return self._simple_insert(
@ -392,7 +392,7 @@ class PushRuleStore(SQLBaseStore):
"""Get the position of the push rules stream.
Returns a pair of a stream id for the push_rules stream and the
room stream ordering it corresponds to."""
- return self._push_rules_stream_id_gen.get_max_token()
+ return self._push_rules_stream_id_gen.get_current_token()

def have_push_rules_changed_for_user(self, user_id, last_id):
if not self.push_rules_stream_cache.has_entity_changed(user_id, last_id):
@ -78,7 +78,7 @@ class PusherStore(SQLBaseStore):
defer.returnValue(rows)

def get_pushers_stream_token(self):
- return self._pushers_id_gen.get_max_token()
+ return self._pushers_id_gen.get_current_token()

def get_all_updated_pushers(self, last_id, current_id, limit):
def get_all_updated_pushers_txn(txn):
@ -31,7 +31,7 @@ class ReceiptsStore(SQLBaseStore):
super(ReceiptsStore, self).__init__(hs)

self._receipts_stream_cache = StreamChangeCache(
- "ReceiptsRoomChangeCache", self._receipts_id_gen.get_max_token()
+ "ReceiptsRoomChangeCache", self._receipts_id_gen.get_current_token()
)

@cached(num_args=2)
@ -221,7 +221,7 @@ class ReceiptsStore(SQLBaseStore):
defer.returnValue(results)

def get_max_receipt_stream_id(self):
- return self._receipts_id_gen.get_max_token()
+ return self._receipts_id_gen.get_current_token()

def insert_linearized_receipt_txn(self, txn, room_id, receipt_type,
user_id, event_id, data, stream_id):
@ -346,7 +346,7 @@ class ReceiptsStore(SQLBaseStore):
room_id, receipt_type, user_id, event_ids, data
)

- max_persisted_id = self._stream_id_gen.get_max_token()
+ max_persisted_id = self._stream_id_gen.get_current_token()

defer.returnValue((stream_id, max_persisted_id))

@ -458,12 +458,15 @@ class RegistrationStore(SQLBaseStore):
"""
Gets the 3pid's guest access token if exists, else saves access_token.

- :param medium (str): Medium of the 3pid. Must be "email".
- :param address (str): 3pid address.
- :param access_token (str): The access token to persist if none is
-     already persisted.
- :param inviter_user_id (str): User ID of the inviter.
- :return (deferred str): Whichever access token is persisted at the end
+ Args:
+     medium (str): Medium of the 3pid. Must be "email".
+     address (str): 3pid address.
+     access_token (str): The access token to persist if none is
+         already persisted.
+     inviter_user_id (str): User ID of the inviter.
+
+ Returns:
+     deferred str: Whichever access token is persisted at the end
of this function call.
"""
def insert(txn):
synapse/storage/schema/delta/30/state_stream.sql (new file, 38 lines)
@ -0,0 +1,38 @@
+ /* Copyright 2016 OpenMarket Ltd
+  *
+  * Licensed under the Apache License, Version 2.0 (the "License");
+  * you may not use this file except in compliance with the License.
+  * You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+
+
+ /**
+  * The positions in the event stream_ordering when the current_state was
+  * replaced by the state at the event.
+  */
+
+ CREATE TABLE IF NOT EXISTS current_state_resets(
+     event_stream_ordering BIGINT PRIMARY KEY NOT NULL
+ );
+
+ /* The outlier events that have aquired a state group typically through
+  * backfill. This is tracked separately to the events table, as assigning a
+  * state group change the position of the existing event in the stream
+  * ordering.
+  * However since a stream_ordering is assigned in persist_event for the
+  * (event, state) pair, we can use that stream_ordering to identify when
+  * the new state was assigned for the event.
+  */
+ CREATE TABLE IF NOT EXISTS ex_outlier_stream(
+     event_stream_ordering BIGINT PRIMARY KEY NOT NULL,
+     event_id TEXT NOT NULL,
+     state_group BIGINT NOT NULL
+ );
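The comments in the new schema file describe what the two tables record: where the current state was clobbered, and which former outliers gained a state group. Roughly, a consumer catching up from a known stream position asks each table what happened after that point; an illustrative sqlite3 sketch (the table layout is taken from the schema above, the data is invented)::

    import sqlite3

    conn = sqlite3.connect(":memory:")
    conn.executescript("""
        CREATE TABLE current_state_resets(event_stream_ordering BIGINT PRIMARY KEY NOT NULL);
        CREATE TABLE ex_outlier_stream(
            event_stream_ordering BIGINT PRIMARY KEY NOT NULL,
            event_id TEXT NOT NULL,
            state_group BIGINT NOT NULL
        );
    """)
    conn.execute("INSERT INTO current_state_resets VALUES (42)")
    conn.execute("INSERT INTO ex_outlier_stream VALUES (43, '$abc:example.com', 7)")

    last_seen = 40  # stream position the consumer has already processed
    resets = conn.execute(
        "SELECT event_stream_ordering FROM current_state_resets"
        " WHERE ? < event_stream_ordering ORDER BY event_stream_ordering ASC",
        (last_seen,),
    ).fetchall()
    outliers = conn.execute(
        "SELECT event_stream_ordering, event_id, state_group FROM ex_outlier_stream"
        " WHERE ? < event_stream_ordering ORDER BY event_stream_ordering ASC",
        (last_seen,),
    ).fetchall()
    print(resets)    # [(42,)]
    print(outliers)  # [(43, '$abc:example.com', 7)]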
@ -64,12 +64,12 @@ class StateStore(SQLBaseStore):
for group, state_map in group_to_state.items()
})

- def _store_state_groups_txn(self, txn, event, context):
-     return self._store_mult_state_groups_txn(txn, [(event, context)])
-
def _store_mult_state_groups_txn(self, txn, events_and_contexts):
state_groups = {}
for event, context in events_and_contexts:
+ if event.internal_metadata.is_outlier():
+     continue
+
if context.current_state is None:
continue

@ -82,7 +82,8 @@ class StateStore(SQLBaseStore):
if event.is_state():
state_events[(event.type, event.state_key)] = event

- state_group = self._state_groups_id_gen.get_next()
+ state_group = context.new_state_group_id
+
self._simple_insert_txn(
txn,
table="state_groups",
@ -114,11 +115,10 @@ class StateStore(SQLBaseStore):
table="event_to_state_groups",
values=[
{
-     "state_group": state_groups[event.event_id],
-     "event_id": event.event_id,
+     "state_group": state_group_id,
+     "event_id": event_id,
}
- for event, context in events_and_contexts
- if context.current_state is not None
+ for event_id, state_group_id in state_groups.items()
],
)

@ -249,11 +249,14 @@ class StateStore(SQLBaseStore):
"""
Get the state dict corresponding to a particular event

- :param str event_id: event whose state should be returned
- :param list[(str, str)]|None types: List of (type, state_key) tuples
-     which are used to filter the state fetched. May be None, which
-     matches any key
- :return: a deferred dict from (type, state_key) -> state_event
+ Args:
+     event_id(str): event whose state should be returned
+     types(list[(str, str)]|None): List of (type, state_key) tuples
+         which are used to filter the state fetched. May be None, which
+         matches any key
+
+ Returns:
+     A deferred dict from (type, state_key) -> state_event
"""
state_map = yield self.get_state_for_events([event_id], types)
defer.returnValue(state_map[event_id])
@ -429,3 +432,33 @@ class StateStore(SQLBaseStore):
}

defer.returnValue(results)
+
+ def get_all_new_state_groups(self, last_id, current_id, limit):
+     def get_all_new_state_groups_txn(txn):
+         sql = (
+             "SELECT id, room_id, event_id FROM state_groups"
+             " WHERE ? < id AND id <= ? ORDER BY id LIMIT ?"
+         )
+         txn.execute(sql, (last_id, current_id, limit))
+         groups = txn.fetchall()
+
+         if not groups:
+             return ([], [])
+
+         lower_bound = groups[0][0]
+         upper_bound = groups[-1][0]
+         sql = (
+             "SELECT state_group, type, state_key, event_id"
+             " FROM state_groups_state"
+             " WHERE ? <= state_group AND state_group <= ?"
+         )
+
+         txn.execute(sql, (lower_bound, upper_bound))
+         state_group_state = txn.fetchall()
+         return (groups, state_group_state)
+     return self.runInteraction(
+         "get_all_new_state_groups", get_all_new_state_groups_txn
+     )
+
+ def get_state_stream_token(self):
+     return self._state_groups_id_gen.get_current_token()
@ -539,7 +539,7 @@ class StreamStore(SQLBaseStore):

@defer.inlineCallbacks
def get_room_events_max_id(self, direction='f'):
- token = yield self._stream_id_gen.get_max_token()
+ token = yield self._stream_id_gen.get_current_token()
if direction != 'b':
defer.returnValue("s%d" % (token,))
else:
@ -30,7 +30,7 @@ class TagsStore(SQLBaseStore):
Returns:
A deferred int.
"""
- return self._account_data_id_gen.get_max_token()
+ return self._account_data_id_gen.get_current_token()

@cached()
def get_tags_for_user(self, user_id):
@ -200,7 +200,7 @@ class TagsStore(SQLBaseStore):

self.get_tags_for_user.invalidate((user_id,))

- result = self._account_data_id_gen.get_max_token()
+ result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)

@defer.inlineCallbacks
@ -222,7 +222,7 @@ class TagsStore(SQLBaseStore):

self.get_tags_for_user.invalidate((user_id,))

- result = self._account_data_id_gen.get_max_token()
+ result = self._account_data_id_gen.get_current_token()
defer.returnValue(result)

def _update_revision_txn(self, txn, user_id, room_id, next_id):
@ -21,7 +21,7 @@ import threading
class IdGenerator(object):
def __init__(self, db_conn, table, column):
self._lock = threading.Lock()
- self._next_id = _load_max_id(db_conn, table, column)
+ self._next_id = _load_current_id(db_conn, table, column)

def get_next(self):
with self._lock:
@ -29,12 +29,16 @@ class IdGenerator(object):
return self._next_id


- def _load_max_id(db_conn, table, column):
+ def _load_current_id(db_conn, table, column, step=1):
cur = db_conn.cursor()
- cur.execute("SELECT MAX(%s) FROM %s" % (column, table,))
+ if step == 1:
+     cur.execute("SELECT MAX(%s) FROM %s" % (column, table,))
+ else:
+     cur.execute("SELECT MIN(%s) FROM %s" % (column, table,))
val, = cur.fetchone()
cur.close()
- return int(val) if val else 1
+ current_id = int(val) if val else step
+ return (max if step > 0 else min)(current_id, step)


class StreamIdGenerator(object):
@ -45,17 +49,32 @@ class StreamIdGenerator(object):
all ids less than or equal to it have completed. This handles the fact that
persistence of events can complete out of order.

+ Args:
+     db_conn(connection): A database connection to use to fetch the
+         initial value of the generator from.
+     table(str): A database table to read the initial value of the id
+         generator from.
+     column(str): The column of the database table to read the initial
+         value from the id generator from.
+     extra_tables(list): List of pairs of database tables and columns to
+         use to source the initial value of the generator from. The value
+         with the largest magnitude is used.
+     step(int): which direction the stream ids grow in. +1 to grow
+         upwards, -1 to grow downwards.
+
Usage:
with stream_id_gen.get_next() as stream_id:
# ... persist event ...
"""
- def __init__(self, db_conn, table, column, extra_tables=[]):
+ def __init__(self, db_conn, table, column, extra_tables=[], step=1):
+     assert step != 0
self._lock = threading.Lock()
- self._current_max = _load_max_id(db_conn, table, column)
+ self._step = step
+ self._current = _load_current_id(db_conn, table, column, step)
for table, column in extra_tables:
-     self._current_max = max(
-         self._current_max,
-         _load_max_id(db_conn, table, column)
+     self._current = (max if step > 0 else min)(
+         self._current,
+         _load_current_id(db_conn, table, column, step)
)
self._unfinished_ids = deque()

@ -66,8 +85,8 @@ class StreamIdGenerator(object):
# ... persist event ...
"""
with self._lock:
- self._current_max += 1
- next_id = self._current_max
+ self._current += self._step
+ next_id = self._current

self._unfinished_ids.append(next_id)

@ -88,8 +107,12 @@ class StreamIdGenerator(object):
# ... persist events ...
"""
with self._lock:
- next_ids = range(self._current_max + 1, self._current_max + n + 1)
- self._current_max += n
+ next_ids = range(
+     self._current + self._step,
+     self._current + self._step * (n + 1),
+     self._step
+ )
+ self._current += n

for next_id in next_ids:
self._unfinished_ids.append(next_id)
@ -105,15 +128,15 @@ class StreamIdGenerator(object):

return manager()

- def get_max_token(self):
+ def get_current_token(self):
"""Returns the maximum stream id such that all stream ids less than or
equal to it have been successfully persisted.
"""
with self._lock:
if self._unfinished_ids:
- return self._unfinished_ids[0] - 1
+ return self._unfinished_ids[0] - self._step

- return self._current_max
+ return self._current


class ChainedIdGenerator(object):
@ -125,7 +148,7 @@ class ChainedIdGenerator(object):
def __init__(self, chained_generator, db_conn, table, column):
self.chained_generator = chained_generator
self._lock = threading.Lock()
- self._current_max = _load_max_id(db_conn, table, column)
+ self._current_max = _load_current_id(db_conn, table, column)
self._unfinished_ids = deque()

def get_next(self):
@ -137,7 +160,7 @@ class ChainedIdGenerator(object):
with self._lock:
self._current_max += 1
next_id = self._current_max
- chained_id = self.chained_generator.get_max_token()
+ chained_id = self.chained_generator.get_current_token()

self._unfinished_ids.append((next_id, chained_id))

@ -151,7 +174,7 @@ class ChainedIdGenerator(object):

return manager()

- def get_max_token(self):
+ def get_current_token(self):
"""Returns the maximum stream id such that all stream ids less than or
equal to it have been successfully persisted.
"""
@ -160,4 +183,4 @@ class ChainedIdGenerator(object):
stream_id, chained_id = self._unfinished_ids[0]
return (stream_id - 1, chained_id)

- return (self._current_max, self.chained_generator.get_max_token())
+ return (self._current_max, self.chained_generator.get_current_token())
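The new ``step`` argument changes both how the initial value is loaded (``MAX`` for ascending streams, ``MIN`` for descending) and how it is clamped. A quick check of that load logic against an in-memory sqlite table, using a re-typed copy of ``_load_current_id`` from the hunk above::

    import sqlite3

    def _load_current_id(db_conn, table, column, step=1):
        cur = db_conn.cursor()
        if step == 1:
            cur.execute("SELECT MAX(%s) FROM %s" % (column, table,))
        else:
            cur.execute("SELECT MIN(%s) FROM %s" % (column, table,))
        val, = cur.fetchone()
        cur.close()
        current_id = int(val) if val else step
        return (max if step > 0 else min)(current_id, step)

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE events (stream_ordering BIGINT)")
    conn.executemany("INSERT INTO events VALUES (?)", [(5,), (9,), (-3,)])

    print(_load_current_id(conn, "events", "stream_ordering", step=1))   # 9
    print(_load_current_id(conn, "events", "stream_ordering", step=-1))  # -3
    conn.execute("DELETE FROM events")
    print(_load_current_id(conn, "events", "stream_ordering", step=-1))  # -1 for an empty table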
@ -16,7 +16,8 @@

from twisted.internet import defer, reactor

- from .logcontext import PreserveLoggingContext
+ from .logcontext import PreserveLoggingContext, preserve_fn
+ from synapse.util import unwrapFirstError


@defer.inlineCallbacks
@ -107,3 +108,32 @@ class ObservableDeferred(object):
return "<ObservableDeferred object at %s, result=%r, _deferred=%r>" % (
id(self), self._result, self._deferred,
)
+
+
+ def concurrently_execute(func, args, limit):
+     """Executes the function with each argument conncurrently while limiting
+     the number of concurrent executions.
+
+     Args:
+         func (func): Function to execute, should return a deferred.
+         args (list): List of arguments to pass to func, each invocation of func
+             gets a signle argument.
+         limit (int): Maximum number of conccurent executions.
+
+     Returns:
+         deferred: Resolved when all function invocations have finished.
+     """
+     it = iter(args)
+
+     @defer.inlineCallbacks
+     def _concurrently_execute_inner():
+         try:
+             while True:
+                 yield func(it.next())
+         except StopIteration:
+             pass
+
+     return defer.gatherResults([
+         preserve_fn(_concurrently_execute_inner)()
+         for _ in xrange(limit)
+     ], consumeErrors=True).addErrback(unwrapFirstError)
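A rough usage sketch of the new ``concurrently_execute`` helper, assuming it is importable from ``synapse.util.async`` as added on this branch; with deferreds that have already fired, everything completes synchronously::

    from twisted.internet import defer
    from synapse.util.async import concurrently_execute

    results = []

    def process(item):
        # Pretend this hits the database; here the deferred fires immediately.
        results.append(item * 2)
        return defer.succeed(None)

    d = concurrently_execute(process, [1, 2, 3, 4, 5], limit=2)
    # At most `limit` invocations are in flight at any one time.
    assert d.called
    assert sorted(results) == [2, 4, 6, 8, 10]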
@ -58,15 +58,21 @@ class ReplicationResourceCase(unittest.TestCase):
self.assertEquals(body, {})

@defer.inlineCallbacks
- def test_events(self):
-     get = self.get(events="-1", timeout="0")
+ def test_events_and_state(self):
+     get = self.get(events="-1", state="-1", timeout="0")
yield self.hs.get_handlers().room_creation_handler.create_room(
Requester(self.user, "", False), {}
)
code, body = yield get
self.assertEquals(code, 200)
self.assertEquals(body["events"]["field_names"], [
- "position", "internal", "json"
+ "position", "internal", "json", "state_group"
+ ])
+ self.assertEquals(body["state_groups"]["field_names"], [
+     "position", "room_id", "event_id"
+ ])
+ self.assertEquals(body["state_group_state"]["field_names"], [
+     "position", "type", "state_key", "event_id"
])

@defer.inlineCallbacks
@ -132,6 +138,7 @@ class ReplicationResourceCase(unittest.TestCase):
test_timeout_backfill = _test_timeout("backfill")
test_timeout_push_rules = _test_timeout("push_rules")
test_timeout_pushers = _test_timeout("pushers")
+ test_timeout_state = _test_timeout("state")

@defer.inlineCallbacks
def send_text_message(self, room_id, message):
@ -182,4 +189,21 @@ class ReplicationResourceCase(unittest.TestCase):
)
response_body = json.loads(response_json)

+ if response_code == 200:
+     self.check_response(response_body)
+
defer.returnValue((response_code, response_body))

+ def check_response(self, response_body):
+     for name, stream in response_body.items():
+         self.assertIn("field_names", stream)
+         field_names = stream["field_names"]
+         self.assertIn("rows", stream)
+         self.assertTrue(stream["rows"])
+         for row in stream["rows"]:
+             self.assertEquals(
+                 len(row), len(field_names),
+                 "%s: len(row = %r) == len(field_names = %r)" % (
+                     name, row, field_names
+                 )
+             )