Merge branch 'develop' of github.com:matrix-org/synapse into release-v0.18.0

Erik Johnston 2016-09-19 17:15:24 +01:00
commit ddfcdd4778
5 changed files with 194 additions and 118 deletions

docs/workers.rst View File

@@ -42,6 +42,7 @@ The current available worker applications are:
 * synapse.app.appservice - handles output traffic to Application Services
 * synapse.app.federation_reader - handles receiving federation traffic (including public_rooms API)
 * synapse.app.media_repository - handles the media repository.
+* synapse.app.client_reader - handles client API endpoints like /publicRooms
 
 Each worker configuration file inherits the configuration of the main homeserver
 configuration file. You can then override configuration specific to that worker,
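A note on the inheritance described above: conceptually, the worker's config is overlaid on the homeserver's, with the worker's keys winning. A minimal sketch of that overlay, using a hypothetical loader and a shallow merge for illustration (Synapse's real config handling is more involved):

import yaml

def load_worker_config(homeserver_path, worker_path):
    # start from the main homeserver configuration...
    with open(homeserver_path) as f:
        config = yaml.safe_load(f)
    # ...then let the worker's file override specific keys
    with open(worker_path) as f:
        config.update(yaml.safe_load(f) or {})
    return config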

synapse/app/client_reader.py View File

@@ -51,7 +51,7 @@ import sys
 import logging
 import gc
 
-logger = logging.getLogger("synapse.app.federation_reader")
+logger = logging.getLogger("synapse.app.client_reader")
 
 
 class ClientReaderSlavedStore(

synapse/handlers/room_list.py View File

@@ -115,32 +115,122 @@ class RoomListHandler(BaseHandler):
         sorted_entries = sorted(rooms_to_order_value.items(), key=lambda e: e[1])
         sorted_rooms = [room_id for room_id, _ in sorted_entries]
 
+        # `sorted_rooms` should now be a list of all public room ids that is
+        # stable across pagination. Therefore, we can use indices into this
+        # list as our pagination tokens.
+
+        # Filter out rooms that we don't want to return
+        rooms_to_scan = [
+            r for r in sorted_rooms
+            if r not in newly_unpublished and rooms_to_num_joined[r] > 0
+        ]
+
         if since_token:
+            # Filter out rooms we've already returned previously.
+            # `since_token.current_limit` is the index of the last room we
+            # sent down, so we exclude it and everything before/after it.
             if since_token.direction_is_forward:
-                sorted_rooms = sorted_rooms[since_token.current_limit:]
+                rooms_to_scan = rooms_to_scan[since_token.current_limit + 1:]
             else:
-                sorted_rooms = sorted_rooms[:since_token.current_limit]
-                sorted_rooms.reverse()
+                rooms_to_scan = rooms_to_scan[:since_token.current_limit]
+                rooms_to_scan.reverse()
 
-        rooms_to_scan = sorted_rooms
-        if limit and not search_filter:
-            rooms_to_scan = sorted_rooms[:limit + 1]
-
+        # Actually generate the entries. _generate_room_entry will append to
+        # chunk but will stop if len(chunk) > limit
         chunk = []
+        if limit and not search_filter:
+            step = limit + 1
+            for i in xrange(0, len(rooms_to_scan), step):
+                # We iterate here because in the vast majority of cases we'll
+                # stop at the first iteration, but occasionally
+                # _generate_room_entry won't append to the chunk, so we need
+                # to loop again. We don't want to scan the entire range
+                # either, as that would potentially waste a lot of work.
+                yield concurrently_execute(
+                    lambda r: self._generate_room_entry(
+                        r, rooms_to_num_joined[r],
+                        chunk, limit, search_filter
+                    ),
+                    rooms_to_scan[i:i + step], 10
+                )
+                if len(chunk) >= limit + 1:
+                    break
+        else:
+            yield concurrently_execute(
+                lambda r: self._generate_room_entry(
+                    r, rooms_to_num_joined[r],
+                    chunk, limit, search_filter
+                ),
+                rooms_to_scan, 5
+            )
+
+        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))
+
+        # Work out the new limit of the batch for pagination, or None if we
+        # know there are no more results that would be returned.
+        # i.e., [since_token.current_limit..new_limit] is the batch of rooms
+        # we've returned (or the reverse if we paginated backwards).
+        # We tried to pull out limit + 1 rooms above, so if we have <= limit
+        # then we know there are no more results to return.
+        new_limit = None
+        if chunk and (not limit or len(chunk) > limit):
+            if not since_token or since_token.direction_is_forward:
+                if limit:
+                    chunk = chunk[:limit]
+                last_room_id = chunk[-1]["room_id"]
+            else:
+                if limit:
+                    chunk = chunk[-limit:]
+                last_room_id = chunk[0]["room_id"]
+
+            new_limit = sorted_rooms.index(last_room_id)
+
+        results = {
+            "chunk": chunk,
+        }
+
+        if since_token:
+            results["new_rooms"] = bool(newly_visible)
+
+        if not since_token or since_token.direction_is_forward:
+            if new_limit is not None:
+                results["next_batch"] = RoomListNextBatch(
+                    stream_ordering=stream_token,
+                    public_room_stream_id=public_room_stream_id,
+                    current_limit=new_limit,
+                    direction_is_forward=True,
+                ).to_token()
+
+            if since_token:
+                results["prev_batch"] = since_token.copy_and_replace(
+                    direction_is_forward=False,
+                    current_limit=since_token.current_limit + 1,
+                ).to_token()
+        else:
+            if new_limit is not None:
+                results["prev_batch"] = RoomListNextBatch(
+                    stream_ordering=stream_token,
+                    public_room_stream_id=public_room_stream_id,
+                    current_limit=new_limit,
+                    direction_is_forward=False,
+                ).to_token()
+
+            if since_token:
+                results["next_batch"] = since_token.copy_and_replace(
+                    direction_is_forward=True,
+                    current_limit=since_token.current_limit - 1,
+                ).to_token()
+
+        defer.returnValue(results)
+
     @defer.inlineCallbacks
-    def handle_room(room_id):
+    def _generate_room_entry(self, room_id, num_joined_users, chunk, limit,
+                             search_filter):
         if limit and len(chunk) > limit + 1:
             # We've already got enough, so let's just drop it.
             return
 
-        num_joined_users = rooms_to_num_joined[room_id]
-        if num_joined_users == 0:
-            return
-
-        if room_id in newly_unpublished:
-            return
-
         result = {
             "room_id": room_id,
             "num_joined_members": num_joined_users,
@@ -216,70 +306,6 @@ class RoomListHandler(BaseHandler):
             if _matches_room_entry(result, search_filter):
                 chunk.append(result)
 
-        yield concurrently_execute(handle_room, rooms_to_scan, 10)
-
-        chunk.sort(key=lambda e: (-e["num_joined_members"], e["room_id"]))
-
-        # Work out the new limit of the batch for pagination, or None if we
-        # know there are no more results that would be returned.
-        new_limit = None
-        if chunk and (not limit or len(chunk) > limit):
-            if limit:
-                chunk = chunk[:limit]
-
-            addition = 1
-            if since_token:
-                addition += since_token.current_limit
-
-            if not since_token or since_token.direction_is_forward:
-                last_room_id = chunk[-1]["room_id"]
-            else:
-                last_room_id = chunk[0]["room_id"]
-                addition *= -1
-
-            try:
-                new_limit = sorted_rooms.index(last_room_id) + addition
-                if new_limit >= len(sorted_rooms):
-                    new_limit = None
-            except ValueError:
-                pass
-
-        results = {
-            "chunk": chunk,
-        }
-
-        if since_token:
-            results["new_rooms"] = bool(newly_visible)
-
-        if not since_token or since_token.direction_is_forward:
-            if new_limit:
-                results["next_batch"] = RoomListNextBatch(
-                    stream_ordering=stream_token,
-                    public_room_stream_id=public_room_stream_id,
-                    current_limit=new_limit,
-                    direction_is_forward=True,
-                ).to_token()
-
-            if since_token:
-                results["prev_batch"] = since_token.copy_and_replace(
-                    direction_is_forward=False,
-                ).to_token()
-        else:
-            if new_limit:
-                results["prev_batch"] = RoomListNextBatch(
-                    stream_ordering=stream_token,
-                    public_room_stream_id=public_room_stream_id,
-                    current_limit=new_limit,
-                    direction_is_forward=False,
-                ).to_token()
-
-            if since_token:
-                results["next_batch"] = since_token.copy_and_replace(
-                    direction_is_forward=True,
-                ).to_token()
-
-        defer.returnValue(results)
-
     @defer.inlineCallbacks
     def get_remote_public_room_list(self, server_name, limit=None, since_token=None,
                                     search_filter=None):
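For contrast with the token arithmetic deleted above, the new scheme's tokens are plain indices into the stable sorted_rooms list: current_limit is the index of the last room already sent, so a forward page resumes at current_limit + 1. A small sketch with made-up room ids:

sorted_rooms = ["!a:hs", "!b:hs", "!c:hs", "!d:hs", "!e:hs"]  # stable ordering

def page_forward(current_limit, limit):
    # current_limit is the index of the last room already returned,
    # or None for the first page
    start = 0 if current_limit is None else current_limit + 1
    page = sorted_rooms[start:start + limit]
    new_limit = sorted_rooms.index(page[-1]) if page else None
    return page, new_limit  # new_limit becomes the next token's current_limit

page1, token = page_forward(None, 2)   # ["!a:hs", "!b:hs"], token == 1
page2, token = page_forward(token, 2)  # ["!c:hs", "!d:hs"], token == 3
assert page2 == ["!c:hs", "!d:hs"] and token == 3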
@@ -361,12 +387,12 @@ class RoomListNextBatch(namedtuple("RoomListNextBatch", (
 def _matches_room_entry(room_entry, search_filter):
     if search_filter and search_filter.get("generic_search_term", None):
-        generic_search_term = search_filter["generic_search_term"]
-        if generic_search_term in room_entry.get("name", ""):
+        generic_search_term = search_filter["generic_search_term"].upper()
+        if generic_search_term in room_entry.get("name", "").upper():
             return True
-        elif generic_search_term in room_entry.get("topic", ""):
+        elif generic_search_term in room_entry.get("topic", "").upper():
             return True
-        elif generic_search_term in room_entry.get("canonical_alias", ""):
+        elif generic_search_term in room_entry.get("canonical_alias", "").upper():
             return True
     else:
         return True
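The .upper() calls in the hunk above make the search a plain case-insensitive substring match; a quick illustration with made-up data:

room_entry = {"name": "Matrix HQ", "canonical_alias": "#matrix:matrix.org"}
search_filter = {"generic_search_term": "matrix"}

term = search_filter["generic_search_term"].upper()
# "MATRIX" is found in "MATRIX HQ" even though the cases differ
assert term in room_entry.get("name", "").upper()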

synapse/push/baserules.py View File

@@ -263,6 +263,8 @@ BASE_APPEND_UNDERRIDE_RULES = [
             }
         ]
     },
+    # XXX: once m.direct is standardised everywhere, we should use it to detect
+    # a DM from the user's perspective rather than this heuristic.
     {
         'rule_id': 'global/underride/.m.rule.room_one_to_one',
         'conditions': [
@@ -289,6 +291,34 @@ BASE_APPEND_UNDERRIDE_RULES = [
             }
         ]
     },
+    # XXX: this is going to fire for events which aren't m.room.messages
+    # but are encrypted (e.g. m.call.*)...
+    {
+        'rule_id': 'global/underride/.m.rule.encrypted_room_one_to_one',
+        'conditions': [
+            {
+                'kind': 'room_member_count',
+                'is': '2',
+                '_id': 'member_count',
+            },
+            {
+                'kind': 'event_match',
+                'key': 'type',
+                'pattern': 'm.room.encrypted',
+                '_id': '_encrypted',
+            }
+        ],
+        'actions': [
+            'notify',
+            {
+                'set_tweak': 'sound',
+                'value': 'default'
+            }, {
+                'set_tweak': 'highlight',
+                'value': False
+            }
+        ]
+    },
     {
         'rule_id': 'global/underride/.m.rule.message',
         'conditions': [
@@ -305,6 +335,25 @@ BASE_APPEND_UNDERRIDE_RULES = [
                 'value': False
             }
         ]
+    },
+    # XXX: this is going to fire for events which aren't m.room.messages
+    # but are encrypted (e.g. m.call.*)...
+    {
+        'rule_id': 'global/underride/.m.rule.encrypted',
+        'conditions': [
+            {
+                'kind': 'event_match',
+                'key': 'type',
+                'pattern': 'm.room.encrypted',
+                '_id': '_encrypted',
+            }
+        ],
+        'actions': [
+            'notify', {
+                'set_tweak': 'highlight',
+                'value': False
+            }
+        ]
     }
 ]
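For context on the new rules: an event_match condition compares a field of the event against a pattern, so .m.rule.encrypted fires for any event whose type is m.room.encrypted, which is exactly why the XXX comments warn about encrypted non-message events such as m.call.*. A simplified sketch of the matching (the real evaluator in synapse.push also handles glob patterns and dotted key paths like content.body):

def event_match(event, condition):
    # simplified: exact comparison on a top-level key
    return event.get(condition["key"]) == condition["pattern"]

encrypted_event = {"type": "m.room.encrypted", "content": {"ciphertext": "..."}}
condition = {
    "kind": "event_match",
    "key": "type",
    "pattern": "m.room.encrypted",
}
assert event_match(encrypted_event, condition)
# ...so the rule's actions apply: notify, with highlight set to False.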

synapse/rest/client/v1/room.py View File

@@ -307,7 +307,7 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
         server = parse_string(request, "server", default=None)
 
         try:
-            yield self.auth.get_user_by_req(request)
+            yield self.auth.get_user_by_req(request, allow_guest=True)
         except AuthError as e:
             # We allow people to not be authed if they're just looking at our
             # room list, but require auth when we proxy the request.
@@ -339,7 +339,7 @@ class PublicRoomListRestServlet(ClientV1RestServlet):
     @defer.inlineCallbacks
     def on_POST(self, request):
-        yield self.auth.get_user_by_req(request)
+        yield self.auth.get_user_by_req(request, allow_guest=True)
 
         server = parse_string(request, "server", default=None)
         content = parse_json_object_from_request(request)
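The allow_guest=True change lets guest accounts read the public rooms list; the surrounding try/except preserves the existing behaviour whereby a fully unauthenticated request may still read the local list but may not be proxied to a remote server. A condensed, self-contained sketch of that control flow (hypothetical stubs, not the servlet's actual code):

class AuthError(Exception):
    pass

class Auth:
    def get_user_by_req(self, request, allow_guest=False):
        # stub: treat a missing access token as unauthenticated
        if request.get("access_token") is None:
            raise AuthError("no token")
        return request["access_token"]

def list_public_rooms(request, auth, server=None):
    try:
        auth.get_user_by_req(request, allow_guest=True)
    except AuthError:
        if server is not None:
            raise  # proxying to a remote server requires auth
    return {"chunk": [], "server": server}  # placeholder result

# an unauthenticated request may still read the local list:
assert list_public_rooms({"access_token": None}, Auth()) == {"chunk": [], "server": None}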