Merge branch 'release-v0.99.0' into develop

Richard van der Hoff 2019-01-31 18:43:20 +00:00
commit 625385d684
6 changed files with 103 additions and 13 deletions

View File

@@ -1,3 +1,24 @@
Synapse 0.99.0rc3 (2019-01-31)
==============================

Bugfixes
--------

- Fix infinite loop when an event is redacted in a v3 room ([\#4535](https://github.com/matrix-org/synapse/issues/4535))

Improved Documentation
----------------------

- Update debian installation instructions ([\#4526](https://github.com/matrix-org/synapse/issues/4526))

Internal Changes
----------------

- Add some debug for membership syncing issues ([\#4538](https://github.com/matrix-org/synapse/issues/4538))

Synapse 0.99.0rc2 (2019-01-30)
==============================

View File

@@ -333,12 +333,38 @@ https://developer.github.com/changes/2014-04-25-user-content-security for more d
Platform-Specific Instructions
==============================

Debian
------

Debian/Ubuntu
-------------

Matrix provides official Debian packages via apt from https://matrix.org/packages/debian/.
Note that these packages do not include a client - choose one from
https://matrix.org/docs/projects/try-matrix-now.html (or build your own with one of our SDKs :)

Matrix.org packages
~~~~~~~~~~~~~~~~~~~

Matrix.org provides Debian/Ubuntu packages of the latest stable version of
Synapse via https://matrix.org/packages/debian/. To use them::

    sudo apt install -y lsb-release curl apt-transport-https

    echo "deb https://matrix.org/packages/debian `lsb_release -cs` main" |
        sudo tee /etc/apt/sources.list.d/matrix-org.list

    curl "https://matrix.org/packages/debian/repo-key.asc" |
        sudo apt-key add -

    sudo apt update
    sudo apt install matrix-synapse-py3
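
As a quick sanity check (an editorial sketch, not part of the original
instructions; the service name here is assumed from the package defaults), you
can confirm what apt installed and that the service came up::

    apt-cache policy matrix-synapse-py3
    systemctl status matrix-synapse
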
Downstream Debian/Ubuntu packages
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For ``buster`` and ``sid``, Synapse is available in the Debian repositories and
it should be possible to install it simply with::

    sudo apt install matrix-synapse

There is also a version of ``matrix-synapse`` in ``stretch-backports``. Please
see the `Debian documentation on backports
<https://backports.debian.org/Instructions/>`_ for information on how to use
them.
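
For illustration only (the linked Debian instructions are the authoritative
reference, and the suite name is taken from the paragraph above), installing
from backports means selecting the backports suite explicitly::

    sudo apt install -t stretch-backports matrix-synapse
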
We do not recommend using the packages in downstream Ubuntu at this time, as
they are old and suffer from known security vulnerabilities.

Fedora
------

View File

@@ -6,8 +6,10 @@ To use it, first install prometheus by following the instructions at
http://prometheus.io/

### for Prometheus v1

Add a new job to the main prometheus.conf file:

```yaml
job: {
  name: "synapse"
@@ -15,10 +17,12 @@ Add a new job to the main prometheus.conf file:
target: "http://SERVER.LOCATION.HERE:PORT/_synapse/metrics"
}
}
```
### for Prometheus v2
Add a new job to the main prometheus.yml file:
```yaml
- job_name: "synapse"
metrics_path: "/_synapse/metrics"
# when endpoint uses https:
@@ -26,11 +30,14 @@ Add a new job to the main prometheus.yml file:
    static_configs:
      - targets: ['SERVER.LOCATION:PORT']
```

To use `synapse.rules`, add:

```yaml
rule_files:
  - "/PATH/TO/synapse-v2.rules"
```
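
Putting the fragments together, a minimal `prometheus.yml` for v2 might look
like this (a sketch only: the target and rules path are placeholders, and
`scrape_configs` is the standard Prometheus section that the `- job_name`
entry above lives under):

```yaml
global:
  scrape_interval: 15s

rule_files:
  - "/PATH/TO/synapse-v2.rules"

scrape_configs:
  - job_name: "synapse"
    metrics_path: "/_synapse/metrics"
    static_configs:
      - targets: ['SERVER.LOCATION:PORT']
```
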
Metrics are disabled by default when running synapse; they must be enabled
with the 'enable-metrics' option, either in the synapse config file or as a

View File

@@ -27,4 +27,4 @@ try:
except ImportError:
    pass

__version__ = "0.99.0rc2"
__version__ = "0.99.0rc3"

View File

@@ -895,14 +895,17 @@ class SyncHandler(object):
        Returns:
            Deferred(SyncResult)
        """
        logger.info("Calculating sync response for %r", sync_config.user)

        # NB: The now_token gets changed by some of the generate_sync_* methods,
        # this is due to some of the underlying streams not supporting the ability
        # to query up to a given point.

        # Always use the `now_token` in `SyncResultBuilder`
        now_token = yield self.event_sources.get_current_token()

        logger.info(
            "Calculating sync response for %r between %s and %s",
            sync_config.user, since_token, now_token,
        )

        user_id = sync_config.user.to_string()
        app_service = self.store.get_app_service_by_user_id(user_id)
        if app_service:
@@ -1390,6 +1393,12 @@ class SyncHandler(object):
        room_entries = []
        invited = []

        for room_id, events in iteritems(mem_change_events_by_room_id):
            logger.info(
                "Membership changes in %s: [%s]",
                room_id,
                ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
            )

            non_joins = [e for e in events if e.membership != Membership.JOIN]
            has_join = len(non_joins) != len(events)
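
To make the new debug line concrete, here is a tiny self-contained sketch
(stand-in event objects, not Synapse's real classes) of what the log message
and the join/non-join split above compute for one room:

```python
from collections import namedtuple

# Stand-in for a membership event; real Synapse events carry much more state.
Event = namedtuple("Event", ["event_id", "membership"])

events = [
    Event("$e1", "join"),
    Event("$e2", "leave"),
    Event("$e3", "join"),
]

# Mirrors the logger.info call above:
# prints "$e1 (join), $e2 (leave), $e3 (join)"
print(", ".join("%s (%s)" % (e.event_id, e.membership) for e in events))

# Mirrors the filtering above: non_joins holds only the leave event, and
# has_join is True because at least one of the room's events was a join.
non_joins = [e for e in events if e.membership != "join"]
has_join = len(non_joins) != len(events)
print(non_joins, has_join)
```
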

View File

@@ -161,6 +161,12 @@ class EventsWorkerStore(SQLBaseStore):
            log_ctx = LoggingContext.current_context()
            log_ctx.record_event_fetch(len(missing_events_ids))

            # Note that _enqueue_events is also responsible for turning db rows
            # into FrozenEvents (via _get_event_from_row), which involves seeing if
            # the events have been redacted, and if so pulling the redaction event out
            # of the database to check it.
            #
            # _enqueue_events is a bit of a rubbish name but naming is hard.
            missing_events = yield self._enqueue_events(
                missing_events_ids,
                allow_rejected=allow_rejected,
@@ -179,14 +185,35 @@ class EventsWorkerStore(SQLBaseStore):
            # instead.
            if not allow_rejected and entry.event.type == EventTypes.Redaction:
                if entry.event.internal_metadata.need_to_check_redaction():
                    orig = yield self.get_event(
                        entry.event.redacts,

                    # XXX: we need to avoid calling get_event here.
                    #
                    # The problem is that we end up at this point when an event
                    # which has been redacted is pulled out of the database by
                    # _enqueue_events, because _enqueue_events needs to check the
                    # redaction before it can cache the redacted event. So obviously,
                    # calling get_event to get the redacted event out of the database
                    # gives us an infinite loop.
                    #
                    # For now (quick hack to fix during 0.99 release cycle), we just
                    # go and fetch the relevant row from the db, but it would be nice
                    # to think about how we can cache this rather than hit the db
                    # every time we access a redaction event.
                    #
                    # One thought on how to do this:
                    #  1. split _get_events up so that it is divided into (a) get the
                    #     rawish event from the db/cache, (b) do the redaction/rejection
                    #     filtering
                    #  2. have _get_event_from_row just call the first half of that
                    orig_sender = yield self._simple_select_one_onecol(
                        table="events",
                        keyvalues={"event_id": entry.event.redacts},
                        retcol="sender",
                        allow_none=True,
                        allow_rejected=True,
                        get_prev_content=False,
                    )

                    expected_domain = get_domain_from_id(entry.event.sender)
                    if orig and get_domain_from_id(orig.sender) == expected_domain:
                    if orig_sender and get_domain_from_id(orig_sender) == expected_domain:
                        # This redaction event is allowed. Mark as not needing a
                        # recheck.
                        entry.event.internal_metadata.recheck_redaction = False
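
The check above encodes the rule that a redaction is provisionally accepted
only when its sender is on the same server as the sender of the event it
redacts. A minimal illustration (toy functions, not Synapse's implementation,
although `get_domain_from_id` does behave like this for well-formed IDs):

```python
def get_domain_from_id(user_id):
    # Matrix user IDs look like "@localpart:server.name"; the domain is
    # everything after the first colon.
    return user_id.split(":", 1)[1]

def redaction_sender_allowed(redaction_sender, original_sender):
    # No original sender (event not found) means we cannot validate here.
    if original_sender is None:
        return False
    return get_domain_from_id(redaction_sender) == get_domain_from_id(original_sender)

# Same homeserver: allowed; different homeserver: left for a later recheck.
assert redaction_sender_allowed("@mod:example.org", "@user:example.org")
assert not redaction_sender_allowed("@mod:example.org", "@user:elsewhere.net")
```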