Merge branch 'develop' of github.com:matrix-org/synapse into erikj/logging_context
This commit is contained in:
commit da6a7bbdde
@@ -35,3 +35,6 @@ Turned to Dust <dwinslow86 at gmail.com>
 
 Brabo <brabo at riseup.net>
  * Installation instruction fixes
+
+Ivan Shapovalov <intelfx100 at gmail.com>
+ * contrib/systemd: a sample systemd unit file and a logger configuration
@@ -10,6 +10,8 @@ General:
 * Fix race in caches that occasionally caused some presence updates to be
   dropped - SYN-369.
 * Check server name has not changed on restart.
+* Add a sample systemd unit file and a logger configuration in
+  contrib/systemd. Contributed Ivan Shapovalov.
 
 Federation:
 
@@ -1,4 +1,4 @@
-Upgrading to v0.x.x
+Upgrading to v0.9.0
 ===================
 
 Application services have had a breaking API change in this version.
@@ -20,7 +20,7 @@ The format of the AS configuration file is as follows:
 
   url: <base url of AS>
   as_token: <token AS will add to requests to HS>
-  hs_token: <token HS will ad to requests to AS>
+  hs_token: <token HS will add to requests to AS>
   sender_localpart: <localpart of AS user>
   namespaces:
     users: # List of users we're interested in
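
For illustration only, a filled-in registration file in the format above might be generated
like this (all values are hypothetical; a minimal sketch, not an official example):

    # Sketch: build and print a sample application service registration file
    # matching the fields shown above. Values are made up for illustration.
    import yaml

    registration = {
        "url": "http://localhost:9000",          # base url of the AS
        "as_token": "a1b2c3",                    # token the AS adds to requests to the HS
        "hs_token": "d4e5f6",                    # token the HS adds to requests to the AS
        "sender_localpart": "my_bridge",         # localpart of the AS user
        "namespaces": {
            "users": [                           # list of users we're interested in
                {"exclusive": True, "regex": "@bridge_.*"},
            ],
        },
    }

    print(yaml.safe_dump(registration, default_flow_style=False))
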
@@ -82,13 +82,13 @@ complete, restart synapse. For instance::
     cp homeserver.db homeserver.db.snapshot
     ./synctl start
 
-Assuming your database config file (as described in the section *Synapse
-config*) is named ``database_config.yaml`` and the SQLite snapshot is at
+Assuming your new config file (as described in the section *Synapse config*)
+is named ``homeserver-postgres.yaml`` and the SQLite snapshot is at
 ``homeserver.db.snapshot`` then simply run::
 
     python scripts/port_from_sqlite_to_postgres.py \
         --sqlite-database homeserver.db.snapshot \
-        --postgres-config database_config.yaml
+        --postgres-config homeserver-postgres.yaml
 
 The flag ``--curses`` displays a coloured curses progress UI.
 
scripts-dev/convert_server_keys.py (new file, 113 lines)
@@ -0,0 +1,113 @@
+import psycopg2
+import yaml
+import sys
+import json
+import time
+import hashlib
+from syutil.base64util import encode_base64
+from syutil.crypto.signing_key import read_signing_keys
+from syutil.crypto.jsonsign import sign_json
+from syutil.jsonutil import encode_canonical_json
+
+
+def select_v1_keys(connection):
+    cursor = connection.cursor()
+    cursor.execute("SELECT server_name, key_id, verify_key FROM server_signature_keys")
+    rows = cursor.fetchall()
+    cursor.close()
+    results = {}
+    for server_name, key_id, verify_key in rows:
+        results.setdefault(server_name, {})[key_id] = encode_base64(verify_key)
+    return results
+
+
+def select_v1_certs(connection):
+    cursor = connection.cursor()
+    cursor.execute("SELECT server_name, tls_certificate FROM server_tls_certificates")
+    rows = cursor.fetchall()
+    cursor.close()
+    results = {}
+    for server_name, tls_certificate in rows:
+        results[server_name] = tls_certificate
+    return results
+
+
+def select_v2_json(connection):
+    cursor = connection.cursor()
+    cursor.execute("SELECT server_name, key_id, key_json FROM server_keys_json")
+    rows = cursor.fetchall()
+    cursor.close()
+    results = {}
+    for server_name, key_id, key_json in rows:
+        results.setdefault(server_name, {})[key_id] = json.loads(str(key_json).decode("utf-8"))
+    return results
+
+
+def convert_v1_to_v2(server_name, valid_until, keys, certificate):
+    return {
+        "old_verify_keys": {},
+        "server_name": server_name,
+        "verify_keys": keys,
+        "valid_until_ts": valid_until,
+        "tls_fingerprints": [fingerprint(certificate)],
+    }
+
+
+def fingerprint(certificate):
+    finger = hashlib.sha256(certificate)
+    return {"sha256": encode_base64(finger.digest())}
+
+
+def rows_v2(server, json):
+    valid_until = json["valid_until_ts"]
+    key_json = encode_canonical_json(json)
+    for key_id in json["verify_keys"]:
+        yield (server, key_id, "-", valid_until, valid_until, buffer(key_json))
+
+
+def main():
+    config = yaml.load(open(sys.argv[1]))
+    valid_until = int(time.time() / (3600 * 24)) * 1000 * 3600 * 24
+
+    server_name = config["server_name"]
+    signing_key = read_signing_keys(open(config["signing_key_path"]))[0]
+
+    database = config["database"]
+    assert database["name"] == "psycopg2", "Can only convert for postgresql"
+    args = database["args"]
+    args.pop("cp_max")
+    args.pop("cp_min")
+    connection = psycopg2.connect(**args)
+    keys = select_v1_keys(connection)
+    certificates = select_v1_certs(connection)
+    json = select_v2_json(connection)
+
+    result = {}
+    for server in keys:
+        if not server in json:
+            v2_json = convert_v1_to_v2(
+                server, valid_until, keys[server], certificates[server]
+            )
+            v2_json = sign_json(v2_json, server_name, signing_key)
+            result[server] = v2_json
+
+    yaml.safe_dump(result, sys.stdout, default_flow_style=False)
+
+    rows = list(
+        row for server, json in result.items()
+        for row in rows_v2(server, json)
+    )
+
+    cursor = connection.cursor()
+    cursor.executemany(
+        "INSERT INTO server_keys_json ("
+        " server_name, key_id, from_server,"
+        " ts_added_ms, ts_valid_until_ms, key_json"
+        ") VALUES (%s, %s, %s, %s, %s, %s)",
+        rows
+    )
+    connection.commit()
+
+
+if __name__ == '__main__':
+    main()
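
For reference, a sketch of the v2 key dictionary that convert_v1_to_v2() above builds for
one server before it is signed and written out (values are hypothetical):

    # Hypothetical example of the structure produced by convert_v1_to_v2();
    # the real script signs this dict with sign_json(), dumps it as YAML and
    # then inserts one row per verify key into server_keys_json.
    example_v2_key = {
        "server_name": "example.com",
        "old_verify_keys": {},
        "verify_keys": {
            "ed25519:auto": "<base64-encoded verify key>",
        },
        "valid_until_ts": 1431561600000,   # a day boundary, in milliseconds
        "tls_fingerprints": [
            {"sha256": "<base64-encoded SHA-256 of the TLS certificate>"},
        ],
    }
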
@@ -106,7 +106,7 @@ class Store(object):
             try:
                 txn = conn.cursor()
                 return func(
-                    LoggingTransaction(txn, desc, self.database_engine),
+                    LoggingTransaction(txn, desc, self.database_engine, []),
                     *args, **kwargs
                 )
             except self.database_engine.module.DatabaseError as e:
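
The extra "[]" passed to LoggingTransaction here is presumably the new after-callbacks list
used by txn.call_after(...) elsewhere in this diff. A minimal sketch of that pattern (an
assumption about the intent, not Synapse's actual class):

    # Sketch: a cursor wrapper that collects callbacks to run only after the
    # surrounding transaction has committed successfully.
    class LoggingTransactionSketch(object):
        def __init__(self, txn, name, database_engine, after_callbacks):
            self.txn = txn
            self.name = name
            self.database_engine = database_engine
            self.after_callbacks = after_callbacks   # shared with the caller

        def call_after(self, callback, *args, **kwargs):
            # Deferred until the caller knows the transaction committed.
            self.after_callbacks.append((callback, args, kwargs))

        def execute(self, sql, *args):
            return self.txn.execute(sql, *args)

    # After a successful commit, the caller would then run:
    #   for callback, args, kwargs in after_callbacks:
    #       callback(*args, **kwargs)
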
@@ -378,9 +378,7 @@ class Porter(object):
 
             for i, row in enumerate(rows):
                 rows[i] = tuple(
-                    self.postgres_store.database_engine.encode_parameter(
-                        conv(j, col)
-                    )
+                    conv(j, col)
                     for j, col in enumerate(row)
                     if j > 0
                 )
@@ -725,6 +723,9 @@ if __name__ == "__main__":
 
     postgres_config = yaml.safe_load(args.postgres_config)
 
+    if "database" in postgres_config:
+        postgres_config = postgres_config["database"]
+
     if "name" not in postgres_config:
         sys.stderr.write("Malformed database config: no 'name'")
         sys.exit(2)
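
The added lines let the port script accept either a bare database block or a full homeserver
config whose "database" section gets unwrapped. A small sketch of that normalisation, using
hypothetical config contents:

    # Sketch: both accepted config shapes normalise to the same database settings.
    full_config = {"database": {"name": "psycopg2", "args": {"user": "synapse"}}}
    bare_config = {"name": "psycopg2", "args": {"user": "synapse"}}

    def normalise(postgres_config):
        # Unwrap the "database" section if a whole homeserver config was given.
        if "database" in postgres_config:
            postgres_config = postgres_config["database"]
        if "name" not in postgres_config:
            raise ValueError("Malformed database config: no 'name'")
        return postgres_config

    assert normalise(full_config) == normalise(bare_config)
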
@@ -16,4 +16,4 @@
 """ This is a reference implementation of a Matrix home server.
 """
 
-__version__ = "0.9.0"
+__version__ = "0.9.0-r4"
@@ -51,7 +51,7 @@ logger = logging.getLogger(__name__)
 
 # Remember to update this number every time a change is made to database
 # schema files, so the users will be informed on server restarts.
-SCHEMA_VERSION = 17
+SCHEMA_VERSION = 18
 
 dir_path = os.path.abspath(os.path.dirname(__file__))
 
@@ -308,6 +308,7 @@ class SQLBaseStore(object):
         self._state_groups_id_gen = IdGenerator("state_groups", "id", self)
         self._access_tokens_id_gen = IdGenerator("access_tokens", "id", self)
         self._pushers_id_gen = IdGenerator("pushers", "id", self)
+        self._push_rule_id_gen = IdGenerator("push_rules", "id", self)
 
     def start_profiling(self):
         self._previous_loop_ts = self._clock.time_msec()
@@ -23,6 +23,7 @@ from synapse.crypto.event_signing import compute_event_reference_hash
 
 from syutil.base64util import decode_base64
 from syutil.jsonutil import encode_canonical_json
+from contextlib import contextmanager
 
 import logging
 
@@ -41,17 +42,25 @@ class EventsStore(SQLBaseStore):
             self.min_token -= 1
             stream_ordering = self.min_token
 
+        if stream_ordering is None:
+            stream_ordering_manager = yield self._stream_id_gen.get_next(self)
+        else:
+            @contextmanager
+            def stream_ordering_manager():
+                yield stream_ordering
+
         try:
-            yield self.runInteraction(
-                "persist_event",
-                self._persist_event_txn,
-                event=event,
-                context=context,
-                backfilled=backfilled,
-                stream_ordering=stream_ordering,
-                is_new_state=is_new_state,
-                current_state=current_state,
-            )
+            with stream_ordering_manager as stream_ordering:
+                yield self.runInteraction(
+                    "persist_event",
+                    self._persist_event_txn,
+                    event=event,
+                    context=context,
+                    backfilled=backfilled,
+                    stream_ordering=stream_ordering,
+                    is_new_state=is_new_state,
+                    current_state=current_state,
+                )
         except _RollbackButIsFineException:
             pass
 
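
The new branch gives both code paths the same shape: something usable as
"with ... as stream_ordering", whether the ordering comes from the id generator or is a
fixed value computed above. A simplified, synchronous sketch of that trick (no Twisted,
not the actual Synapse code):

    from contextlib import contextmanager

    def get_ordering_manager(id_gen, fixed_ordering=None):
        # Either hand back the generator's own context manager, or wrap the
        # fixed value so callers can treat both cases identically.
        if fixed_ordering is None:
            return id_gen.get_next()
        @contextmanager
        def fixed_manager():
            yield fixed_ordering
        return fixed_manager()

    class FakeIdGen(object):
        def __init__(self):
            self._current = 0
        @contextmanager
        def get_next(self):
            self._current += 1
            yield self._current

    id_gen = FakeIdGen()
    with get_ordering_manager(id_gen) as stream_ordering:
        print(stream_ordering)                    # 1, allocated by the generator
    with get_ordering_manager(id_gen, fixed_ordering=-5) as stream_ordering:
        print(stream_ordering)                    # -5, the caller-supplied value
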
@@ -95,15 +104,6 @@ class EventsStore(SQLBaseStore):
         # Remove the any existing cache entries for the event_id
         txn.call_after(self._invalidate_get_event_cache, event.event_id)
 
-        if stream_ordering is None:
-            with self._stream_id_gen.get_next_txn(txn) as stream_ordering:
-                return self._persist_event_txn(
-                    txn, event, context, backfilled,
-                    stream_ordering=stream_ordering,
-                    is_new_state=is_new_state,
-                    current_state=current_state,
-                )
-
         # We purposefully do this first since if we include a `current_state`
         # key, we *want* to update the `current_state_events` table
         if current_state:
@@ -19,7 +19,6 @@ from ._base import SQLBaseStore, Table
 from twisted.internet import defer
 
 import logging
-import copy
 import simplejson as json
 
 logger = logging.getLogger(__name__)
@@ -28,46 +27,45 @@ logger = logging.getLogger(__name__)
 class PushRuleStore(SQLBaseStore):
     @defer.inlineCallbacks
     def get_push_rules_for_user(self, user_name):
-        sql = (
-            "SELECT "+",".join(PushRuleTable.fields)+" "
-            "FROM "+PushRuleTable.table_name+" "
-            "WHERE user_name = ? "
-            "ORDER BY priority_class DESC, priority DESC"
+        rows = yield self._simple_select_list(
+            table=PushRuleTable.table_name,
+            keyvalues={
+                "user_name": user_name,
+            },
+            retcols=PushRuleTable.fields,
         )
-        rows = yield self._execute("get_push_rules_for_user", None, sql, user_name)
 
-        dicts = []
-        for r in rows:
-            d = {}
-            for i, f in enumerate(PushRuleTable.fields):
-                d[f] = r[i]
-            dicts.append(d)
+        rows.sort(
+            key=lambda row: (-int(row["priority_class"]), -int(row["priority"]))
+        )
 
-        defer.returnValue(dicts)
+        defer.returnValue(rows)
 
     @defer.inlineCallbacks
     def get_push_rules_enabled_for_user(self, user_name):
         results = yield self._simple_select_list(
-            PushRuleEnableTable.table_name,
-            {'user_name': user_name},
-            PushRuleEnableTable.fields,
+            table=PushRuleEnableTable.table_name,
+            keyvalues={
+                'user_name': user_name
+            },
+            retcols=PushRuleEnableTable.fields,
             desc="get_push_rules_enabled_for_user",
         )
-        defer.returnValue(
-            {r['rule_id']: False if r['enabled'] == 0 else True for r in results}
-        )
+        defer.returnValue({
+            r['rule_id']: False if r['enabled'] == 0 else True for r in results
+        })
 
     @defer.inlineCallbacks
     def add_push_rule(self, before, after, **kwargs):
-        vals = copy.copy(kwargs)
+        vals = kwargs
         if 'conditions' in vals:
             vals['conditions'] = json.dumps(vals['conditions'])
         if 'actions' in vals:
             vals['actions'] = json.dumps(vals['actions'])
 
         # we could check the rest of the keys are valid column names
         # but sqlite will do that anyway so I think it's just pointless.
-        if 'id' in vals:
-            del vals['id']
+        vals.pop("id", None)
 
         if before or after:
             ret = yield self.runInteraction(
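
The hand-written SELECT above is replaced with the store's _simple_select_list helper, which
roughly builds the query from a table name, an equality WHERE map and the wanted columns, and
returns the rows as dicts. A rough standalone sketch of that kind of helper (not the actual
Synapse implementation):

    # Sketch: build a SELECT from table/keyvalues/retcols and return dict rows.
    def simple_select_list(txn, table, keyvalues, retcols):
        sql = "SELECT %s FROM %s WHERE %s" % (
            ", ".join(retcols),
            table,
            " AND ".join("%s = ?" % k for k in keyvalues),
        )
        txn.execute(sql, list(keyvalues.values()))
        return [dict(zip(retcols, row)) for row in txn.fetchall()]

    # Usage in the spirit of the rewritten get_push_rules_for_user:
    #   rows = simple_select_list(txn, "push_rules", {"user_name": user_name}, fields)
    #   rows.sort(key=lambda r: (-int(r["priority_class"]), -int(r["priority"])))
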
@@ -87,39 +85,39 @@ class PushRuleStore(SQLBaseStore):
         defer.returnValue(ret)
 
     def _add_push_rule_relative_txn(self, txn, user_name, **kwargs):
-        after = None
-        relative_to_rule = None
-        if 'after' in kwargs and kwargs['after']:
-            after = kwargs['after']
-            relative_to_rule = after
-        if 'before' in kwargs and kwargs['before']:
-            relative_to_rule = kwargs['before']
+        after = kwargs.pop("after", None)
+        relative_to_rule = kwargs.pop("before", after)
 
-        # get the priority of the rule we're inserting after/before
-        sql = (
-            "SELECT priority_class, priority FROM ? "
-            "WHERE user_name = ? and rule_id = ?" % (PushRuleTable.table_name,)
+        res = self._simple_select_one_txn(
+            txn,
+            table=PushRuleTable.table_name,
+            keyvalues={
+                "user_name": user_name,
+                "rule_id": relative_to_rule,
+            },
+            retcols=["priority_class", "priority"],
+            allow_none=True,
         )
-        txn.execute(sql, (user_name, relative_to_rule))
-        res = txn.fetchall()
+
         if not res:
             raise RuleNotFoundException(
                 "before/after rule not found: %s" % (relative_to_rule,)
             )
-        priority_class, base_rule_priority = res[0]
+
+        priority_class = res["priority_class"]
+        base_rule_priority = res["priority"]
 
         if 'priority_class' in kwargs and kwargs['priority_class'] != priority_class:
             raise InconsistentRuleException(
                 "Given priority class does not match class of relative rule"
             )
 
-        new_rule = copy.copy(kwargs)
-        if 'before' in new_rule:
-            del new_rule['before']
-        if 'after' in new_rule:
-            del new_rule['after']
+        new_rule = kwargs
+        new_rule.pop("before", None)
+        new_rule.pop("after", None)
         new_rule['priority_class'] = priority_class
         new_rule['user_name'] = user_name
+        new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
 
         # check if the priority before/after is free
         new_rule_priority = base_rule_priority
@@ -153,12 +151,11 @@ class PushRuleStore(SQLBaseStore):
 
         txn.execute(sql, (user_name, priority_class, new_rule_priority))
 
-        # now insert the new rule
-        sql = "INSERT INTO "+PushRuleTable.table_name+" ("
-        sql += ",".join(new_rule.keys())+") VALUES ("
-        sql += ", ".join(["?" for _ in new_rule.keys()])+")"
-
-        txn.execute(sql, new_rule.values())
+        self._simple_insert_txn(
+            txn,
+            table=PushRuleTable.table_name,
+            values=new_rule,
+        )
 
     def _add_push_rule_highest_priority_txn(self, txn, user_name,
                                             priority_class, **kwargs):
@@ -176,18 +173,17 @@ class PushRuleStore(SQLBaseStore):
             new_prio = highest_prio + 1
 
         # and insert the new rule
-        new_rule = copy.copy(kwargs)
-        if 'id' in new_rule:
-            del new_rule['id']
+        new_rule = kwargs
+        new_rule['id'] = self._push_rule_id_gen.get_next_txn(txn)
         new_rule['user_name'] = user_name
         new_rule['priority_class'] = priority_class
         new_rule['priority'] = new_prio
 
-        sql = "INSERT INTO "+PushRuleTable.table_name+" ("
-        sql += ",".join(new_rule.keys())+") VALUES ("
-        sql += ", ".join(["?" for _ in new_rule.keys()])+")"
-
-        txn.execute(sql, new_rule.values())
+        self._simple_insert_txn(
+            txn,
+            table=PushRuleTable.table_name,
+            values=new_rule,
+        )
 
     @defer.inlineCallbacks
     def delete_push_rule(self, user_name, rule_id):
@@ -211,7 +207,7 @@ class PushRuleStore(SQLBaseStore):
         yield self._simple_upsert(
             PushRuleEnableTable.table_name,
             {'user_name': user_name, 'rule_id': rule_id},
-            {'enabled': enabled},
+            {'enabled': 1 if enabled else 0},
             desc="set_push_rule_enabled",
         )
 
synapse/storage/schema/delta/18/server_keys_bigger_ints.sql (new file, 32 lines)
@@ -0,0 +1,32 @@
+/* Copyright 2015 OpenMarket Ltd
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+CREATE TABLE IF NOT EXISTS new_server_keys_json (
+    server_name TEXT NOT NULL, -- Server name.
+    key_id TEXT NOT NULL, -- Requested key id.
+    from_server TEXT NOT NULL, -- Which server the keys were fetched from.
+    ts_added_ms BIGINT NOT NULL, -- When the keys were fetched
+    ts_valid_until_ms BIGINT NOT NULL, -- When this version of the keys exipires.
+    key_json bytea NOT NULL, -- JSON certificate for the remote server.
+    CONSTRAINT server_keys_json_uniqueness UNIQUE (server_name, key_id, from_server)
+);
+
+INSERT INTO new_server_keys_json
+    SELECT server_name, key_id, from_server,ts_added_ms, ts_valid_until_ms, key_json FROM server_keys_json ;
+
+DROP TABLE server_keys_json;
+
+ALTER TABLE new_server_keys_json RENAME TO server_keys_json;
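
The point of the delta is that millisecond timestamps overflow a 32-bit INTEGER column:
2**31 - 1 milliseconds is only about 24.8 days past the epoch, while current times are on
the order of 1.4e12 ms, hence the move to BIGINT for ts_added_ms and ts_valid_until_ms.
A quick check of the arithmetic:

    import time

    max_int32 = 2 ** 31 - 1                     # 2147483647
    now_ms = int(time.time() * 1000)            # roughly 1.4e12 around this commit

    print(max_int32 / (1000.0 * 60 * 60 * 24))  # ~24.8 days' worth of milliseconds
    print(now_ms > max_int32)                   # True: a 64-bit column is needed
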
@@ -78,14 +78,18 @@ class StreamIdGenerator(object):
         self._current_max = None
         self._unfinished_ids = deque()
 
-    def get_next_txn(self, txn):
+    @defer.inlineCallbacks
+    def get_next(self, store):
         """
         Usage:
-            with stream_id_gen.get_next_txn(txn) as stream_id:
+            with yield stream_id_gen.get_next as stream_id:
                 # ... persist event ...
         """
         if not self._current_max:
-            self._get_or_compute_current_max(txn)
+            yield store.runInteraction(
+                "_compute_current_max",
+                self._get_or_compute_current_max,
+            )
 
         with self._lock:
             self._current_max += 1
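
get_next() now loads the current maximum via a runInteraction when needed, allocates the
next id under a lock, and hands back a context manager that marks the id finished on exit.
A simplified, synchronous sketch of that allocation pattern (no Twisted, not the actual
class):

    import threading
    from collections import deque
    from contextlib import contextmanager

    class SimpleIdGenerator(object):
        def __init__(self, current_max=0):
            self._lock = threading.Lock()
            self._current_max = current_max
            self._unfinished_ids = deque()

        @contextmanager
        def get_next(self):
            with self._lock:
                self._current_max += 1
                next_id = self._current_max
                self._unfinished_ids.append(next_id)
            try:
                yield next_id                  # caller persists its row with this id
            finally:
                with self._lock:
                    self._unfinished_ids.remove(next_id)

    gen = SimpleIdGenerator(current_max=100)
    with gen.get_next() as stream_id:
        print(stream_id)                       # 101
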
@@ -101,7 +105,7 @@ class StreamIdGenerator(object):
                 with self._lock:
                     self._unfinished_ids.remove(next_id)
 
-        return manager()
+        defer.returnValue(manager())
 
     @defer.inlineCallbacks
     def get_max_token(self, store):