Remove some unused database functions. (#8085)
commit 6b7ce1d332 (parent 894dae74fe)

changelog.d/8085.misc (new file):
@@ -0,0 +1 @@
+Remove some unused database functions.
@@ -257,11 +257,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         # Return all events where not all sets can reach them.
         return {eid for eid, n in event_to_missing_sets.items() if n}
 
-    def get_oldest_events_in_room(self, room_id):
-        return self.db_pool.runInteraction(
-            "get_oldest_events_in_room", self._get_oldest_events_in_room_txn, room_id
-        )
-
     def get_oldest_events_with_depth_in_room(self, room_id):
         return self.db_pool.runInteraction(
             "get_oldest_events_with_depth_in_room",
@@ -303,14 +298,6 @@ class EventFederationWorkerStore(EventsWorkerStore, SignatureWorkerStore, SQLBas
         else:
             return max(row["depth"] for row in rows)
 
-    def _get_oldest_events_in_room_txn(self, txn, room_id):
-        return self.db_pool.simple_select_onecol_txn(
-            txn,
-            table="event_backward_extremities",
-            keyvalues={"room_id": room_id},
-            retcol="event_id",
-        )
-
     def get_prev_events_for_room(self, room_id: str):
         """
         Gets a subset of the current forward extremities in the given room.
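Aside: the two methods removed above were a thin wrapper pair — a `runInteraction` entry point plus a single-column select on `event_backward_extremities`. Should an equivalent ever be needed again, a minimal sketch using the `simple_select_onecol` convenience wrapper on `DatabasePool` (hypothetical, not part of this commit):

```python
def get_oldest_events_in_room(self, room_id):
    # Backward extremities are events whose prev_events have not been
    # fetched yet, i.e. the oldest known edge of the room's DAG.
    return self.db_pool.simple_select_onecol(
        table="event_backward_extremities",
        keyvalues={"room_id": room_id},
        retcol="event_id",
        desc="get_oldest_events_in_room",
    )
```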
@@ -43,7 +43,7 @@ from synapse.storage._base import SQLBaseStore, db_to_json, make_in_list_sql_cla
 from synapse.storage.database import DatabasePool
 from synapse.storage.util.id_generators import StreamIdGenerator
 from synapse.types import get_domain_from_id
-from synapse.util.caches.descriptors import Cache, cached, cachedInlineCallbacks
+from synapse.util.caches.descriptors import Cache, cachedInlineCallbacks
 from synapse.util.iterutils import batch_iter
 from synapse.util.metrics import Measure
 
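Aside: `cached` drops out of this import because `get_all_new_events` (removed further down) was its last user in this module; `Cache` and `cachedInlineCallbacks` are still referenced. For context, a minimal sketch of how the decorator is typically applied — illustrative store and method names, not from this diff:

```python
from synapse.storage._base import SQLBaseStore
from synapse.util.caches.descriptors import cached

class ExampleWorkerStore(SQLBaseStore):
    # @cached memoizes the result per argument tuple; num_args sets how
    # many leading arguments form the cache key, and max_entries bounds
    # the cache size.
    @cached(num_args=1, max_entries=10000)
    def get_event_received_ts(self, event_id):
        return self.db_pool.simple_select_one_onecol(
            table="events",
            keyvalues={"event_id": event_id},
            retcol="received_ts",
            desc="get_event_received_ts",
        )
```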
@@ -137,42 +137,6 @@ class EventsWorkerStore(SQLBaseStore):
             desc="get_received_ts",
         )
 
-    def get_received_ts_by_stream_pos(self, stream_ordering):
-        """Given a stream ordering get an approximate timestamp of when it
-        happened.
-
-        This is done by simply taking the received ts of the first event that
-        has a stream ordering greater than or equal to the given stream pos.
-        If none exists returns the current time, on the assumption that it must
-        have happened recently.
-
-        Args:
-            stream_ordering (int)
-
-        Returns:
-            Deferred[int]
-        """
-
-        def _get_approximate_received_ts_txn(txn):
-            sql = """
-                SELECT received_ts FROM events
-                WHERE stream_ordering >= ?
-                LIMIT 1
-            """
-
-            txn.execute(sql, (stream_ordering,))
-            row = txn.fetchone()
-            if row and row[0]:
-                ts = row[0]
-            else:
-                ts = self.clock.time_msec()
-
-            return ts
-
-        return self.db_pool.runInteraction(
-            "get_approximate_received_ts", _get_approximate_received_ts_txn
-        )
-
     @defer.inlineCallbacks
     def get_event(
         self,
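Aside: the removed query pairs `LIMIT 1` with no `ORDER BY`, so "the first event with a stream ordering at or after the given position" relied on the engine's scan order rather than any SQL guarantee — acceptable for an avowed approximation, but a deterministic variant would pin the order explicitly. A sketch, not part of the commit:

```python
# Hypothetical deterministic version of the removed query.
sql = """
    SELECT received_ts FROM events
    WHERE stream_ordering >= ?
    ORDER BY stream_ordering ASC  -- make "first" well-defined
    LIMIT 1
"""
```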
@@ -923,36 +887,6 @@ class EventsWorkerStore(SQLBaseStore):
         )
         return results
 
-    def _get_total_state_event_counts_txn(self, txn, room_id):
-        """
-        See get_total_state_event_counts.
-        """
-        # We join against the events table as that has an index on room_id
-        sql = """
-            SELECT COUNT(*) FROM state_events
-            INNER JOIN events USING (room_id, event_id)
-            WHERE room_id=?
-        """
-        txn.execute(sql, (room_id,))
-        row = txn.fetchone()
-        return row[0] if row else 0
-
-    def get_total_state_event_counts(self, room_id):
-        """
-        Gets the total number of state events in a room.
-
-        Args:
-            room_id (str)
-
-        Returns:
-            Deferred[int]
-        """
-        return self.db_pool.runInteraction(
-            "get_total_state_event_counts",
-            self._get_total_state_event_counts_txn,
-            room_id,
-        )
-
     def _get_current_state_event_counts_txn(self, txn, room_id):
         """
         See get_current_state_event_counts.
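Aside: as the removed comment notes, the join against `events` exists only to pick up that table's `room_id` index; `state_events` has no such index of its own. If the counter is ever wanted again as a one-off, a minimal standalone sketch with the same SQL (hypothetical helper, not part of this commit):

```python
def get_total_state_event_counts(self, room_id):
    def _count_txn(txn):
        # COUNT(*) always yields exactly one row, so fetchone() is safe.
        txn.execute(
            """
            SELECT COUNT(*) FROM state_events
            INNER JOIN events USING (room_id, event_id)
            WHERE room_id = ?
            """,
            (room_id,),
        )
        return txn.fetchone()[0]

    return self.db_pool.runInteraction("get_total_state_event_counts", _count_txn)
```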
@@ -1222,97 +1156,6 @@ class EventsWorkerStore(SQLBaseStore):
 
         return rows, to_token, True
 
-    @cached(num_args=5, max_entries=10)
-    def get_all_new_events(
-        self,
-        last_backfill_id,
-        last_forward_id,
-        current_backfill_id,
-        current_forward_id,
-        limit,
-    ):
-        """Get all the new events that have arrived at the server either as
-        new events or as backfilled events"""
-        have_backfill_events = last_backfill_id != current_backfill_id
-        have_forward_events = last_forward_id != current_forward_id
-
-        if not have_backfill_events and not have_forward_events:
-            return defer.succeed(AllNewEventsResult([], [], [], [], []))
-
-        def get_all_new_events_txn(txn):
-            sql = (
-                "SELECT e.stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts"
-                " FROM events AS e"
-                " LEFT JOIN redactions USING (event_id)"
-                " LEFT JOIN state_events USING (event_id)"
-                " WHERE ? < stream_ordering AND stream_ordering <= ?"
-                " ORDER BY stream_ordering ASC"
-                " LIMIT ?"
-            )
-            if have_forward_events:
-                txn.execute(sql, (last_forward_id, current_forward_id, limit))
-                new_forward_events = txn.fetchall()
-
-                if len(new_forward_events) == limit:
-                    upper_bound = new_forward_events[-1][0]
-                else:
-                    upper_bound = current_forward_id
-
-                sql = (
-                    "SELECT event_stream_ordering, event_id, state_group"
-                    " FROM ex_outlier_stream"
-                    " WHERE ? > event_stream_ordering"
-                    " AND event_stream_ordering >= ?"
-                    " ORDER BY event_stream_ordering DESC"
-                )
-                txn.execute(sql, (last_forward_id, upper_bound))
-                forward_ex_outliers = txn.fetchall()
-            else:
-                new_forward_events = []
-                forward_ex_outliers = []
-
-            sql = (
-                "SELECT -e.stream_ordering, e.event_id, e.room_id, e.type,"
-                " state_key, redacts"
-                " FROM events AS e"
-                " LEFT JOIN redactions USING (event_id)"
-                " LEFT JOIN state_events USING (event_id)"
-                " WHERE ? > stream_ordering AND stream_ordering >= ?"
-                " ORDER BY stream_ordering DESC"
-                " LIMIT ?"
-            )
-            if have_backfill_events:
-                txn.execute(sql, (-last_backfill_id, -current_backfill_id, limit))
-                new_backfill_events = txn.fetchall()
-
-                if len(new_backfill_events) == limit:
-                    upper_bound = new_backfill_events[-1][0]
-                else:
-                    upper_bound = current_backfill_id
-
-                sql = (
-                    "SELECT -event_stream_ordering, event_id, state_group"
-                    " FROM ex_outlier_stream"
-                    " WHERE ? > event_stream_ordering"
-                    " AND event_stream_ordering >= ?"
-                    " ORDER BY event_stream_ordering DESC"
-                )
-                txn.execute(sql, (-last_backfill_id, -upper_bound))
-                backward_ex_outliers = txn.fetchall()
-            else:
-                new_backfill_events = []
-                backward_ex_outliers = []
-
-            return AllNewEventsResult(
-                new_forward_events,
-                new_backfill_events,
-                forward_ex_outliers,
-                backward_ex_outliers,
-            )
-
-        return self.db_pool.runInteraction("get_all_new_events", get_all_new_events_txn)
-
     async def is_event_after(self, event_id1, event_id2):
         """Returns True if event_id1 is after event_id2 in the stream
         """
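Aside: two details of the removed code are worth recording. First, backfilled events are stored with negative `stream_ordering`, which is why the backfill branch negates its ids before binding them and selects `-e.stream_ordering` to report positive magnitudes. Second, as rendered here the code appears long dead: the early return passes five values to the four-field `AllNewEventsResult` namedtuple (its definition is removed in the next hunk), and the forward ex-outlier query binds `(last_forward_id, upper_bound)` against `? > event_stream_ordering AND event_stream_ordering >= ?`, which matches nothing once `upper_bound >= last_forward_id`. A quick demonstration of the arity mismatch:

```python
from collections import namedtuple

AllNewEventsResult = namedtuple(
    "AllNewEventsResult",
    [
        "new_forward_events",
        "new_backfill_events",
        "forward_ex_outliers",
        "backward_ex_outliers",
    ],
)

try:
    AllNewEventsResult([], [], [], [], [])  # five values, four fields
except TypeError as e:
    print(e)  # roughly: __new__() takes 5 positional arguments but 6 were given
```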
@@ -1357,14 +1200,3 @@ class EventsWorkerStore(SQLBaseStore):
         return self.db_pool.runInteraction(
             desc="get_next_event_to_expire", func=get_next_event_to_expire_txn
         )
-
-
-AllNewEventsResult = namedtuple(
-    "AllNewEventsResult",
-    [
-        "new_forward_events",
-        "new_backfill_events",
-        "forward_ex_outliers",
-        "backward_ex_outliers",
-    ],
-)
@@ -157,24 +157,3 @@ class PresenceStore(SQLBaseStore):
 
     def get_current_presence_token(self):
        return self._presence_id_gen.get_current_token()
-
-    def allow_presence_visible(self, observed_localpart, observer_userid):
-        return self.db_pool.simple_insert(
-            table="presence_allow_inbound",
-            values={
-                "observed_user_id": observed_localpart,
-                "observer_user_id": observer_userid,
-            },
-            desc="allow_presence_visible",
-            or_ignore=True,
-        )
-
-    def disallow_presence_visible(self, observed_localpart, observer_userid):
-        return self.db_pool.simple_delete_one(
-            table="presence_allow_inbound",
-            keyvalues={
-                "observed_user_id": observed_localpart,
-                "observer_user_id": observer_userid,
-            },
-            desc="disallow_presence_visible",
-        )
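Aside: these two methods were the only accessors of `presence_allow_inbound`, which is why the new schema delta at the end of this diff can drop the table outright. Note also that `or_ignore=True` turned a duplicate-key failure into a silent no-op, making repeated allows idempotent. Hypothetical call sites, for flavour — none existed, hence the removal:

```python
# Allow @bob to see the presence of local user "alice", then revoke it.
await store.allow_presence_visible("alice", "@bob:example.com")
await store.disallow_presence_visible("alice", "@bob:example.com")
```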
@@ -1345,43 +1345,6 @@ class RegistrationStore(RegistrationBackgroundUpdateStore):
             "validate_threepid_session_txn", validate_threepid_session_txn
         )
 
-    def upsert_threepid_validation_session(
-        self,
-        medium,
-        address,
-        client_secret,
-        send_attempt,
-        session_id,
-        validated_at=None,
-    ):
-        """Upsert a threepid validation session
-        Args:
-            medium (str): The medium of the 3PID
-            address (str): The address of the 3PID
-            client_secret (str): A unique string provided by the client to
-                help identify this validation attempt
-            send_attempt (int): The latest send_attempt on this session
-            session_id (str): The id of this validation session
-            validated_at (int|None): The unix timestamp in milliseconds of
-                when the session was marked as valid
-        """
-        insertion_values = {
-            "medium": medium,
-            "address": address,
-            "client_secret": client_secret,
-        }
-
-        if validated_at:
-            insertion_values["validated_at"] = validated_at
-
-        return self.db_pool.simple_upsert(
-            table="threepid_validation_session",
-            keyvalues={"session_id": session_id},
-            values={"last_send_attempt": send_attempt},
-            insertion_values=insertion_values,
-            desc="upsert_threepid_validation_session",
-        )
-
     def start_or_continue_validation_session(
         self,
         medium,
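Aside: the removed method was a direct wrapper around `simple_upsert`, whose semantics are worth spelling out: rows are matched on `keyvalues`; on a match only `values` is written, and on a miss a new row is inserted from the union of `keyvalues`, `values`, and `insertion_values`. A minimal sketch of the same call, with variables as in the removed method:

```python
await self.db_pool.simple_upsert(
    table="threepid_validation_session",
    keyvalues={"session_id": session_id},        # matched on
    values={"last_send_attempt": send_attempt},  # always updated
    insertion_values={                           # only set on first insert
        "medium": medium,
        "address": address,
        "client_secret": client_secret,
    },
    desc="upsert_threepid_validation_session",
)
```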
@@ -35,10 +35,6 @@ from synapse.util.caches.descriptors import cached
 logger = logging.getLogger(__name__)
 
 
-OpsLevel = collections.namedtuple(
-    "OpsLevel", ("ban_level", "kick_level", "redact_level")
-)
-
 RatelimitOverride = collections.namedtuple(
     "RatelimitOverride", ("messages_per_second", "burst_count")
 )
New SQL schema delta (drops the table left orphaned by the presence.py removals above):

@@ -0,0 +1,17 @@
+/* Copyright 2020 The Matrix.org Foundation C.I.C.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+-- This table is no longer used.
+DROP TABLE IF EXISTS presence_allow_inbound;