Reduce amount of logging at INFO level. (#6862)
A lot of the things we log at INFO are now a bit superfluous, so let's make them DEBUG logs to reduce the amount we log by default.

Co-authored-by: Brendan Abolivier <babolivier@matrix.org>
Co-authored-by: Brendan Abolivier <github@brendanabolivier.com>
commit ed630ea17c (parent 9bcd37146e)
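For context, the behaviour the diff below relies on is just Python's standard logging level filtering: once a logger's effective level is INFO (commonly the default for a homeserver deployment), anything emitted with logger.debug() is dropped before it reaches a handler. A minimal, self-contained sketch; the logger name is only illustrative of Synapse's module-based logging.getLogger(__name__) naming:

import logging

# A typical deployment leaves the effective level at INFO, so configure that here.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
)

# Module-level logger, mirroring Synapse's `logger = logging.getLogger(__name__)`
# pattern; the name below is illustrative only.
logger = logging.getLogger("synapse.federation.federation_server")

# Still emitted: INFO is at (or above) the configured level.
logger.info("Returning %d events", 3)

# Dropped by the level check: DEBUG is below INFO, so after this change these
# messages only appear when an operator explicitly enables DEBUG logging.
logger.debug("Returning %d events: %r", 3, ["$a:one", "$b:two", "$c:three"])

With the level left at INFO, the first call is written and the second is silently discarded, which is why demoting these call sites shrinks the default log output.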
changelog.d/6862.misc (new file, 1 line)
@@ -0,0 +1 @@
+Reduce amount we log at `INFO` level.
@@ -573,7 +573,7 @@ class FederationServer(FederationBase):
             origin_host, _ = parse_server_name(origin)
             await self.check_server_matches_acl(origin_host, room_id)
 
-            logger.info(
+            logger.debug(
                 "on_get_missing_events: earliest_events: %r, latest_events: %r,"
                 " limit: %d",
                 earliest_events,
@@ -586,11 +586,11 @@ class FederationServer(FederationBase):
             )
 
             if len(missing_events) < 5:
-                logger.info(
+                logger.debug(
                     "Returning %d events: %r", len(missing_events), missing_events
                 )
             else:
-                logger.info("Returning %d events", len(missing_events))
+                logger.debug("Returning %d events", len(missing_events))
 
             time_now = self._clock.time_msec()
 
@@ -158,7 +158,7 @@ class Authenticator(object):
             origin, json_request, now, "Incoming request"
         )
 
-        logger.info("Request from %s", origin)
+        logger.debug("Request from %s", origin)
         request.authenticated_entity = origin
 
         # If we get a valid signed request from the other side, its probably
@@ -259,7 +259,7 @@ class RoomCreationHandler(BaseHandler):
         for v in ("invite", "events_default"):
             current = int(pl_content.get(v, 0))
             if current < restricted_level:
-                logger.info(
+                logger.debug(
                     "Setting level for %s in %s to %i (was %i)",
                     v,
                     old_room_id,
@@ -269,7 +269,7 @@ class RoomCreationHandler(BaseHandler):
                 pl_content[v] = restricted_level
                 updated = True
             else:
-                logger.info("Not setting level for %s (already %i)", v, current)
+                logger.debug("Not setting level for %s (already %i)", v, current)
 
         if updated:
             try:
@@ -296,7 +296,7 @@ class RoomCreationHandler(BaseHandler):
             EventTypes.Aliases, events_default
         )
 
-        logger.info("Setting correct PLs in new room to %s", new_pl_content)
+        logger.debug("Setting correct PLs in new room to %s", new_pl_content)
         yield self.event_creation_handler.create_and_send_nonmember_event(
             requester,
             {
@@ -782,7 +782,7 @@ class RoomCreationHandler(BaseHandler):
         @defer.inlineCallbacks
         def send(etype, content, **kwargs):
             event = create(etype, content, **kwargs)
-            logger.info("Sending %s in new room", etype)
+            logger.debug("Sending %s in new room", etype)
             yield self.event_creation_handler.create_and_send_nonmember_event(
                 creator, event, ratelimit=False
             )
@@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler):
         creation_content.update({"creator": creator_id})
         yield send(etype=EventTypes.Create, content=creation_content)
 
-        logger.info("Sending %s in new room", EventTypes.Member)
+        logger.debug("Sending %s in new room", EventTypes.Member)
         yield self.room_member_handler.update_membership(
             creator,
             creator.user,
@@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler):
                     room_state["guest_access"] = event_content.get("guest_access")
 
         for room_id, state in room_to_state_updates.items():
-            logger.info("Updating room_stats_state for %s: %s", room_id, state)
+            logger.debug("Updating room_stats_state for %s: %s", room_id, state)
             yield self.store.update_room_state(room_id, state)
 
         return room_to_stats_deltas, user_to_stats_deltas
@@ -968,7 +968,7 @@ class SyncHandler(object):
         # Always use the `now_token` in `SyncResultBuilder`
         now_token = await self.event_sources.get_current_token()
 
-        logger.info(
+        logger.debug(
             "Calculating sync response for %r between %s and %s",
             sync_config.user,
             since_token,
@@ -1498,7 +1498,7 @@ class SyncHandler(object):
         room_entries = []
         invited = []
         for room_id, events in iteritems(mem_change_events_by_room_id):
-            logger.info(
+            logger.debug(
                 "Membership changes in %s: [%s]",
                 room_id,
                 ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
@@ -1892,7 +1892,7 @@ class SyncHandler(object):
 
         if batch.limited and since_token:
             user_id = sync_result_builder.sync_config.user.to_string()
-            logger.info(
+            logger.debug(
                 "Incremental gappy sync of %s for user %s with %d state events"
                 % (room_id, user_id, len(state))
             )
@@ -149,7 +149,7 @@ class UserDirectoryHandler(StateDeltasHandler):
                 self.pos, room_max_stream_ordering
             )
 
-            logger.info("Handling %d state deltas", len(deltas))
+            logger.debug("Handling %d state deltas", len(deltas))
             yield self._handle_deltas(deltas)
 
             self.pos = max_pos
@@ -195,7 +195,7 @@ class UserDirectoryHandler(StateDeltasHandler):
                     room_id, self.server_name
                 )
                 if not is_in_room:
-                    logger.info("Server left room: %r", room_id)
+                    logger.debug("Server left room: %r", room_id)
                     # Fetch all the users that we marked as being in user
                     # directory due to being in the room and then check if
                     # need to remove those users or not
@@ -225,7 +225,7 @@ class SynapseRequest(Request):
             self.start_time, name=servlet_name, method=self.get_method()
         )
 
-        self.site.access_logger.info(
+        self.site.access_logger.debug(
            "%s - %s - Received request: %s %s",
            self.getClientIP(),
            self.site.site_tag,
@@ -398,7 +398,7 @@ class HttpPusher(object):
         Args:
             badge (int): number of unread messages
         """
-        logger.info("Sending updated badge count %d to %s", badge, self.name)
+        logger.debug("Sending updated badge count %d to %s", badge, self.name)
         d = {
             "notification": {
                 "id": "",
@@ -183,7 +183,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             )
             return 1
 
-        logger.info(
+        logger.debug(
             "Processing the next %d rooms of %d remaining"
             % (len(rooms_to_work_on), progress["remaining"])
         )
@@ -308,7 +308,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
             )
             return 1
 
-        logger.info(
+        logger.debug(
             "Processing the next %d users of %d remaining"
             % (len(users_to_work_on), progress["remaining"])
         )
@@ -390,7 +390,7 @@ class EventsPersistenceStorage(object):
                             state_delta_reuse_delta_counter.inc()
                             break
 
-                    logger.info("Calculating state delta for room %s", room_id)
+                    logger.debug("Calculating state delta for room %s", room_id)
                     with Measure(
                         self._clock, "persist_events.get_new_state_after_events"
                     ):
@@ -144,7 +144,7 @@ class ResponseCache(object):
         """
         result = self.get(key)
         if not result:
-            logger.info(
+            logger.debug(
                 "[%s]: no cached result for [%s], calculating new one", self._name, key
             )
             d = run_in_background(callback, *args, **kwargs)
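Conversely, deployments that still want these messages can opt back in per logger rather than globally. A sketch using the standard library's dictConfig schema, into which Synapse's YAML log config is ultimately parsed; the specific logger names below are illustrative:

import logging
import logging.config

# Standard-library dictConfig schema. Logger names are illustrative examples of
# Synapse's module-based logger naming, not a prescribed configuration.
logging.config.dictConfig(
    {
        "version": 1,
        "formatters": {
            "precise": {
                "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
            }
        },
        "handlers": {
            "console": {"class": "logging.StreamHandler", "formatter": "precise"}
        },
        # Re-enable the demoted messages for one noisy area only, leaving the
        # rest of the server at INFO.
        "loggers": {"synapse.handlers.sync": {"level": "DEBUG"}},
        "root": {"level": "INFO", "handlers": ["console"]},
    }
)

logging.getLogger("synapse.handlers.sync").debug("visible again at DEBUG")
logging.getLogger("synapse.handlers.room").debug("still suppressed at INFO")

Raising one sub-logger to DEBUG keeps overall volume down while restoring detail in the area actually being investigated.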