Reduce amount of logging at INFO level. (#6862)

A lot of the things we log at INFO are now a bit superfluous, so let's
make them DEBUG logs to reduce the amount we log by default.

Co-Authored-By: Brendan Abolivier <babolivier@matrix.org>
Co-authored-by: Brendan Abolivier <github@brendanabolivier.com>
This commit is contained in:
Erik Johnston 2020-02-06 13:31:05 +00:00 committed by GitHub
parent 9bcd37146e
commit ed630ea17c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
12 changed files with 22 additions and 21 deletions

1
changelog.d/6862.misc Normal file
View File

@ -0,0 +1 @@
Reduce amount we log at `INFO` level.

View File

@ -573,7 +573,7 @@ class FederationServer(FederationBase):
origin_host, _ = parse_server_name(origin) origin_host, _ = parse_server_name(origin)
await self.check_server_matches_acl(origin_host, room_id) await self.check_server_matches_acl(origin_host, room_id)
logger.info( logger.debug(
"on_get_missing_events: earliest_events: %r, latest_events: %r," "on_get_missing_events: earliest_events: %r, latest_events: %r,"
" limit: %d", " limit: %d",
earliest_events, earliest_events,
@ -586,11 +586,11 @@ class FederationServer(FederationBase):
) )
if len(missing_events) < 5: if len(missing_events) < 5:
logger.info( logger.debug(
"Returning %d events: %r", len(missing_events), missing_events "Returning %d events: %r", len(missing_events), missing_events
) )
else: else:
logger.info("Returning %d events", len(missing_events)) logger.debug("Returning %d events", len(missing_events))
time_now = self._clock.time_msec() time_now = self._clock.time_msec()

View File

@ -158,7 +158,7 @@ class Authenticator(object):
origin, json_request, now, "Incoming request" origin, json_request, now, "Incoming request"
) )
logger.info("Request from %s", origin) logger.debug("Request from %s", origin)
request.authenticated_entity = origin request.authenticated_entity = origin
# If we get a valid signed request from the other side, its probably # If we get a valid signed request from the other side, its probably

View File

@ -259,7 +259,7 @@ class RoomCreationHandler(BaseHandler):
for v in ("invite", "events_default"): for v in ("invite", "events_default"):
current = int(pl_content.get(v, 0)) current = int(pl_content.get(v, 0))
if current < restricted_level: if current < restricted_level:
logger.info( logger.debug(
"Setting level for %s in %s to %i (was %i)", "Setting level for %s in %s to %i (was %i)",
v, v,
old_room_id, old_room_id,
@ -269,7 +269,7 @@ class RoomCreationHandler(BaseHandler):
pl_content[v] = restricted_level pl_content[v] = restricted_level
updated = True updated = True
else: else:
logger.info("Not setting level for %s (already %i)", v, current) logger.debug("Not setting level for %s (already %i)", v, current)
if updated: if updated:
try: try:
@ -296,7 +296,7 @@ class RoomCreationHandler(BaseHandler):
EventTypes.Aliases, events_default EventTypes.Aliases, events_default
) )
logger.info("Setting correct PLs in new room to %s", new_pl_content) logger.debug("Setting correct PLs in new room to %s", new_pl_content)
yield self.event_creation_handler.create_and_send_nonmember_event( yield self.event_creation_handler.create_and_send_nonmember_event(
requester, requester,
{ {
@ -782,7 +782,7 @@ class RoomCreationHandler(BaseHandler):
@defer.inlineCallbacks @defer.inlineCallbacks
def send(etype, content, **kwargs): def send(etype, content, **kwargs):
event = create(etype, content, **kwargs) event = create(etype, content, **kwargs)
logger.info("Sending %s in new room", etype) logger.debug("Sending %s in new room", etype)
yield self.event_creation_handler.create_and_send_nonmember_event( yield self.event_creation_handler.create_and_send_nonmember_event(
creator, event, ratelimit=False creator, event, ratelimit=False
) )
@ -796,7 +796,7 @@ class RoomCreationHandler(BaseHandler):
creation_content.update({"creator": creator_id}) creation_content.update({"creator": creator_id})
yield send(etype=EventTypes.Create, content=creation_content) yield send(etype=EventTypes.Create, content=creation_content)
logger.info("Sending %s in new room", EventTypes.Member) logger.debug("Sending %s in new room", EventTypes.Member)
yield self.room_member_handler.update_membership( yield self.room_member_handler.update_membership(
creator, creator,
creator.user, creator.user,

View File

@ -300,7 +300,7 @@ class StatsHandler(StateDeltasHandler):
room_state["guest_access"] = event_content.get("guest_access") room_state["guest_access"] = event_content.get("guest_access")
for room_id, state in room_to_state_updates.items(): for room_id, state in room_to_state_updates.items():
logger.info("Updating room_stats_state for %s: %s", room_id, state) logger.debug("Updating room_stats_state for %s: %s", room_id, state)
yield self.store.update_room_state(room_id, state) yield self.store.update_room_state(room_id, state)
return room_to_stats_deltas, user_to_stats_deltas return room_to_stats_deltas, user_to_stats_deltas

View File

@ -968,7 +968,7 @@ class SyncHandler(object):
# Always use the `now_token` in `SyncResultBuilder` # Always use the `now_token` in `SyncResultBuilder`
now_token = await self.event_sources.get_current_token() now_token = await self.event_sources.get_current_token()
logger.info( logger.debug(
"Calculating sync response for %r between %s and %s", "Calculating sync response for %r between %s and %s",
sync_config.user, sync_config.user,
since_token, since_token,
@ -1498,7 +1498,7 @@ class SyncHandler(object):
room_entries = [] room_entries = []
invited = [] invited = []
for room_id, events in iteritems(mem_change_events_by_room_id): for room_id, events in iteritems(mem_change_events_by_room_id):
logger.info( logger.debug(
"Membership changes in %s: [%s]", "Membership changes in %s: [%s]",
room_id, room_id,
", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)), ", ".join(("%s (%s)" % (e.event_id, e.membership) for e in events)),
@ -1892,7 +1892,7 @@ class SyncHandler(object):
if batch.limited and since_token: if batch.limited and since_token:
user_id = sync_result_builder.sync_config.user.to_string() user_id = sync_result_builder.sync_config.user.to_string()
logger.info( logger.debug(
"Incremental gappy sync of %s for user %s with %d state events" "Incremental gappy sync of %s for user %s with %d state events"
% (room_id, user_id, len(state)) % (room_id, user_id, len(state))
) )

View File

@ -149,7 +149,7 @@ class UserDirectoryHandler(StateDeltasHandler):
self.pos, room_max_stream_ordering self.pos, room_max_stream_ordering
) )
logger.info("Handling %d state deltas", len(deltas)) logger.debug("Handling %d state deltas", len(deltas))
yield self._handle_deltas(deltas) yield self._handle_deltas(deltas)
self.pos = max_pos self.pos = max_pos
@ -195,7 +195,7 @@ class UserDirectoryHandler(StateDeltasHandler):
room_id, self.server_name room_id, self.server_name
) )
if not is_in_room: if not is_in_room:
logger.info("Server left room: %r", room_id) logger.debug("Server left room: %r", room_id)
# Fetch all the users that we marked as being in user # Fetch all the users that we marked as being in user
# directory due to being in the room and then check if # directory due to being in the room and then check if
# need to remove those users or not # need to remove those users or not

View File

@ -225,7 +225,7 @@ class SynapseRequest(Request):
self.start_time, name=servlet_name, method=self.get_method() self.start_time, name=servlet_name, method=self.get_method()
) )
self.site.access_logger.info( self.site.access_logger.debug(
"%s - %s - Received request: %s %s", "%s - %s - Received request: %s %s",
self.getClientIP(), self.getClientIP(),
self.site.site_tag, self.site.site_tag,

View File

@ -398,7 +398,7 @@ class HttpPusher(object):
Args: Args:
badge (int): number of unread messages badge (int): number of unread messages
""" """
logger.info("Sending updated badge count %d to %s", badge, self.name) logger.debug("Sending updated badge count %d to %s", badge, self.name)
d = { d = {
"notification": { "notification": {
"id": "", "id": "",

View File

@ -183,7 +183,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
) )
return 1 return 1
logger.info( logger.debug(
"Processing the next %d rooms of %d remaining" "Processing the next %d rooms of %d remaining"
% (len(rooms_to_work_on), progress["remaining"]) % (len(rooms_to_work_on), progress["remaining"])
) )
@ -308,7 +308,7 @@ class UserDirectoryBackgroundUpdateStore(StateDeltasStore):
) )
return 1 return 1
logger.info( logger.debug(
"Processing the next %d users of %d remaining" "Processing the next %d users of %d remaining"
% (len(users_to_work_on), progress["remaining"]) % (len(users_to_work_on), progress["remaining"])
) )

View File

@ -390,7 +390,7 @@ class EventsPersistenceStorage(object):
state_delta_reuse_delta_counter.inc() state_delta_reuse_delta_counter.inc()
break break
logger.info("Calculating state delta for room %s", room_id) logger.debug("Calculating state delta for room %s", room_id)
with Measure( with Measure(
self._clock, "persist_events.get_new_state_after_events" self._clock, "persist_events.get_new_state_after_events"
): ):

View File

@ -144,7 +144,7 @@ class ResponseCache(object):
""" """
result = self.get(key) result = self.get(key)
if not result: if not result:
logger.info( logger.debug(
"[%s]: no cached result for [%s], calculating new one", self._name, key "[%s]: no cached result for [%s], calculating new one", self._name, key
) )
d = run_in_background(callback, *args, **kwargs) d = run_in_background(callback, *args, **kwargs)