forked-synapse/synapse/util/metrics.py


#
# This file is licensed under the Affero General Public License (AGPL) version 3.
#
# Copyright 2016 OpenMarket Ltd
# Copyright (C) 2023 New Vector, Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# See the GNU Affero General Public License for more details:
# <https://www.gnu.org/licenses/agpl-3.0.html>.
#
# Originally licensed under the Apache License, Version 2.0:
# <http://www.apache.org/licenses/LICENSE-2.0>.
#
# [This file includes modifications made by New Vector Limited]
#
#
import logging
from functools import wraps
from types import TracebackType
from typing import Awaitable, Callable, Dict, Generator, Optional, Type, TypeVar
from prometheus_client import CollectorRegistry, Counter, Metric
from typing_extensions import Concatenate, ParamSpec, Protocol
from synapse.logging.context import (
    ContextResourceUsage,
    LoggingContext,
    current_context,
)
from synapse.metrics import InFlightGauge
from synapse.util import Clock
logger = logging.getLogger(__name__)
block_counter = Counter("synapse_util_metrics_block_count", "", ["block_name"])
block_timer = Counter("synapse_util_metrics_block_time_seconds", "", ["block_name"])
block_ru_utime = Counter(
"synapse_util_metrics_block_ru_utime_seconds", "", ["block_name"]
)
block_ru_stime = Counter(
"synapse_util_metrics_block_ru_stime_seconds", "", ["block_name"]
)
block_db_txn_count = Counter(
"synapse_util_metrics_block_db_txn_count", "", ["block_name"]
)
# seconds spent waiting for db txns, excluding scheduling time, in this block
block_db_txn_duration = Counter(
"synapse_util_metrics_block_db_txn_duration_seconds", "", ["block_name"]
)
# seconds spent waiting for a db connection, in this block
block_db_sched_duration = Counter(
"synapse_util_metrics_block_db_sched_duration_seconds", "", ["block_name"]
)
# This is dynamically created in InFlightGauge.__init__.
class _InFlightMetric(Protocol):
    real_time_max: float
    real_time_sum: float


# Tracks the number of blocks currently active
in_flight: InFlightGauge[_InFlightMetric] = InFlightGauge(
    "synapse_util_metrics_block_in_flight",
    "",
    labels=["block_name"],
    sub_metrics=["real_time_max", "real_time_sum"],
)

P = ParamSpec("P")
R = TypeVar("R")
class HasClock(Protocol):
    clock: Clock


def measure_func(
    name: Optional[str] = None,
) -> Callable[[Callable[P, Awaitable[R]]], Callable[P, Awaitable[R]]]:
"""Decorate an async method with a `Measure` context manager.
The Measure is created using `self.clock`; it should only be used to decorate
methods in classes defining an instance-level `clock` attribute.
Usage:
@measure_func()
async def foo(...):
...
2019-12-05 12:58:25 -05:00
Which is analogous to:
2019-12-05 12:58:25 -05:00
async def foo(...):
with Measure(...):
...
"""
    def wrapper(
        func: Callable[Concatenate[HasClock, P], Awaitable[R]]
    ) -> Callable[P, Awaitable[R]]:
        block_name = func.__name__ if name is None else name

        @wraps(func)
        async def measured_func(self: HasClock, *args: P.args, **kwargs: P.kwargs) -> R:
            with Measure(self.clock, block_name):
                r = await func(self, *args, **kwargs)
            return r
        # There are some shenanigans here, because we're decorating a method but
        # explicitly making use of the `self` parameter. The key thing here is that the
        # return type within the return type for `measure_func` itself describes how the
        # decorated function will be called.
        return measured_func  # type: ignore[return-value]
    return wrapper  # type: ignore[return-value]
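
# Illustrative usage sketch (not part of the original module): `measure_func`
# expects the decorated method's class to expose a `clock` attribute, as most
# Synapse handlers do. The `MyHandler` class and `hs.get_clock()` accessor
# below are hypothetical stand-ins.
#
#     class MyHandler:
#         def __init__(self, hs):
#             self.clock = hs.get_clock()
#
#         @measure_func("my_handler_work")
#         async def do_work(self) -> None:
#             ...
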
class Measure:
    __slots__ = [
        "clock",
        "name",
        "_logging_context",
        "start",
    ]
    def __init__(self, clock: Clock, name: str) -> None:
        """
        Args:
            clock: An object with a "time()" method, which returns the current
                time in seconds.
            name: The name of the metric to report.
        """
        self.clock = clock
        self.name = name

        curr_context = current_context()
        if not curr_context:
            logger.warning(
                "Starting metrics collection %r from sentinel context: metrics will be lost",
                name,
            )
            parent_context = None
        else:
            assert isinstance(curr_context, LoggingContext)
            parent_context = curr_context
        self._logging_context = LoggingContext(str(curr_context), parent_context)
        self.start: Optional[float] = None
    def __enter__(self) -> "Measure":
        if self.start is not None:
            raise RuntimeError("Measure() objects cannot be re-used")

        self.start = self.clock.time()
        self._logging_context.__enter__()
        in_flight.register((self.name,), self._update_in_flight)

        logger.debug("Entering block %s", self.name)

        return self
    def __exit__(
        self,
        exc_type: Optional[Type[BaseException]],
        exc_val: Optional[BaseException],
        exc_tb: Optional[TracebackType],
    ) -> None:
        if self.start is None:
            raise RuntimeError("Measure() block exited without being entered")

        logger.debug("Exiting block %s", self.name)

        duration = self.clock.time() - self.start
        usage = self.get_resource_usage()

        in_flight.unregister((self.name,), self._update_in_flight)
        self._logging_context.__exit__(exc_type, exc_val, exc_tb)
        try:
            block_counter.labels(self.name).inc()
            block_timer.labels(self.name).inc(duration)
            block_ru_utime.labels(self.name).inc(usage.ru_utime)
            block_ru_stime.labels(self.name).inc(usage.ru_stime)
            block_db_txn_count.labels(self.name).inc(usage.db_txn_count)
            block_db_txn_duration.labels(self.name).inc(usage.db_txn_duration_sec)
            block_db_sched_duration.labels(self.name).inc(usage.db_sched_duration_sec)
        except ValueError:
            logger.warning("Failed to save metrics! Usage: %s", usage)

    def get_resource_usage(self) -> ContextResourceUsage:
        """Get the resources used within this Measure block

        If the Measure block is still active, returns the resource usage so far.
        """
        return self._logging_context.get_resource_usage()

    def _update_in_flight(self, metrics: _InFlightMetric) -> None:
        """Gets called when processing in flight metrics"""
        assert self.start is not None

        duration = self.clock.time() - self.start

        metrics.real_time_max = max(metrics.real_time_max, duration)
        metrics.real_time_sum += duration

        # TODO: Add other in flight metrics.
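
# Illustrative sketch (not part of the original module): `Measure` can also be
# used directly as a context manager, and `get_resource_usage()` may be read
# while the block is still active. `clock` and `do_step()` are hypothetical
# stand-ins for a homeserver Clock and some awaitable work.
#
#     async def run_job(clock: Clock) -> None:
#         with Measure(clock, "run_job") as measure:
#             await do_step()
#             usage = measure.get_resource_usage()
#             logger.debug("DB time so far: %.3fs", usage.db_txn_duration_sec)
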
class DynamicCollectorRegistry(CollectorRegistry):
    """
    Custom Prometheus Collector registry that calls a hook first, allowing you
    to update metrics on-demand.

    Don't forget to register this registry with the main registry!
    """

    def __init__(self) -> None:
        super().__init__()

        self._pre_update_hooks: Dict[str, Callable[[], None]] = {}

    def collect(self) -> Generator[Metric, None, None]:
        """
        Collects metrics, calling pre-update hooks first.
        """

        for pre_update_hook in self._pre_update_hooks.values():
            pre_update_hook()

        yield from super().collect()

    def register_hook(self, metric_name: str, hook: Callable[[], None]) -> None:
        """
        Registers a hook that is called before metric collection.
        """

        self._pre_update_hooks[metric_name] = hook
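
# Illustrative sketch (not part of the original module): as the class docstring
# notes, a DynamicCollectorRegistry must itself be registered with the main
# prometheus_client registry before its hooks have any effect. The gauge name
# and the `pending_queue` it reads from are hypothetical.
#
#     from prometheus_client import REGISTRY, Gauge
#
#     dynamic_registry = DynamicCollectorRegistry()
#     queue_depth = Gauge("myapp_queue_depth", "", registry=dynamic_registry)
#
#     def _refresh_queue_depth() -> None:
#         queue_depth.set(len(pending_queue))
#
#     dynamic_registry.register_hook("myapp_queue_depth", _refresh_queue_depth)
#     REGISTRY.register(dynamic_registry)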