2015-02-24 13:03:39 -05:00
|
|
|
# -*- coding: utf-8 -*-
|
2016-01-06 23:26:29 -05:00
|
|
|
# Copyright 2015, 2016 OpenMarket Ltd
|
2015-02-24 13:03:39 -05:00
|
|
|
#
|
|
|
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
# you may not use this file except in compliance with the License.
|
|
|
|
# You may obtain a copy of the License at
|
|
|
|
#
|
|
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
#
|
|
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
|
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
# See the License for the specific language governing permissions and
|
|
|
|
# limitations under the License.
|
|
|
|
|
2015-03-05 11:15:21 -05:00
|
|
|
# Because otherwise 'resource' collides with synapse.metrics.resource
|
|
|
|
from __future__ import absolute_import
|
|
|
|
|
2015-03-04 12:13:09 -05:00
|
|
|
import logging
|
2015-09-07 11:45:48 -04:00
|
|
|
from resource import getrusage, RUSAGE_SELF
|
2015-08-13 06:38:59 -04:00
|
|
|
import functools
|
2015-04-01 14:04:55 -04:00
|
|
|
import os
|
|
|
|
import stat
|
2015-08-13 06:38:59 -04:00
|
|
|
import time
|
2016-05-09 05:13:25 -04:00
|
|
|
import gc
|
2015-08-13 06:38:59 -04:00
|
|
|
|
|
|
|
from twisted.internet import reactor
|
2015-03-04 12:13:09 -05:00
|
|
|
|
2015-03-10 11:21:03 -04:00
|
|
|
from .metric import (
|
2016-07-20 10:47:28 -04:00
|
|
|
CounterMetric, CallbackMetric, DistributionMetric, CacheMetric,
|
|
|
|
MemoryUsageMetric,
|
2015-03-10 11:21:03 -04:00
|
|
|
)
|
2015-02-24 13:03:39 -05:00
|
|
|
|
|
|
|
|
2015-03-04 12:13:09 -05:00
|
|
|
logger = logging.getLogger(__name__)


# Flat list of every metric ever registered (by Metrics._register and
# register_memory_metrics); render_all() walks this to build the export page.
all_metrics = []
|
2015-02-24 13:03:39 -05:00
|
|
|
|
|
|
|
|
|
|
|
class Metrics(object):
    """Factory for metrics that share a common name prefix.

    Each ``register_*`` call builds a metric named ``"<prefix>_<name>"``,
    records it in the module-level ``all_metrics`` list (so render_all()
    can find it) and returns it to the caller.
    """

    def __init__(self, name):
        # Prefix joined (with an underscore) onto every metric name.
        self.name_prefix = name

    def _register(self, metric_class, name, *args, **kwargs):
        # Build the fully-qualified name, instantiate, and record globally.
        new_metric = metric_class(
            "%s_%s" % (self.name_prefix, name), *args, **kwargs
        )
        all_metrics.append(new_metric)
        return new_metric

    def register_counter(self, *args, **kwargs):
        """Create and register a CounterMetric under this prefix."""
        return self._register(CounterMetric, *args, **kwargs)

    def register_callback(self, *args, **kwargs):
        """Create and register a CallbackMetric under this prefix."""
        return self._register(CallbackMetric, *args, **kwargs)

    def register_distribution(self, *args, **kwargs):
        """Create and register a DistributionMetric under this prefix."""
        return self._register(DistributionMetric, *args, **kwargs)

    def register_cache(self, *args, **kwargs):
        """Create and register a CacheMetric under this prefix."""
        return self._register(CacheMetric, *args, **kwargs)
|
2015-03-04 10:47:23 -05:00
|
|
|
|
2015-02-24 13:03:39 -05:00
|
|
|
|
2016-07-20 10:47:28 -04:00
|
|
|
def register_memory_metrics(hs):
    """Register a MemoryUsageMetric for this process, if psutil is usable.

    Args:
        hs: the HomeServer whose memory usage is to be reported.

    If psutil is not installed, or is too old to expose
    ``Process.memory_info().rss``, a warning is logged and no metric is
    registered.
    """
    try:
        import psutil
        process = psutil.Process()
        # Probe for memory_info().rss up front: an old psutil raises
        # AttributeError here rather than later at render time.
        process.memory_info().rss
    except (ImportError, AttributeError):
        # Use logger.warning: logger.warn is a deprecated alias.
        logger.warning(
            "psutil is not installed or incorrect version."
            " Disabling memory metrics."
        )
        return
    metric = MemoryUsageMetric(hs, psutil)
    all_metrics.append(metric)
|
|
|
|
|
|
|
|
|
2015-03-06 11:18:21 -05:00
|
|
|
def get_metrics_for(pkg_name):
    """Return a Metrics factory whose metrics are namespaced by *pkg_name*.

    Dots in the package name are rewritten to underscores, because
    Prometheus does not allow "." in metric names.
    """
    prefix = pkg_name.replace(".", "_")
    return Metrics(prefix)
|
2015-02-24 13:03:39 -05:00
|
|
|
|
|
|
|
|
|
|
|
def render_all():
    """Render every registered metric into one newline-joined string.

    A metric whose render() raises is replaced with a "# FAILED to render"
    line (and the failure logged) instead of aborting the whole page.
    """
    # TODO(paul): Internal hack
    update_resource_metrics()

    lines = []
    for metric in all_metrics:
        try:
            lines.extend(metric.render())
        except Exception:
            lines.append("# FAILED to render")
            logger.exception("Failed to render metric")

    # Trailing empty entry so the joined output ends with a newline.
    lines.append("")

    return "\n".join(lines)
|
2015-03-05 11:15:21 -05:00
|
|
|
|
|
|
|
|
|
|
|
# Now register some standard process-wide state metrics, to give indications of
# process resource usage

# Kernel clock ticks per second, used to convert the tick counts in
# /proc/self/stat to seconds.
# NOTE(review): assumes the kernel's USER_HZ is 100 — confirm for unusual
# kernel configurations.
TICKS_PER_SEC = 100
# Page size, used to convert the page counts in /proc/self/stat to bytes.
# NOTE(review): assumes 4 KiB pages — not true on every architecture.
BYTES_PER_PAGE = 4096

# Feature-detect the procfs files read below, so this module still imports
# cleanly on platforms without a Linux-style /proc.
HAVE_PROC_STAT = os.path.exists("/proc/stat")
HAVE_PROC_SELF_STAT = os.path.exists("/proc/self/stat")
HAVE_PROC_SELF_LIMITS = os.path.exists("/proc/self/limits")

# Module-level snapshots refreshed by update_resource_metrics(); the
# callback metrics registered below read from these.
rusage = None  # result of getrusage(RUSAGE_SELF)
stats = None  # fields of /proc/self/stat, after the "(comm)" entry
fd_counts = None  # result of _process_fds()

# In order to report process_start_time_seconds we need to know the machine's
# boot time, because the value in /proc/self/stat is relative to this
boot_time = None
if HAVE_PROC_STAT:
    with open("/proc/stat") as _procstat:
        for line in _procstat:
            if line.startswith("btime "):
                boot_time = int(line.split()[1])

# Map stat.S_IFMT() values to human-readable fd-type labels; anything not
# listed here is counted as "other" by _process_fds().
TYPES = {
    stat.S_IFSOCK: "SOCK",
    stat.S_IFLNK: "LNK",
    stat.S_IFREG: "REG",
    stat.S_IFBLK: "BLK",
    stat.S_IFDIR: "DIR",
    stat.S_IFCHR: "CHR",
    stat.S_IFIFO: "FIFO",
}
|
2015-03-12 12:24:38 -04:00
|
|
|
|
2015-03-05 11:15:21 -05:00
|
|
|
def update_resource_metrics():
    """Refresh the module-level rusage / stats / fd_counts snapshots.

    Called from render_all() so the callback metrics report reasonably
    fresh values.
    """
    global rusage, stats, fd_counts

    rusage = getrusage(RUSAGE_SELF)

    if HAVE_PROC_SELF_STAT:
        with open("/proc/self/stat") as statfile:
            contents = statfile.read()
        # The line looks like "PID (comm) field3 field4 ..."; split just
        # past the closing paren so a strange comm value can't confuse us.
        stats = contents.split(") ", 1)[1].split(" ")

    fd_counts = _process_fds()
|
2015-04-01 14:04:55 -04:00
|
|
|
|
2015-04-01 14:17:38 -04:00
|
|
|
|
2015-04-01 14:04:55 -04:00
|
|
|
def _process_fds():
    """Count this process's open file descriptors by type.

    Returns a dict mapping 1-tuples of type label ("SOCK", "REG", ...,
    "other") to counts, suitable for a labelled callback metric.
    """
    counts = dict.fromkeys([(label,) for label in TYPES.values()], 0)
    counts[("other",)] = 0

    fd_dir = "/proc/self/fd"
    # Not every OS will have a /proc/self/fd directory
    if not os.path.exists(fd_dir):
        return counts

    for name in os.listdir(fd_dir):
        try:
            mode = os.stat("%s/%s" % (fd_dir, name)).st_mode
            label = TYPES.get(stat.S_IFMT(mode), "other")
            counts[(label,)] += 1
        except OSError:
            # the fd used by listdir() itself is usually gone by now
            pass

    return counts
|
|
|
|
|
2016-10-19 09:45:08 -04:00
|
|
|
|
|
|
|
## Legacy synapse-invented metric names

resource_metrics = get_metrics_for("process.resource")

# msecs -- values come from the module-level `rusage` snapshot, which
# update_resource_metrics() refreshes on each render.
resource_metrics.register_callback("utime", lambda: rusage.ru_utime * 1000)
resource_metrics.register_callback("stime", lambda: rusage.ru_stime * 1000)

# kilobytes
resource_metrics.register_callback("maxrss", lambda: rusage.ru_maxrss * 1024)

get_metrics_for("process").register_callback("fds", _process_fds, labels=["type"])

## New prometheus-standard metric names

# (fixed: dropped a stray C-style trailing semicolon)
process_metrics = get_metrics_for("process")
|
|
|
|
|
2016-10-19 10:34:38 -04:00
|
|
|
if HAVE_PROC_SELF_STAT:
    # Indices below are into `stats`, which update_resource_metrics() builds
    # by splitting /proc/self/stat after the "(comm) " entry, so index 0 is
    # field 3 ("state") in proc(5)'s numbering.
    # stats[11]/stats[12] = utime/stime (fields 14/15): clock ticks spent in
    # user/kernel mode.
    process_metrics.register_callback(
        "cpu_user_seconds_total", lambda: float(stats[11]) / TICKS_PER_SEC
    )
    process_metrics.register_callback(
        "cpu_system_seconds_total", lambda: float(stats[12]) / TICKS_PER_SEC
    )
    process_metrics.register_callback(
        "cpu_seconds_total", lambda: (float(stats[11]) + float(stats[12])) / TICKS_PER_SEC
    )

    # stats[20] = vsize (field 23), already in bytes; stats[21] = rss
    # (field 24), in pages, hence the BYTES_PER_PAGE scaling.
    process_metrics.register_callback(
        "virtual_memory_bytes", lambda: int(stats[20])
    )
    process_metrics.register_callback(
        "resident_memory_bytes", lambda: int(stats[21]) * BYTES_PER_PAGE
    )

    # Sum of the per-type fd counts snapshotted by update_resource_metrics().
    process_metrics.register_callback(
        "open_fds", lambda: sum(fd_counts.values())
    )

    # stats[19] = starttime (field 22): ticks after boot; add the machine
    # boot time read from /proc/stat above to get an absolute timestamp.
    # NOTE(review): under Python 2 the "/" here is integer division,
    # truncating to whole seconds — confirm that is intended.
    process_metrics.register_callback(
        "start_time_seconds", lambda: boot_time + int(stats[19]) / TICKS_PER_SEC
    )

if HAVE_PROC_SELF_LIMITS:
    def _get_max_fds():
        """Return the soft "Max open files" limit from /proc/self/limits,
        or None if no such line is found."""
        with open("/proc/self/limits") as limits:
            for line in limits:
                if not line.startswith("Max open files "):
                    continue
                # Line is Max open files $SOFT $HARD
                return int(line.split()[3])
        return None

    process_metrics.register_callback(
        "max_fds", lambda: _get_max_fds()
    )
|
2016-10-19 10:04:52 -04:00
|
|
|
|
2015-08-13 06:38:59 -04:00
|
|
|
reactor_metrics = get_metrics_for("reactor")
# Distribution of wall-clock msecs spent inside each reactor tick, and of
# the number of pending calls observed per tick; both are fed by
# runUntilCurrentTimer() below.
tick_time = reactor_metrics.register_distribution("tick_time")
pending_calls_metric = reactor_metrics.register_distribution("pending_calls")

# Per-generation manual-GC timing (msecs) and unreachable-object counts,
# also fed by runUntilCurrentTimer().
gc_time = reactor_metrics.register_distribution("gc_time", labels=["gen"])
gc_unreachable = reactor_metrics.register_counter("gc_unreachable", labels=["gen"])

# Current allocation counts per GC generation, labelled by generation index.
reactor_metrics.register_callback(
    "gc_counts", lambda: {(i,): v for i, v in enumerate(gc.get_count())}, labels=["gen"]
)
|
|
|
|
|
2015-08-13 06:38:59 -04:00
|
|
|
|
|
|
|
def runUntilCurrentTimer(func):
    """Wrap the reactor's runUntilCurrent with metrics collection.

    The wrapper counts the calls due to run, times the wrapped call into
    ``tick_time``/``pending_calls_metric``, and then performs any needed
    garbage collection by hand (automatic GC is disabled at import time
    below), recording per-generation timings and unreachable counts.
    """

    @functools.wraps(func)
    def f(*args, **kwargs):
        now = reactor.seconds()
        num_pending = 0

        # _newTimedCalls is one long list of *all* pending calls. Below loop
        # is based off of impl of reactor.runUntilCurrent
        for delayed_call in reactor._newTimedCalls:
            # Calls are time-ordered, so stop at the first one not yet due.
            if delayed_call.time > now:
                break

            # NOTE(review): delayed_time > 0 appears to mean the call was
            # pushed back via DelayedCall.delay() and so is not actually
            # due — confirm against Twisted internals.
            if delayed_call.delayed_time > 0:
                continue

            num_pending += 1

        # Calls scheduled from other threads are also pending.
        num_pending += len(reactor.threadCallQueue)

        # Time the wrapped runUntilCurrent call, in msecs.
        start = time.time() * 1000
        ret = func(*args, **kwargs)
        end = time.time() * 1000
        tick_time.inc_by(end - start)
        pending_calls_metric.inc_by(num_pending)

        # Check if we need to do a manual GC (since its been disabled), and do
        # one if necessary.
        threshold = gc.get_threshold()
        counts = gc.get_count()
        # Check each generation, oldest first.
        for i in (2, 1, 0):
            if threshold[i] < counts[i]:
                logger.info("Collecting gc %d", i)

                start = time.time() * 1000
                unreachable = gc.collect(i)
                end = time.time() * 1000

                gc_time.inc_by(end - start, i)
                gc_unreachable.inc_by(unreachable, i)

        return ret

    return f
|
|
|
|
|
|
|
|
|
2015-08-18 06:51:08 -04:00
|
|
|
try:
    # Ensure the reactor has all the attributes we expect
    reactor.runUntilCurrent
    reactor._newTimedCalls
    reactor.threadCallQueue

    # runUntilCurrent is called when we have pending calls. It is called once
    # per iteration after fd polling.
    reactor.runUntilCurrent = runUntilCurrentTimer(reactor.runUntilCurrent)

    # We manually run the GC each reactor tick so that we can get some metrics
    # about time spent doing GC,
    gc.disable()
except AttributeError:
    # Non-standard reactor implementation: leave it (and automatic GC)
    # alone rather than fail at import time.
    pass
|