[python] make units compliant with IEC standard (#4995)

* apache units fix

* beanstalk

* bind_rndc

* boinc

* ceph

* chrony

* couchdb

* dns_query

* dnsdist

* dockerd

* dovecot

* elasticsearch by @vlvkobal <3

* example

* exim

* fail2ban

* freeradius minor fixes

* freeradius minor fixes

* freeradius minor fixes

* go_expvar

* haproxy

* hddtemp

* httpcheck

* icecast

* ipfs

* isc_dhcpd

* litespeed

* logind

* megacli

* memcached

* mongodb

* monit

* mysql

* nginx

* nginx_plus

* nsd

* ntpd

* nvidia_smi

* openldap

* ovpn_status

* phpfpm

* portcheck

* postfix

* postgres

* powerdns

* proxysql

* puppet

* rabbitmq

* redis

* retroshare

* samba

* sensors

* smartd_log

* spigotmc

* springboot

* squid

* retroshare

* tomcat

* retroshare

* tor

* traefik

* traefik

* unbound

* uwsgi

* varnish

* w1sensor

* web_log

* ok codacy

* retroshare

* ipfs
This commit is contained in:
Ilya Mashchenko 2018-12-17 18:50:20 +03:00 committed by GitHub
parent 7ad91b8f9a
commit 97b32703c6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
62 changed files with 747 additions and 580 deletions

View File

@ -5,63 +5,60 @@
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# default job configuration (overridden by python.d.plugin) ORDER = [
# config = {'local': { 'requests',
# 'update_every': update_every, 'connections',
# 'retries': retries, 'conns_async',
# 'priority': priority, 'net',
# 'url': 'http://www.apache.org/server-status?auto' 'workers',
# }} 'reqpersec',
'bytespersec',
# charts order (can be overridden if you want less charts, or different order) 'bytesperreq',
ORDER = ['requests', 'connections', 'conns_async', 'net', 'workers', 'reqpersec', 'bytespersec', 'bytesperreq'] ]
CHARTS = { CHARTS = {
'bytesperreq': { 'bytesperreq': {
'options': [None, 'apache Lifetime Avg. Response Size', 'bytes/request', 'options': [None, 'Lifetime Avg. Request Size', 'KiB',
'statistics', 'apache.bytesperreq', 'area'], 'statistics', 'apache.bytesperreq', 'area'],
'lines': [ 'lines': [
['size_req'] ['size_req', 'size', 'absolute', 1, 1024 * 100000]
]}, ]},
'workers': { 'workers': {
'options': [None, 'apache Workers', 'workers', 'workers', 'apache.workers', 'stacked'], 'options': [None, 'Workers', 'workers', 'workers', 'apache.workers', 'stacked'],
'lines': [ 'lines': [
['idle'], ['idle'],
['busy'], ['busy'],
]}, ]},
'reqpersec': { 'reqpersec': {
'options': [None, 'apache Lifetime Avg. Requests/s', 'requests/s', 'statistics', 'options': [None, 'Lifetime Avg. Requests/s', 'requests/s', 'statistics',
'apache.reqpersec', 'area'], 'apache.reqpersec', 'area'],
'lines': [ 'lines': [
['requests_sec'] ['requests_sec', 'requests', 'absolute', 1, 100000]
]}, ]},
'bytespersec': { 'bytespersec': {
'options': [None, 'apache Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics', 'options': [None, 'Lifetime Avg. Bandwidth/s', 'kilobits/s', 'statistics',
'apache.bytesperreq', 'area'], 'apache.bytesperreq', 'area'],
'lines': [ 'lines': [
['size_sec', None, 'absolute', 8, 1000] ['size_sec', None, 'absolute', 8, 1000 * 100000]
]}, ]},
'requests': { 'requests': {
'options': [None, 'apache Requests', 'requests/s', 'requests', 'apache.requests', 'line'], 'options': [None, 'Requests', 'requests/s', 'requests', 'apache.requests', 'line'],
'lines': [ 'lines': [
['requests', None, 'incremental'] ['requests', None, 'incremental']
]}, ]},
'net': { 'net': {
'options': [None, 'apache Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'], 'options': [None, 'Bandwidth', 'kilobits/s', 'bandwidth', 'apache.net', 'area'],
'lines': [ 'lines': [
['sent', None, 'incremental', 8, 1] ['sent', None, 'incremental', 8, 1]
]}, ]},
'connections': { 'connections': {
'options': [None, 'apache Connections', 'connections', 'connections', 'apache.connections', 'line'], 'options': [None, 'Connections', 'connections', 'connections', 'apache.connections', 'line'],
'lines': [ 'lines': [
['connections'] ['connections']
]}, ]},
'conns_async': { 'conns_async': {
'options': [None, 'apache Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'], 'options': [None, 'Async Connections', 'connections', 'connections', 'apache.conns_async', 'stacked'],
'lines': [ 'lines': [
['keepalive'], ['keepalive'],
['closing'], ['closing'],
@ -85,6 +82,14 @@ ASSIGNMENT = {
'ConnsAsyncWriting': 'writing' 'ConnsAsyncWriting': 'writing'
} }
FLOAT_VALUES = [
'BytesPerReq',
'ReqPerSec',
'BytesPerSec',
]
LIGHTTPD_MARKER = 'idle_servers'
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
@ -95,20 +100,15 @@ class Service(UrlService):
def check(self): def check(self):
self._manager = self._build_manager() self._manager = self._build_manager()
data = self._get_data() data = self._get_data()
if not data: if not data:
return None return None
if 'idle_servers' in data: if LIGHTTPD_MARKER in data:
self.module_name = 'lighttpd' self.turn_into_lighttpd()
for chart in self.definitions:
if chart == 'workers':
lines = self.definitions[chart]['lines']
lines[0] = ['idle_servers', 'idle']
lines[1] = ['busy_servers', 'busy']
opts = self.definitions[chart]['options']
opts[1] = opts[1].replace('apache', 'lighttpd')
opts[4] = opts[4].replace('apache', 'lighttpd')
return True return True
def _get_data(self): def _get_data(self):
@ -117,15 +117,44 @@ class Service(UrlService):
:return: dict :return: dict
""" """
raw_data = self._get_raw_data() raw_data = self._get_raw_data()
if not raw_data: if not raw_data:
return None return None
data = dict() data = dict()
for row in raw_data.split('\n'): for line in raw_data.split('\n'):
tmp = row.split(':') try:
if tmp[0] in ASSIGNMENT: parse_line(line, data)
try: except ValueError:
data[ASSIGNMENT[tmp[0]]] = int(float(tmp[1])) continue
except (IndexError, ValueError):
continue
return data or None return data or None
def turn_into_lighttpd(self):
self.module_name = 'lighttpd'
for chart in self.definitions:
if chart == 'workers':
lines = self.definitions[chart]['lines']
lines[0] = ['idle_servers', 'idle']
lines[1] = ['busy_servers', 'busy']
opts = self.definitions[chart]['options']
opts[1] = opts[1].replace('apache', 'lighttpd')
opts[4] = opts[4].replace('apache', 'lighttpd')
def parse_line(line, data):
parts = line.split(':')
if len(parts) != 2:
return
key, value = parts[0], parts[1]
if key not in ASSIGNMENT:
return
if key in FLOAT_VALUES:
data[ASSIGNMENT[key]] = int((float(value) * 100000))
else:
data[ASSIGNMENT[key]] = int(value)

View File

@ -12,12 +12,18 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
from bases.loaders import safe_load from bases.loaders import safe_load
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
ORDER = ['cpu_usage', 'jobs_rate', 'connections_rate', 'commands_rate', 'current_tubes', 'current_jobs', ORDER = [
'current_connections', 'binlog', 'uptime'] 'cpu_usage',
'jobs_rate',
'connections_rate',
'commands_rate',
'current_tubes',
'current_jobs',
'current_connections',
'binlog',
'uptime',
]
CHARTS = { CHARTS = {
'cpu_usage': { 'cpu_usage': {

View File

@ -11,10 +11,15 @@ from subprocess import Popen
from bases.collection import find_binary from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
priority = 60000
update_every = 30 update_every = 30
ORDER = ['name_server_statistics', 'incoming_queries', 'outgoing_queries', 'named_stats_size'] ORDER = [
'name_server_statistics',
'incoming_queries',
'outgoing_queries',
'named_stats_size',
]
CHARTS = { CHARTS = {
'name_server_statistics': { 'name_server_statistics': {
@ -43,7 +48,7 @@ CHARTS = {
'lines': [ 'lines': [
]}, ]},
'named_stats_size': { 'named_stats_size': {
'options': [None, 'Named Stats File Size', 'MB', 'file size', 'bind_rndc.stats_size', 'line'], 'options': [None, 'Named Stats File Size', 'MiB', 'file size', 'bind_rndc.stats_size', 'line'],
'lines': [ 'lines': [
['stats_size', None, 'absolute', 1, 1 << 20] ['stats_size', None, 'absolute', 1, 1 << 20]
] ]
@ -91,10 +96,20 @@ class Service(SimpleService):
self.definitions = CHARTS self.definitions = CHARTS
self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats') self.named_stats_path = self.configuration.get('named_stats_path', '/var/log/bind/named.stats')
self.rndc = find_binary('rndc') self.rndc = find_binary('rndc')
self.data = dict(nms_requests=0, nms_responses=0, nms_failure=0, nms_auth=0, self.data = dict(
nms_non_auth=0, nms_nxrrset=0, nms_success=0, nms_nxdomain=0, nms_requests=0,
nms_recursion=0, nms_duplicate=0, nms_rejected_queries=0, nms_responses=0,
nms_dropped_queries=0) nms_failure=0,
nms_auth=0,
nms_non_auth=0,
nms_nxrrset=0,
nms_success=0,
nms_nxdomain=0,
nms_recursion=0,
nms_duplicate=0,
nms_rejected_queries=0,
nms_dropped_queries=0,
)
def check(self): def check(self):
if not self.rndc: if not self.rndc:

View File

@ -10,7 +10,12 @@ from bases.FrameworkServices.SimpleService import SimpleService
from third_party import boinc_client from third_party import boinc_client
ORDER = ['tasks', 'states', 'sched_states', 'process_states'] ORDER = [
'tasks',
'states',
'sched_states',
'process_states',
]
CHARTS = { CHARTS = {
'tasks': { 'tasks': {
@ -141,14 +146,16 @@ class Service(SimpleService):
def _get_data(self): def _get_data(self):
if not self.is_alive(): if not self.is_alive():
return None return None
data = dict(_DATA_TEMPLATE) data = dict(_DATA_TEMPLATE)
results = []
try: try:
results = self.client.get_tasks() results = self.client.get_tasks()
except socket.error: except socket.error:
self.error('Connection is dead') self.error('Connection is dead')
self.alive = False self.alive = False
return None return None
for task in results: for task in results:
data['total'] += 1 data['total'] += 1
data[_TASK_MAP[task.state]] += 1 data[_TASK_MAP[task.state]] += 1
@ -159,4 +166,5 @@ class Service(SimpleService):
data[_PROC_MAP[task.active_task_state]] += 1 data[_PROC_MAP[task.active_task_state]] += 1
except AttributeError: except AttributeError:
pass pass
return data
return data or None

View File

@ -9,13 +9,13 @@ try:
except ImportError: except ImportError:
CEPH = False CEPH = False
import os
import json import json
import os
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`) # default module values (can be overridden per job in `config`)
update_every = 10 update_every = 10
priority = 60000
ORDER = [ ORDER = [
'general_usage', 'general_usage',
@ -36,7 +36,7 @@ ORDER = [
CHARTS = { CHARTS = {
'general_usage': { 'general_usage': {
'options': [None, 'Ceph General Space', 'KB', 'general', 'ceph.general_usage', 'stacked'], 'options': [None, 'Ceph General Space', 'KiB', 'general', 'ceph.general_usage', 'stacked'],
'lines': [ 'lines': [
['general_available', 'avail', 'absolute'], ['general_available', 'avail', 'absolute'],
['general_usage', 'used', 'absolute'] ['general_usage', 'used', 'absolute']
@ -49,7 +49,7 @@ CHARTS = {
] ]
}, },
'general_bytes': { 'general_bytes': {
'options': [None, 'Ceph General Read/Write Data/s', 'KB', 'general', 'ceph.general_bytes', 'options': [None, 'Ceph General Read/Write Data/s', 'KiB/s', 'general', 'ceph.general_bytes',
'area'], 'area'],
'lines': [ 'lines': [
['general_read_bytes', 'read', 'absolute', 1, 1024], ['general_read_bytes', 'read', 'absolute', 1, 1024],
@ -73,7 +73,7 @@ CHARTS = {
] ]
}, },
'pool_usage': { 'pool_usage': {
'options': [None, 'Ceph Pools', 'KB', 'pool', 'ceph.pool_usage', 'line'], 'options': [None, 'Ceph Pools', 'KiB', 'pool', 'ceph.pool_usage', 'line'],
'lines': [] 'lines': []
}, },
'pool_objects': { 'pool_objects': {
@ -81,11 +81,11 @@ CHARTS = {
'lines': [] 'lines': []
}, },
'pool_read_bytes': { 'pool_read_bytes': {
'options': [None, 'Ceph Read Pool Data/s', 'KB', 'pool', 'ceph.pool_read_bytes', 'area'], 'options': [None, 'Ceph Read Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_read_bytes', 'area'],
'lines': [] 'lines': []
}, },
'pool_write_bytes': { 'pool_write_bytes': {
'options': [None, 'Ceph Write Pool Data/s', 'KB', 'pool', 'ceph.pool_write_bytes', 'area'], 'options': [None, 'Ceph Write Pool Data/s', 'KiB/s', 'pool', 'ceph.pool_write_bytes', 'area'],
'lines': [] 'lines': []
}, },
'pool_read_operations': { 'pool_read_operations': {
@ -97,7 +97,7 @@ CHARTS = {
'lines': [] 'lines': []
}, },
'osd_usage': { 'osd_usage': {
'options': [None, 'Ceph OSDs', 'KB', 'osd', 'ceph.osd_usage', 'line'], 'options': [None, 'Ceph OSDs', 'KiB', 'osd', 'ceph.osd_usage', 'line'],
'lines': [] 'lines': []
}, },
'osd_apply_latency': { 'osd_apply_latency': {

View File

@ -7,10 +7,19 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
# default module values (can be overridden per job in `config`) # default module values (can be overridden per job in `config`)
update_every = 5 update_every = 5
priority = 60000
CHRONY_COMMAND = 'chronyc -n tracking'
# charts order (can be overridden if you want less charts, or different order) # charts order (can be overridden if you want less charts, or different order)
ORDER = ['system', 'offsets', 'stratum', 'root', 'frequency', 'residualfreq', 'skew'] ORDER = [
'system',
'offsets',
'stratum',
'root',
'frequency',
'residualfreq',
'skew',
]
CHARTS = { CHARTS = {
'system': { 'system': {
@ -76,9 +85,9 @@ class Service(ExecutableService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
ExecutableService.__init__( ExecutableService.__init__(
self, configuration=configuration, name=name) self, configuration=configuration, name=name)
self.command = 'chronyc -n tracking'
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.command = CHRONY_COMMAND
def _get_data(self): def _get_data(self):
""" """

View File

@ -8,6 +8,7 @@ from collections import namedtuple, defaultdict
from json import loads from json import loads
from threading import Thread from threading import Thread
from socket import gethostbyname, gaierror from socket import gethostbyname, gaierror
try: try:
from queue import Queue from queue import Queue
except ImportError: except ImportError:
@ -15,9 +16,9 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 1 update_every = 1
priority = 60000
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats']) METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
@ -108,7 +109,7 @@ ORDER = [
CHARTS = { CHARTS = {
'activity': { 'activity': {
'options': [None, 'Overall Activity', 'req/s', 'options': [None, 'Overall Activity', 'requests/s',
'dbactivity', 'couchdb.activity', 'stacked'], 'dbactivity', 'couchdb.activity', 'stacked'],
'lines': [ 'lines': [
['couchdb_database_reads', 'DB reads', 'incremental'], ['couchdb_database_reads', 'DB reads', 'incremental'],
@ -117,7 +118,7 @@ CHARTS = {
] ]
}, },
'request_methods': { 'request_methods': {
'options': [None, 'HTTP request methods', 'req/s', 'options': [None, 'HTTP request methods', 'requests/s',
'httptraffic', 'couchdb.request_methods', 'httptraffic', 'couchdb.request_methods',
'stacked'], 'stacked'],
'lines': [ 'lines': [
@ -132,7 +133,7 @@ CHARTS = {
] ]
}, },
'response_codes': { 'response_codes': {
'options': [None, 'HTTP response status codes', 'resp/s', 'options': [None, 'HTTP response status codes', 'responses/s',
'httptraffic', 'couchdb.response_codes', 'httptraffic', 'couchdb.response_codes',
'stacked'], 'stacked'],
'lines': [ 'lines': [
@ -150,15 +151,13 @@ CHARTS = {
] ]
}, },
'open_files': { 'open_files': {
'options': [None, 'Open files', 'files', 'options': [None, 'Open files', 'files', 'ops', 'couchdb.open_files', 'line'],
'ops', 'couchdb.open_files', 'line'],
'lines': [ 'lines': [
['couchdb_open_os_files', '# files', 'absolute'] ['couchdb_open_os_files', '# files', 'absolute']
] ]
}, },
'active_tasks': { 'active_tasks': {
'options': [None, 'Active task breakdown', 'tasks', 'options': [None, 'Active task breakdown', 'tasks', 'ops', 'couchdb.active_tasks', 'stacked'],
'ops', 'couchdb.active_tasks', 'stacked'],
'lines': [ 'lines': [
['activetasks_indexer', 'Indexer', 'absolute'], ['activetasks_indexer', 'Indexer', 'absolute'],
['activetasks_database_compaction', 'DB Compaction', 'absolute'], ['activetasks_database_compaction', 'DB Compaction', 'absolute'],
@ -167,8 +166,7 @@ CHARTS = {
] ]
}, },
'replicator_jobs': { 'replicator_jobs': {
'options': [None, 'Replicator job breakdown', 'jobs', 'options': [None, 'Replicator job breakdown', 'jobs', 'ops', 'couchdb.replicator_jobs', 'stacked'],
'ops', 'couchdb.replicator_jobs', 'stacked'],
'lines': [ 'lines': [
['couch_replicator_jobs_running', 'Running', 'absolute'], ['couch_replicator_jobs_running', 'Running', 'absolute'],
['couch_replicator_jobs_pending', 'Pending', 'absolute'], ['couch_replicator_jobs_pending', 'Pending', 'absolute'],
@ -178,8 +176,7 @@ CHARTS = {
] ]
}, },
'erlang_memory': { 'erlang_memory': {
'options': [None, 'Erlang VM memory usage', 'bytes', 'options': [None, 'Erlang VM memory usage', 'B', 'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
'erlang', 'couchdb.erlang_vm_memory', 'stacked'],
'lines': [ 'lines': [
['memory_atom', 'atom', 'absolute'], ['memory_atom', 'atom', 'absolute'],
['memory_binary', 'binaries', 'absolute'], ['memory_binary', 'binaries', 'absolute'],
@ -190,23 +187,20 @@ CHARTS = {
] ]
}, },
'erlang_reductions': { 'erlang_reductions': {
'options': [None, 'Erlang reductions', 'count', 'options': [None, 'Erlang reductions', 'count', 'erlang', 'couchdb.reductions', 'line'],
'erlang', 'couchdb.reductions', 'line'],
'lines': [ 'lines': [
['reductions', 'reductions', 'incremental'] ['reductions', 'reductions', 'incremental']
] ]
}, },
'erlang_proc_counts': { 'erlang_proc_counts': {
'options': [None, 'Process counts', 'count', 'options': [None, 'Process counts', 'count', 'erlang', 'couchdb.proccounts', 'line'],
'erlang', 'couchdb.proccounts', 'line'],
'lines': [ 'lines': [
['os_proc_count', 'OS procs', 'absolute'], ['os_proc_count', 'OS procs', 'absolute'],
['process_count', 'erl procs', 'absolute'] ['process_count', 'erl procs', 'absolute']
] ]
}, },
'erlang_peak_msg_queue': { 'erlang_peak_msg_queue': {
'options': [None, 'Peak message queue size', 'count', 'options': [None, 'Peak message queue size', 'count', 'erlang', 'couchdb.peakmsgqueue',
'erlang', 'couchdb.peakmsgqueue',
'line'], 'line'],
'lines': [ 'lines': [
['peak_msg_queue', 'peak size', 'absolute'] ['peak_msg_queue', 'peak size', 'absolute']
@ -214,18 +208,15 @@ CHARTS = {
}, },
# Lines for the following are added as part of check() # Lines for the following are added as part of check()
'db_sizes_file': { 'db_sizes_file': {
'options': [None, 'Database sizes (file)', 'KB', 'options': [None, 'Database sizes (file)', 'KiB', 'perdbstats', 'couchdb.db_sizes_file', 'line'],
'perdbstats', 'couchdb.db_sizes_file', 'line'],
'lines': [] 'lines': []
}, },
'db_sizes_external': { 'db_sizes_external': {
'options': [None, 'Database sizes (external)', 'KB', 'options': [None, 'Database sizes (external)', 'KiB', 'perdbstats', 'couchdb.db_sizes_external', 'line'],
'perdbstats', 'couchdb.db_sizes_external', 'line'],
'lines': [] 'lines': []
}, },
'db_sizes_active': { 'db_sizes_active': {
'options': [None, 'Database sizes (active)', 'KB', 'options': [None, 'Database sizes (active)', 'KiB', 'perdbstats', 'couchdb.db_sizes_active', 'line'],
'perdbstats', 'couchdb.db_sizes_active', 'line'],
'lines': [] 'lines': []
}, },
'db_doc_counts': { 'db_doc_counts': {
@ -234,8 +225,7 @@ CHARTS = {
'lines': [] 'lines': []
}, },
'db_doc_del_counts': { 'db_doc_del_counts': {
'options': [None, 'Database # of deleted docs', 'docs', 'options': [None, 'Database # of deleted docs', 'docs', 'perdbstats', 'couchdb_db_doc_del_count', 'line'],
'perdbstats', 'couchdb_db_doc_del_count', 'line'],
'lines': [] 'lines': []
} }
} }
@ -255,7 +245,7 @@ class Service(UrlService):
try: try:
self.dbs = self.configuration.get('databases').split(' ') self.dbs = self.configuration.get('databases').split(' ')
except (KeyError, AttributeError): except (KeyError, AttributeError):
self.dbs = [] self.dbs = list()
def check(self): def check(self):
if not (self.host and self.port): if not (self.host and self.port):

View File

@ -28,10 +28,7 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
update_every = 5 update_every = 5
priority = 60000
retries = 60
class Service(SimpleService): class Service(SimpleService):
@ -46,14 +43,14 @@ class Service(SimpleService):
def check(self): def check(self):
if not DNS_PYTHON: if not DNS_PYTHON:
self.error('\'python-dnspython\' package is needed to use dns_query_time.chart.py') self.error("'python-dnspython' package is needed to use dns_query_time.chart.py")
return False return False
self.timeout = self.timeout if isinstance(self.timeout, int) else 4 self.timeout = self.timeout if isinstance(self.timeout, int) else 4
if not all([self.domains, self.server_list, if not all([self.domains, self.server_list,
isinstance(self.server_list, str), isinstance(self.domains, str)]): isinstance(self.server_list, str), isinstance(self.domains, str)]):
self.error('server_list and domain_list can\'t be empty') self.error("server_list and domain_list can't be empty")
return False return False
else: else:
self.domains, self.server_list = self.domains.split(), self.server_list.split() self.domains, self.server_list = self.domains.split(), self.server_list.split()
@ -129,17 +126,27 @@ def create_charts(aggregate, server_list):
} }
} }
for ns in server_list: for ns in server_list:
definitions['dns_group']['lines'].append(['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute']) dim = [
'_'.join(['ns', ns.replace('.', '_')]),
ns,
'absolute',
]
definitions['dns_group']['lines'].append(dim)
return order, definitions return order, definitions
else: else:
order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list] order = [''.join(['dns_', ns.replace('.', '_')]) for ns in server_list]
definitions = dict() definitions = dict()
for ns in server_list: for ns in server_list:
definitions[''.join(['dns_', ns.replace('.', '_')])] = { definitions[''.join(['dns_', ns.replace('.', '_')])] = {
'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'], 'options': [None, 'DNS Response Time', 'ms', ns, 'dns_query_time.response_time', 'area'],
'lines': [ 'lines': [
['_'.join(['ns', ns.replace('.', '_')]), ns, 'absolute'] [
'_'.join(['ns', ns.replace('.', '_')]),
ns,
'absolute',
]
] ]
} }
return order, definitions return order, definitions

View File

@ -90,9 +90,9 @@ CHARTS = {
] ]
}, },
'servermem': { 'servermem': {
'options': [None, 'DNSDIST server memory utilization', 'MB', 'server', 'dnsdist.servermem', 'area'], 'options': [None, 'DNSDIST server memory utilization', 'MiB', 'server', 'dnsdist.servermem', 'area'],
'lines': [ 'lines': [
['real-memory-usage', 'memory usage', 'absolute', 1, 1048576] ['real-memory-usage', 'memory usage', 'absolute', 1, 1 << 20]
] ]
}, },
'query_latency': { 'query_latency': {

View File

@ -23,21 +23,21 @@ ORDER = [
CHARTS = { CHARTS = {
'running_containers': { 'running_containers': {
'options': [None, 'Number of running containers', 'running containers', 'running containers', 'options': [None, 'Number of running containers', 'containers', 'running containers',
'docker.running_containers', 'line'], 'docker.running_containers', 'line'],
'lines': [ 'lines': [
['running_containers', 'running'] ['running_containers', 'running']
] ]
}, },
'healthy_containers': { 'healthy_containers': {
'options': [None, 'Number of healthy containers', 'healthy containers', 'healthy containers', 'options': [None, 'Number of healthy containers', 'containers', 'healthy containers',
'docker.healthy_containers', 'line'], 'docker.healthy_containers', 'line'],
'lines': [ 'lines': [
['healthy_containers', 'healthy'] ['healthy_containers', 'healthy']
] ]
}, },
'unhealthy_containers': { 'unhealthy_containers': {
'options': [None, 'Number of unhealthy containers', 'unhealthy containers', 'unhealthy containers', 'options': [None, 'Number of unhealthy containers', 'containers', 'unhealthy containers',
'docker.unhealthy_containers', 'line'], 'docker.unhealthy_containers', 'line'],
'lines': [ 'lines': [
['unhealthy_containers', 'unhealthy'] ['unhealthy_containers', 'unhealthy']
@ -51,10 +51,11 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name) SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.client = None
def check(self): def check(self):
if not HAS_DOCKER: if not HAS_DOCKER:
self.error('\'docker\' package is needed to use docker.chart.py') self.error("'docker' package is needed to use docker.chart.py")
return False return False
self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock')) self.client = docker.DockerClient(base_url=self.configuration.get('url', 'unix://var/run/docker.sock'))
@ -69,6 +70,7 @@ class Service(SimpleService):
def get_data(self): def get_data(self):
data = dict() data = dict()
data['running_containers'] = len(self.client.containers.list(sparse=True)) data['running_containers'] = len(self.client.containers.list(sparse=True))
data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True)) data['healthy_containers'] = len(self.client.containers.list(filters={'health': 'healthy'}, sparse=True))
data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True)) data['unhealthy_containers'] = len(self.client.containers.list(filters={'health': 'unhealthy'}, sparse=True))

View File

@ -5,11 +5,10 @@
from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.SocketService import SocketService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# charts order (can be overridden if you want less charts, or different order) UNIX_SOCKET = '/var/run/dovecot/stats'
ORDER = [ ORDER = [
'sessions', 'sessions',
'logins', 'logins',
@ -52,14 +51,14 @@ CHARTS = {
] ]
}, },
'context_switches': { 'context_switches': {
'options': [None, 'Dovecot Context Switches', '', 'context switches', 'dovecot.context_switches', 'line'], 'options': [None, 'Dovecot Context Switches', 'switches', 'context switches', 'dovecot.context_switches', 'line'],
'lines': [ 'lines': [
['vol_cs', 'voluntary', 'absolute'], ['vol_cs', 'voluntary', 'absolute'],
['invol_cs', 'involuntary', 'absolute'] ['invol_cs', 'involuntary', 'absolute']
] ]
}, },
'io': { 'io': {
'options': [None, 'Dovecot Disk I/O', 'kilobytes/s', 'disk', 'dovecot.io', 'area'], 'options': [None, 'Dovecot Disk I/O', 'KiB/s', 'disk', 'dovecot.io', 'area'],
'lines': [ 'lines': [
['disk_input', 'read', 'incremental', 1, 1024], ['disk_input', 'read', 'incremental', 1, 1024],
['disk_output', 'write', 'incremental', -1, 1024] ['disk_output', 'write', 'incremental', -1, 1024]
@ -68,8 +67,8 @@ CHARTS = {
'net': { 'net': {
'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'], 'options': [None, 'Dovecot Network Bandwidth', 'kilobits/s', 'network', 'dovecot.net', 'area'],
'lines': [ 'lines': [
['read_bytes', 'read', 'incremental', 8, 1024], ['read_bytes', 'read', 'incremental', 8, 1000],
['write_bytes', 'write', 'incremental', -8, 1024] ['write_bytes', 'write', 'incremental', -8, 1000]
] ]
}, },
'syscalls': { 'syscalls': {
@ -112,13 +111,12 @@ CHARTS = {
class Service(SocketService): class Service(SocketService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name) SocketService.__init__(self, configuration=configuration, name=name)
self.request = 'EXPORT\tglobal\r\n'
self.host = None # localhost
self.port = None # 24242
# self._keep_alive = True
self.unix_socket = '/var/run/dovecot/stats'
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.host = None # localhost
self.port = None # 24242
self.unix_socket = UNIX_SOCKET
self.request = 'EXPORT\tglobal\r\n'
def _get_data(self): def _get_data(self):
""" """

View File

@ -159,17 +159,20 @@ ORDER = [
'fielddata_evictions_tripped', 'fielddata_evictions_tripped',
'cluster_health_status', 'cluster_health_status',
'cluster_health_nodes', 'cluster_health_nodes',
'cluster_health_pending_tasks',
'cluster_health_flight_fetch',
'cluster_health_shards', 'cluster_health_shards',
'cluster_stats_nodes', 'cluster_stats_nodes',
'cluster_stats_query_cache', 'cluster_stats_query_cache',
'cluster_stats_docs', 'cluster_stats_docs',
'cluster_stats_store', 'cluster_stats_store',
'cluster_stats_indices_shards', 'cluster_stats_indices',
'cluster_stats_shards_total',
] ]
CHARTS = { CHARTS = {
'search_performance_total': { 'search_performance_total': {
'options': [None, 'Queries And Fetches', 'number of', 'search performance', 'options': [None, 'Queries And Fetches', 'events/s', 'search performance',
'elastic.search_performance_total', 'stacked'], 'elastic.search_performance_total', 'stacked'],
'lines': [ 'lines': [
['indices_search_query_total', 'queries', 'incremental'], ['indices_search_query_total', 'queries', 'incremental'],
@ -177,7 +180,7 @@ CHARTS = {
] ]
}, },
'search_performance_current': { 'search_performance_current': {
'options': [None, 'Queries and Fetches In Progress', 'number of', 'search performance', 'options': [None, 'Queries and Fetches In Progress', 'events', 'search performance',
'elastic.search_performance_current', 'stacked'], 'elastic.search_performance_current', 'stacked'],
'lines': [ 'lines': [
['indices_search_query_current', 'queries', 'absolute'], ['indices_search_query_current', 'queries', 'absolute'],
@ -193,14 +196,14 @@ CHARTS = {
] ]
}, },
'search_latency': { 'search_latency': {
'options': [None, 'Query And Fetch Latency', 'ms', 'search performance', 'elastic.search_latency', 'stacked'], 'options': [None, 'Query And Fetch Latency', 'milliseconds', 'search performance', 'elastic.search_latency', 'stacked'],
'lines': [ 'lines': [
['query_latency', 'query', 'absolute', 1, 1000], ['query_latency', 'query', 'absolute', 1, 1000],
['fetch_latency', 'fetch', 'absolute', 1, 1000] ['fetch_latency', 'fetch', 'absolute', 1, 1000]
] ]
}, },
'index_performance_total': { 'index_performance_total': {
'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'number of', 'options': [None, 'Indexed Documents, Index Refreshes, Index Flushes To Disk', 'events/s',
'indexing performance', 'elastic.index_performance_total', 'stacked'], 'indexing performance', 'elastic.index_performance_total', 'stacked'],
'lines': [ 'lines': [
['indices_indexing_index_total', 'indexed', 'incremental'], ['indices_indexing_index_total', 'indexed', 'incremental'],
@ -225,7 +228,7 @@ CHARTS = {
] ]
}, },
'index_latency': { 'index_latency': {
'options': [None, 'Indexing And Flushing Latency', 'ms', 'indexing performance', 'options': [None, 'Indexing And Flushing Latency', 'milliseconds', 'indexing performance',
'elastic.index_latency', 'stacked'], 'elastic.index_latency', 'stacked'],
'lines': [ 'lines': [
['indexing_latency', 'indexing', 'absolute', 1, 1000], ['indexing_latency', 'indexing', 'absolute', 1, 1000],
@ -233,7 +236,7 @@ CHARTS = {
] ]
}, },
'index_translog_operations': { 'index_translog_operations': {
'options': [None, 'Translog Operations', 'count', 'translog', 'options': [None, 'Translog Operations', 'operations', 'translog',
'elastic.index_translog_operations', 'area'], 'elastic.index_translog_operations', 'area'],
'lines': [ 'lines': [
['indices_translog_operations', 'total', 'absolute'], ['indices_translog_operations', 'total', 'absolute'],
@ -241,7 +244,7 @@ CHARTS = {
] ]
}, },
'index_translog_size': { 'index_translog_size': {
'options': [None, 'Translog Size', 'MB', 'translog', 'options': [None, 'Translog Size', 'MiB', 'translog',
'elastic.index_translog_size', 'area'], 'elastic.index_translog_size', 'area'],
'lines': [ 'lines': [
['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567], ['indices_translog_size_in_bytes', 'total', 'absolute', 1, 1048567],
@ -249,21 +252,21 @@ CHARTS = {
] ]
}, },
'index_segments_count': { 'index_segments_count': {
'options': [None, 'Total Number Of Indices Segments', 'count', 'indices segments', 'options': [None, 'Total Number Of Indices Segments', 'segments', 'indices segments',
'elastic.index_segments_count', 'line'], 'elastic.index_segments_count', 'line'],
'lines': [ 'lines': [
['indices_segments_count', 'segments', 'absolute'] ['indices_segments_count', 'segments', 'absolute']
] ]
}, },
'index_segments_memory_writer': { 'index_segments_memory_writer': {
'options': [None, 'Index Writer Memory Usage', 'MB', 'indices segments', 'options': [None, 'Index Writer Memory Usage', 'MiB', 'indices segments',
'elastic.index_segments_memory_writer', 'area'], 'elastic.index_segments_memory_writer', 'area'],
'lines': [ 'lines': [
['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048567] ['indices_segments_index_writer_memory_in_bytes', 'total', 'absolute', 1, 1048567]
] ]
}, },
'index_segments_memory': { 'index_segments_memory': {
'options': [None, 'Indices Segments Memory Usage', 'MB', 'indices segments', 'options': [None, 'Indices Segments Memory Usage', 'MiB', 'indices segments',
'elastic.index_segments_memory', 'stacked'], 'elastic.index_segments_memory', 'stacked'],
'lines': [ 'lines': [
['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048567], ['indices_segments_terms_memory_in_bytes', 'terms', 'absolute', 1, 1048567],
@ -277,14 +280,14 @@ CHARTS = {
] ]
}, },
'jvm_mem_heap': { 'jvm_mem_heap': {
'options': [None, 'JVM Heap Percentage Currently in Use', 'percent', 'memory usage and gc', 'options': [None, 'JVM Heap Percentage Currently in Use', 'percentage', 'memory usage and gc',
'elastic.jvm_heap', 'area'], 'elastic.jvm_heap', 'area'],
'lines': [ 'lines': [
['jvm_mem_heap_used_percent', 'inuse', 'absolute'] ['jvm_mem_heap_used_percent', 'inuse', 'absolute']
] ]
}, },
'jvm_mem_heap_bytes': { 'jvm_mem_heap_bytes': {
'options': [None, 'JVM Heap Commit And Usage', 'MB', 'memory usage and gc', 'options': [None, 'JVM Heap Commit And Usage', 'MiB', 'memory usage and gc',
'elastic.jvm_heap_bytes', 'area'], 'elastic.jvm_heap_bytes', 'area'],
'lines': [ 'lines': [
['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576], ['jvm_mem_heap_committed_in_bytes', 'commited', 'absolute', 1, 1048576],
@ -292,7 +295,7 @@ CHARTS = {
] ]
}, },
'jvm_buffer_pool_count': { 'jvm_buffer_pool_count': {
'options': [None, 'JVM Buffers', 'count', 'memory usage and gc', 'options': [None, 'JVM Buffers', 'pools', 'memory usage and gc',
'elastic.jvm_buffer_pool_count', 'line'], 'elastic.jvm_buffer_pool_count', 'line'],
'lines': [ 'lines': [
['jvm_buffer_pools_direct_count', 'direct', 'absolute'], ['jvm_buffer_pools_direct_count', 'direct', 'absolute'],
@ -300,7 +303,7 @@ CHARTS = {
] ]
}, },
'jvm_direct_buffers_memory': { 'jvm_direct_buffers_memory': {
'options': [None, 'JVM Direct Buffers Memory', 'MB', 'memory usage and gc', 'options': [None, 'JVM Direct Buffers Memory', 'MiB', 'memory usage and gc',
'elastic.jvm_direct_buffers_memory', 'area'], 'elastic.jvm_direct_buffers_memory', 'area'],
'lines': [ 'lines': [
['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048567], ['jvm_buffer_pools_direct_used_in_bytes', 'used', 'absolute', 1, 1048567],
@ -308,7 +311,7 @@ CHARTS = {
] ]
}, },
'jvm_mapped_buffers_memory': { 'jvm_mapped_buffers_memory': {
'options': [None, 'JVM Mapped Buffers Memory', 'MB', 'memory usage and gc', 'options': [None, 'JVM Mapped Buffers Memory', 'MiB', 'memory usage and gc',
'elastic.jvm_mapped_buffers_memory', 'area'], 'elastic.jvm_mapped_buffers_memory', 'area'],
'lines': [ 'lines': [
['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048567], ['jvm_buffer_pools_mapped_used_in_bytes', 'used', 'absolute', 1, 1048567],
@ -316,14 +319,14 @@ CHARTS = {
] ]
}, },
'jvm_gc_count': { 'jvm_gc_count': {
'options': [None, 'Garbage Collections', 'counts', 'memory usage and gc', 'elastic.gc_count', 'stacked'], 'options': [None, 'Garbage Collections', 'events/s', 'memory usage and gc', 'elastic.gc_count', 'stacked'],
'lines': [ 'lines': [
['jvm_gc_collectors_young_collection_count', 'young', 'incremental'], ['jvm_gc_collectors_young_collection_count', 'young', 'incremental'],
['jvm_gc_collectors_old_collection_count', 'old', 'incremental'] ['jvm_gc_collectors_old_collection_count', 'old', 'incremental']
] ]
}, },
'jvm_gc_time': { 'jvm_gc_time': {
'options': [None, 'Time Spent On Garbage Collections', 'ms', 'memory usage and gc', 'options': [None, 'Time Spent On Garbage Collections', 'milliseconds', 'memory usage and gc',
'elastic.gc_time', 'stacked'], 'elastic.gc_time', 'stacked'],
'lines': [ 'lines': [
['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'], ['jvm_gc_collectors_young_collection_time_in_millis', 'young', 'incremental'],
@ -353,13 +356,13 @@ CHARTS = {
] ]
}, },
'fielddata_cache': { 'fielddata_cache': {
'options': [None, 'Fielddata Cache', 'MB', 'fielddata cache', 'elastic.fielddata_cache', 'line'], 'options': [None, 'Fielddata Cache', 'MiB', 'fielddata cache', 'elastic.fielddata_cache', 'line'],
'lines': [ 'lines': [
['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576] ['indices_fielddata_memory_size_in_bytes', 'cache', 'absolute', 1, 1048576]
] ]
}, },
'fielddata_evictions_tripped': { 'fielddata_evictions_tripped': {
'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'number of events', 'options': [None, 'Fielddata Evictions And Circuit Breaker Tripped Count', 'events/s',
'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'], 'fielddata cache', 'elastic.fielddata_evictions_tripped', 'line'],
'lines': [ 'lines': [
['indices_fielddata_evictions', 'evictions', 'incremental'], ['indices_fielddata_evictions', 'evictions', 'incremental'],
@ -367,12 +370,24 @@ CHARTS = {
] ]
}, },
'cluster_health_nodes': { 'cluster_health_nodes': {
'options': [None, 'Nodes And Tasks Statistics', 'units', 'cluster health API', 'options': [None, 'Nodes Statistics', 'nodes', 'cluster health API',
'elastic.cluster_health_nodes', 'stacked'], 'elastic.cluster_health_nodes', 'stacked'],
'lines': [ 'lines': [
['number_of_nodes', 'nodes', 'absolute'], ['number_of_nodes', 'nodes', 'absolute'],
['number_of_data_nodes', 'data_nodes', 'absolute'], ['number_of_data_nodes', 'data_nodes', 'absolute'],
]
},
'cluster_health_pending_tasks': {
'options': [None, 'Tasks Statistics', 'tasks', 'cluster health API',
'elastic.cluster_health_pending_tasks', 'line'],
'lines': [
['number_of_pending_tasks', 'pending_tasks', 'absolute'], ['number_of_pending_tasks', 'pending_tasks', 'absolute'],
]
},
'cluster_health_flight_fetch': {
'options': [None, 'In Flight Fetches Statistics', 'fetches', 'cluster health API',
'elastic.cluster_health_flight_fetch', 'line'],
'lines': [
['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute'] ['number_of_in_flight_fetch', 'in_flight_fetch', 'absolute']
] ]
}, },
@ -420,24 +435,30 @@ CHARTS = {
] ]
}, },
'cluster_stats_docs': { 'cluster_stats_docs': {
'options': [None, 'Docs Statistics', 'count', 'cluster stats API', 'options': [None, 'Docs Statistics', 'docs', 'cluster stats API',
'elastic.cluster_docs', 'line'], 'elastic.cluster_docs', 'line'],
'lines': [ 'lines': [
['indices_docs_count', 'docs', 'absolute'] ['indices_docs_count', 'docs', 'absolute']
] ]
}, },
'cluster_stats_store': { 'cluster_stats_store': {
'options': [None, 'Store Statistics', 'MB', 'cluster stats API', 'options': [None, 'Store Statistics', 'MiB', 'cluster stats API',
'elastic.cluster_store', 'line'], 'elastic.cluster_store', 'line'],
'lines': [ 'lines': [
['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048567] ['indices_store_size_in_bytes', 'size', 'absolute', 1, 1048567]
] ]
}, },
'cluster_stats_indices_shards': { 'cluster_stats_indices': {
'options': [None, 'Indices And Shards Statistics', 'count', 'cluster stats API', 'options': [None, 'Indices Statistics', 'indices', 'cluster stats API',
'elastic.cluster_indices_shards', 'stacked'], 'elastic.cluster_indices', 'line'],
'lines': [ 'lines': [
['indices_count', 'indices', 'absolute'], ['indices_count', 'indices', 'absolute'],
]
},
'cluster_stats_shards_total': {
'options': [None, 'Total Shards Statistics', 'shards', 'cluster stats API',
'elastic.cluster_shards_total', 'line'],
'lines': [
['indices_shards_total', 'shards', 'absolute'] ['indices_shards_total', 'shards', 'absolute']
] ]
}, },
@ -450,7 +471,7 @@ CHARTS = {
] ]
}, },
'host_metrics_file_descriptors': { 'host_metrics_file_descriptors': {
'options': [None, 'Available File Descriptors In Percent', 'percent', 'host metrics', 'options': [None, 'Available File Descriptors In Percent', 'percentage', 'host metrics',
'elastic.host_descriptors', 'area'], 'elastic.host_descriptors', 'area'],
'lines': [ 'lines': [
['file_descriptors_used', 'used', 'absolute', 1, 10] ['file_descriptors_used', 'used', 'absolute', 1, 10]
@ -473,9 +494,11 @@ class Service(UrlService):
self.definitions = CHARTS self.definitions = CHARTS
self.host = self.configuration.get('host') self.host = self.configuration.get('host')
self.port = self.configuration.get('port', 9200) self.port = self.configuration.get('port', 9200)
self.url = '{scheme}://{host}:{port}'.format(scheme=self.configuration.get('scheme', 'http'), self.url = '{scheme}://{host}:{port}'.format(
host=self.host, scheme=self.configuration.get('scheme', 'http'),
port=self.port) host=self.host,
port=self.port,
)
self.latency = dict() self.latency = dict()
self.methods = list() self.methods = list()

View File

@ -7,11 +7,13 @@ from random import SystemRandom
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values
# update_every = 4
priority = 90000 priority = 90000
ORDER = ['random'] ORDER = [
'random',
]
CHARTS = { CHARTS = {
'random': { 'random': {
'options': [None, 'A random number', 'random number', 'random', 'random', 'line'], 'options': [None, 'A random number', 'random number', 'random', 'random', 'line'],

View File

@ -5,12 +5,12 @@
from bases.FrameworkServices.ExecutableService import ExecutableService from bases.FrameworkServices.ExecutableService import ExecutableService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# charts order (can be overridden if you want less charts, or different order) EXIM_COMMAND = 'exim -bpc'
ORDER = ['qemails']
ORDER = [
'qemails',
]
CHARTS = { CHARTS = {
'qemails': { 'qemails': {
@ -25,9 +25,9 @@ CHARTS = {
class Service(ExecutableService): class Service(ExecutableService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name) ExecutableService.__init__(self, configuration=configuration, name=name)
self.command = 'exim -bpc'
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.command = EXIM_COMMAND
def _get_data(self): def _get_data(self):
""" """

View File

@ -35,8 +35,19 @@ def charts(jails):
}, },
} }
for jail in jails: for jail in jails:
ch[ORDER[0]]['lines'].append([jail, jail, 'incremental']) dim = [
ch[ORDER[1]]['lines'].append(['{0}_in_jail'.format(jail), jail, 'absolute']) jail,
jail,
'incremental',
]
ch[ORDER[0]]['lines'].append(dim)
dim = [
'{0}_in_jail'.format(jail),
jail,
'absolute',
]
ch[ORDER[1]]['lines'].append(dim)
return ch return ch
@ -59,12 +70,10 @@ class Service(LogService):
LogService.__init__(self, configuration=configuration, name=name) LogService.__init__(self, configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = dict() self.definitions = dict()
self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log') self.log_path = self.configuration.get('log_path', '/var/log/fail2ban.log')
self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local') self.conf_path = self.configuration.get('conf_path', '/etc/fail2ban/jail.local')
self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/') self.conf_dir = self.configuration.get('conf_dir', '/etc/fail2ban/jail.d/')
self.exclude = self.configuration.get('exclude', str()) self.exclude = self.configuration.get('exclude', str())
self.monitoring_jails = list() self.monitoring_jails = list()
self.banned_ips = defaultdict(set) self.banned_ips = defaultdict(set)
self.data = dict() self.data = dict()

View File

@ -3,24 +3,37 @@
# Author: l2isbad # Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later # SPDX-License-Identifier: GPL-3.0-or-later
from re import findall import re
from subprocess import Popen, PIPE from subprocess import Popen, PIPE
from bases.collection import find_binary from bases.collection import find_binary
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
priority = 60000
update_every = 15 update_every = 15
PARSER = re.compile(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)')
RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept' RADIUS_MSG = 'Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept'
# charts order (can be overridden if you want less charts, or different order) RADCLIENT_RETRIES = 1
ORDER = ['authentication', 'accounting', 'proxy-auth', 'proxy-acct'] RADCLIENT_TIMEOUT = 1
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 18121
DEFAULT_DO_ACCT = False
DEFAULT_DO_PROXY_AUTH = False
DEFAULT_DO_PROXY_ACCT = False
ORDER = [
'authentication',
'accounting',
'proxy-auth',
'proxy-acct',
]
CHARTS = { CHARTS = {
'authentication': { 'authentication': {
'options': [None, 'Authentication', 'packets/s', 'Authentication', 'freerad.auth', 'line'], 'options': [None, 'Authentication', 'packets/s', 'authentication', 'freerad.auth', 'line'],
'lines': [ 'lines': [
['access-accepts', None, 'incremental'], ['access-accepts', None, 'incremental'],
['access-rejects', None, 'incremental'], ['access-rejects', None, 'incremental'],
@ -32,7 +45,7 @@ CHARTS = {
] ]
}, },
'accounting': { 'accounting': {
'options': [None, 'Accounting', 'packets/s', 'Accounting', 'freerad.acct', 'line'], 'options': [None, 'Accounting', 'packets/s', 'accounting', 'freerad.acct', 'line'],
'lines': [ 'lines': [
['accounting-requests', 'requests', 'incremental'], ['accounting-requests', 'requests', 'incremental'],
['accounting-responses', 'responses', 'incremental'], ['accounting-responses', 'responses', 'incremental'],
@ -44,7 +57,7 @@ CHARTS = {
] ]
}, },
'proxy-auth': { 'proxy-auth': {
'options': [None, 'Proxy Authentication', 'packets/s', 'Authentication', 'freerad.proxy.auth', 'line'], 'options': [None, 'Proxy Authentication', 'packets/s', 'authentication', 'freerad.proxy.auth', 'line'],
'lines': [ 'lines': [
['proxy-access-accepts', 'access-accepts', 'incremental'], ['proxy-access-accepts', 'access-accepts', 'incremental'],
['proxy-access-rejects', 'access-rejects', 'incremental'], ['proxy-access-rejects', 'access-rejects', 'incremental'],
@ -56,7 +69,7 @@ CHARTS = {
] ]
}, },
'proxy-acct': { 'proxy-acct': {
'options': [None, 'Proxy Accounting', 'packets/s', 'Accounting', 'freerad.proxy.acct', 'line'], 'options': [None, 'Proxy Accounting', 'packets/s', 'accounting', 'freerad.proxy.acct', 'line'],
'lines': [ 'lines': [
['proxy-accounting-requests', 'requests', 'incremental'], ['proxy-accounting-requests', 'requests', 'incremental'],
['proxy-accounting-responses', 'responses', 'incremental'], ['proxy-accounting-responses', 'responses', 'incremental'],
@ -70,46 +83,80 @@ CHARTS = {
} }
def radclient_status(radclient, retries, timeout, host, port, secret):
# radclient -r 1 -t 1 -x 127.0.0.1:18121 status secret
return '{radclient} -r {num_retries} -t {timeout} -x {host}:{port} status {secret}'.format(
radclient=radclient,
num_retries=retries,
timeout=timeout,
host=host,
port=port,
secret=secret,
).split()
class Service(SimpleService): class Service(SimpleService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
SimpleService.__init__(self, configuration=configuration, name=name) SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.host = self.configuration.get('host', 'localhost') self.host = self.configuration.get('host', DEFAULT_HOST)
self.port = self.configuration.get('port', '18121') self.port = self.configuration.get('port', DEFAULT_PORT)
self.secret = self.configuration.get('secret') self.secret = self.configuration.get('secret')
self.acct = self.configuration.get('acct', False) self.do_acct = self.configuration.get('acct', DEFAULT_DO_ACCT)
self.proxy_auth = self.configuration.get('proxy_auth', False) self.do_proxy_auth = self.configuration.get('proxy_auth', DEFAULT_DO_PROXY_AUTH)
self.proxy_acct = self.configuration.get('proxy_acct', False) self.do_proxy_acct = self.configuration.get('proxy_acct', DEFAULT_DO_PROXY_ACCT)
chart_choice = [True, bool(self.acct), bool(self.proxy_auth), bool(self.proxy_acct)]
self.order = [chart for chart, choice in zip(ORDER, chart_choice) if choice]
self.echo = find_binary('echo') self.echo = find_binary('echo')
self.radclient = find_binary('radclient') self.radclient = find_binary('radclient')
self.sub_echo = [self.echo, RADIUS_MSG] self.sub_echo = [self.echo, RADIUS_MSG]
self.sub_radclient = [self.radclient, '-r', '1', '-t', '1', '-x', self.sub_radclient = radclient_status(
':'.join([self.host, self.port]), 'status', self.secret] self.radclient, RADCLIENT_RETRIES, RADCLIENT_TIMEOUT, self.host, self.port, self.secret,
)
def check(self): def check(self):
if not all([self.echo, self.radclient]): if not self.radclient:
self.error('Can\'t locate "radclient" binary or binary is not executable by netdata') self.error("Can't locate 'radclient' binary or binary is not executable by netdata user")
return False return False
if not self.secret:
self.error('"secret" not set') if not self.echo:
self.error("Can't locate 'echo' binary or binary is not executable by netdata user")
return None return None
if self._get_raw_data(): if not self.secret:
return True self.error("'secret' isn't set")
self.error('Request returned no data. Is server alive?') return None
return False
def _get_data(self): if not self.get_raw_data():
self.error('Request returned no data. Is server alive?')
return False
if not self.do_acct:
self.order.remove('accounting')
if not self.do_proxy_auth:
self.order.remove('proxy-auth')
if not self.do_proxy_acct:
self.order.remove('proxy-acct')
return True
def get_data(self):
""" """
Format data received from shell command Format data received from shell command
:return: dict :return: dict
""" """
result = self._get_raw_data() result = self.get_raw_data()
return dict([(elem[0].lower(), int(elem[1])) for elem in findall(r'((?<=-)[AP][a-zA-Z-]+) = (\d+)', result)])
def _get_raw_data(self): if not result:
return None
return dict(
(key.lower(), value) for key, value in PARSER.findall(result)
)
def get_raw_data(self):
""" """
The following code is equivalent to The following code is equivalent to
'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept" 'echo "Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 15, Response-Packet-Type = Access-Accept"
@ -123,6 +170,8 @@ class Service(SimpleService):
raw_result = process_rad.communicate()[0] raw_result = process_rad.communicate()[0]
except OSError: except OSError:
return None return None
if process_rad.returncode is 0: if process_rad.returncode is 0:
return raw_result.decode() return raw_result.decode()
return None return None

View File

@ -8,13 +8,20 @@ import json
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2 MEMSTATS_ORDER = [
priority = 60000 'memstats_heap',
'memstats_stack',
'memstats_mspan',
'memstats_mcache',
'memstats_sys',
'memstats_live_objects',
'memstats_gc_pauses',
]
MEMSTATS_CHARTS = { MEMSTATS_CHARTS = {
'memstats_heap': { 'memstats_heap': {
'options': ['heap', 'memory: size of heap memory structures', 'kB', 'memstats', 'options': ['heap', 'memory: size of heap memory structures', 'KiB', 'memstats',
'expvar.memstats.heap', 'line'], 'expvar.memstats.heap', 'line'],
'lines': [ 'lines': [
['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024], ['memstats_heap_alloc', 'alloc', 'absolute', 1, 1024],
@ -22,21 +29,21 @@ MEMSTATS_CHARTS = {
] ]
}, },
'memstats_stack': { 'memstats_stack': {
'options': ['stack', 'memory: size of stack memory structures', 'kB', 'memstats', 'options': ['stack', 'memory: size of stack memory structures', 'KiB', 'memstats',
'expvar.memstats.stack', 'line'], 'expvar.memstats.stack', 'line'],
'lines': [ 'lines': [
['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024] ['memstats_stack_inuse', 'inuse', 'absolute', 1, 1024]
] ]
}, },
'memstats_mspan': { 'memstats_mspan': {
'options': ['mspan', 'memory: size of mspan memory structures', 'kB', 'memstats', 'options': ['mspan', 'memory: size of mspan memory structures', 'KiB', 'memstats',
'expvar.memstats.mspan', 'line'], 'expvar.memstats.mspan', 'line'],
'lines': [ 'lines': [
['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024] ['memstats_mspan_inuse', 'inuse', 'absolute', 1, 1024]
] ]
}, },
'memstats_mcache': { 'memstats_mcache': {
'options': ['mcache', 'memory: size of mcache memory structures', 'kB', 'memstats', 'options': ['mcache', 'memory: size of mcache memory structures', 'KiB', 'memstats',
'expvar.memstats.mcache', 'line'], 'expvar.memstats.mcache', 'line'],
'lines': [ 'lines': [
['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024] ['memstats_mcache_inuse', 'inuse', 'absolute', 1, 1024]
@ -50,7 +57,7 @@ MEMSTATS_CHARTS = {
] ]
}, },
'memstats_sys': { 'memstats_sys': {
'options': ['sys', 'memory: size of reserved virtual address space', 'kB', 'memstats', 'options': ['sys', 'memory: size of reserved virtual address space', 'KiB', 'memstats',
'expvar.memstats.sys', 'line'], 'expvar.memstats.sys', 'line'],
'lines': [ 'lines': [
['memstats_sys', 'sys', 'absolute', 1, 1024] ['memstats_sys', 'sys', 'absolute', 1, 1024]
@ -65,9 +72,6 @@ MEMSTATS_CHARTS = {
} }
} }
MEMSTATS_ORDER = ['memstats_heap', 'memstats_stack', 'memstats_mspan', 'memstats_mcache',
'memstats_sys', 'memstats_live_objects', 'memstats_gc_pauses']
def flatten(d, top='', sep='.'): def flatten(d, top='', sep='.'):
items = [] items = []
@ -83,7 +87,6 @@ def flatten(d, top='', sep='.'):
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
# if memstats collection is enabled, add the charts and their order # if memstats collection is enabled, add the charts and their order
if self.configuration.get('collect_memstats'): if self.configuration.get('collect_memstats'):
self.definitions = dict(MEMSTATS_CHARTS) self.definitions = dict(MEMSTATS_CHARTS)

View File

@ -14,11 +14,6 @@ except ImportError:
from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.SocketService import SocketService
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# charts order (can be overridden if you want less charts, or different order) # charts order (can be overridden if you want less charts, or different order)
ORDER = [ ORDER = [
'fbin', 'fbin',
@ -55,11 +50,11 @@ ORDER = [
CHARTS = { CHARTS = {
'fbin': { 'fbin': {
'options': [None, 'Kilobytes In', 'KB/s', 'frontend', 'haproxy_f.bin', 'line'], 'options': [None, 'Kilobytes In', 'KiB/s', 'frontend', 'haproxy_f.bin', 'line'],
'lines': [] 'lines': []
}, },
'fbout': { 'fbout': {
'options': [None, 'Kilobytes Out', 'KB/s', 'frontend', 'haproxy_f.bout', 'line'], 'options': [None, 'Kilobytes Out', 'KiB/s', 'frontend', 'haproxy_f.bout', 'line'],
'lines': [] 'lines': []
}, },
'fscur': { 'fscur': {
@ -100,11 +95,11 @@ CHARTS = {
'lines': [] 'lines': []
}, },
'bbin': { 'bbin': {
'options': [None, 'Kilobytes In', 'KB/s', 'backend', 'haproxy_b.bin', 'line'], 'options': [None, 'Kilobytes In', 'KiB/s', 'backend', 'haproxy_b.bin', 'line'],
'lines': [] 'lines': []
}, },
'bbout': { 'bbout': {
'options': [None, 'Kilobytes Out', 'KB/s', 'backend', 'haproxy_b.bout', 'line'], 'options': [None, 'Kilobytes Out', 'KiB/s', 'backend', 'haproxy_b.bout', 'line'],
'lines': [] 'lines': []
}, },
'bscur': { 'bscur': {
@ -145,41 +140,39 @@ CHARTS = {
'lines': [] 'lines': []
}, },
'bqtime': { 'bqtime': {
'options': [None, 'The average queue time over the 1024 last requests', 'ms', 'backend', 'options': [None, 'The average queue time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.qtime', 'line'], 'haproxy_b.qtime', 'line'],
'lines': [] 'lines': []
}, },
'bctime': { 'bctime': {
'options': [None, 'The average connect time over the 1024 last requests', 'ms', 'backend', 'options': [None, 'The average connect time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.ctime', 'line'], 'haproxy_b.ctime', 'line'],
'lines': [] 'lines': []
}, },
'brtime': { 'brtime': {
'options': [None, 'The average response time over the 1024 last requests', 'ms', 'backend', 'options': [None, 'The average response time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.rtime', 'line'], 'haproxy_b.rtime', 'line'],
'lines': [] 'lines': []
}, },
'bttime': { 'bttime': {
'options': [None, 'The average total session time over the 1024 last requests', 'ms', 'backend', 'options': [None, 'The average total session time over the 1024 last requests', 'milliseconds', 'backend',
'haproxy_b.ttime', 'line'], 'haproxy_b.ttime', 'line'],
'lines': [] 'lines': []
}, },
'health_sdown': { 'health_sdown': {
'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'options': [None, 'Backend Servers In DOWN State', 'failed servers', 'health', 'haproxy_hs.down', 'line'],
'haproxy_hs.down', 'line'],
'lines': [] 'lines': []
}, },
'health_sup': { 'health_sup': {
'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'options': [None, 'Backend Servers In UP State', 'health servers', 'health', 'haproxy_hs.up', 'line'],
'haproxy_hs.up', 'line'],
'lines': [] 'lines': []
}, },
'health_bdown': { 'health_bdown': {
'options': [None, 'Is Backend Alive? 1 = DOWN', 'failed backend', 'health', 'haproxy_hb.down', 'line'], 'options': [None, 'Is Backend Failed?', 'boolean', 'health', 'haproxy_hb.down', 'line'],
'lines': [] 'lines': []
}, },
'health_idle': { 'health_idle': {
'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percent', 'health', 'haproxy.idle', 'line'], 'options': [None, 'The Ratio Of Polling Time Vs Total Time', 'percentage', 'health', 'haproxy.idle', 'line'],
'lines': [ 'lines': [
['idle', None, 'absolute'] ['idle', None, 'absolute']
] ]
@ -213,6 +206,7 @@ REGEX = dict(url=re_compile(r'idle = (?P<idle>[0-9]+)'),
socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)')) socket=re_compile(r'Idle_pct: (?P<idle>[0-9]+)'))
# TODO: the code is unreadable
class Service(UrlService, SocketService): class Service(UrlService, SocketService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
if 'socket' in configuration: if 'socket' in configuration:

View File

@ -12,7 +12,9 @@ from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.SocketService import SocketService
ORDER = ['temperatures'] ORDER = [
'temperatures',
]
CHARTS = { CHARTS = {
'temperatures': { 'temperatures': {
@ -39,11 +41,11 @@ class Service(SocketService):
SocketService.__init__(self, configuration=configuration, name=name) SocketService.__init__(self, configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = deepcopy(CHARTS) self.definitions = deepcopy(CHARTS)
self.do_only = self.configuration.get('devices')
self._keep_alive = False self._keep_alive = False
self.request = "" self.request = ""
self.host = "127.0.0.1" self.host = "127.0.0.1"
self.port = 7634 self.port = 7634
self.do_only = self.configuration.get('devices')
def get_disks(self): def get_disks(self):
r = self._get_raw_data() r = self._get_raw_data()

View File

@ -28,11 +28,15 @@ HTTP_BAD_STATUS = 'bad_status'
HTTP_TIMEOUT = 'timeout' HTTP_TIMEOUT = 'timeout'
HTTP_NO_CONNECTION = 'no_connection' HTTP_NO_CONNECTION = 'no_connection'
ORDER = ['response_time', 'response_length', 'status'] ORDER = [
'response_time',
'response_length',
'status',
]
CHARTS = { CHARTS = {
'response_time': { 'response_time': {
'options': [None, 'HTTP response time', 'ms', 'response', 'httpcheck.responsetime', 'line'], 'options': [None, 'HTTP response time', 'milliseconds', 'response', 'httpcheck.responsetime', 'line'],
'lines': [ 'lines': [
[HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000] [HTTP_RESPONSE_TIME, 'time', 'absolute', 100, 1000]
] ]
@ -59,12 +63,12 @@ CHARTS = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
pattern = self.configuration.get('regex') pattern = self.configuration.get('regex')
self.regex = re.compile(pattern) if pattern else None self.regex = re.compile(pattern) if pattern else None
self.status_codes_accepted = self.configuration.get('status_accepted', [200]) self.status_codes_accepted = self.configuration.get('status_accepted', [200])
self.follow_redirect = self.configuration.get('redirect', True) self.follow_redirect = self.configuration.get('redirect', True)
self.order = ORDER
self.definitions = CHARTS
def _get_data(self): def _get_data(self):
""" """

View File

@ -8,10 +8,9 @@ import json
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
priority = 60000 ORDER = [
'listeners',
# charts order (can be overridden if you want less charts, or different order) ]
ORDER = ['listeners']
CHARTS = { CHARTS = {
'listeners': { 'listeners': {

View File

@ -7,24 +7,17 @@ import json
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# default job configuration (overridden by python.d.plugin) ORDER = [
# config = {'local': { 'bandwidth',
# 'update_every': update_every, 'peers',
# 'retries': retries, 'repo_size',
# 'priority': priority, 'repo_objects',
# 'url': 'http://localhost:5001' ]
# }}
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['bandwidth', 'peers', 'repo_size', 'repo_objects']
CHARTS = { CHARTS = {
'bandwidth': { 'bandwidth': {
'options': [None, 'IPFS Bandwidth', 'kbits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'], 'options': [None, 'IPFS Bandwidth', 'kilobits/s', 'Bandwidth', 'ipfs.bandwidth', 'line'],
'lines': [ 'lines': [
['in', None, 'absolute', 8, 1000], ['in', None, 'absolute', 8, 1000],
['out', None, 'absolute', -8, 1000] ['out', None, 'absolute', -8, 1000]
@ -37,10 +30,10 @@ CHARTS = {
] ]
}, },
'repo_size': { 'repo_size': {
'options': [None, 'IPFS Repo Size', 'GB', 'Size', 'ipfs.repo_size', 'area'], 'options': [None, 'IPFS Repo Size', 'GiB', 'Size', 'ipfs.repo_size', 'area'],
'lines': [ 'lines': [
['avail', None, 'absolute', 1, 1e9], ['avail', None, 'absolute', 1, 1 << 30],
['size', None, 'absolute', 1, 1e9], ['size', None, 'absolute', 1, 1 << 30],
] ]
}, },
'repo_objects': { 'repo_objects': {
@ -68,11 +61,11 @@ SI_zeroes = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
self.baseurl = self.configuration.get('url', 'http://localhost:5001')
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.__storage_max = None self.baseurl = self.configuration.get('url', 'http://localhost:5001')
self.do_pinapi = self.configuration.get('pinapi') self.do_pinapi = self.configuration.get('pinapi')
self.__storage_max = None
def _get_json(self, sub_url): def _get_json(self, sub_url):
""" """

View File

@ -19,14 +19,16 @@ from copy import deepcopy
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
priority = 60000
retries = 60
ORDER = ['pools_utilization', 'pools_active_leases', 'leases_total'] ORDER = [
'pools_utilization',
'pools_active_leases',
'leases_total',
]
CHARTS = { CHARTS = {
'pools_utilization': { 'pools_utilization': {
'options': [None, 'Pools Utilization', '%', 'utilization', 'isc_dhcpd.utilization', 'line'], 'options': [None, 'Pools Utilization', 'percentage', 'utilization', 'isc_dhcpd.utilization', 'line'],
'lines': [] 'lines': []
}, },
'pools_active_leases': { 'pools_active_leases': {
@ -120,7 +122,6 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name) SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = deepcopy(CHARTS) self.definitions = deepcopy(CHARTS)
lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases') lease_path = self.configuration.get('leases_path', '/var/lib/dhcp/dhcpd.leases')
self.dhcpd_leases = DhcpdLeasesFile(path=lease_path) self.dhcpd_leases = DhcpdLeasesFile(path=lease_path)
self.pools = list() self.pools = list()
@ -131,7 +132,7 @@ class Service(SimpleService):
def check(self): def check(self):
if not HAVE_IP_ADDRESS: if not HAVE_IP_ADDRESS:
self.error("'python-ipaddress' module is needed") self.error("'python-ipaddress' package is needed")
return False return False
if not self.dhcpd_leases.is_valid(): if not self.dhcpd_leases.is_valid():
@ -190,6 +191,17 @@ class Service(SimpleService):
def create_charts(self): def create_charts(self):
for pool in self.pools: for pool in self.pools:
self.definitions['pools_utilization']['lines'].append([pool.id + '_utilization', pool.name, dim = [
'absolute', 1, 100]) pool.id + '_utilization',
self.definitions['pools_active_leases']['lines'].append([pool.id + '_active_leases', pool.name]) pool.name,
'absolute',
1,
100,
]
self.definitions['pools_utilization']['lines'].append(dim)
dim = [
pool.id + '_active_leases',
pool.name,
]
self.definitions['pools_active_leases']['lines'].append(dim)

View File

@ -16,11 +16,15 @@ update_every = 10
# charts order (can be overridden if you want less charts, or different order) # charts order (can be overridden if you want less charts, or different order)
ORDER = [ ORDER = [
'net_throughput_http', 'net_throughput_https', # net throughput 'net_throughput_http', # net throughput
'connections_http', 'connections_https', # connections 'net_throughput_https', # net throughput
'requests', 'requests_processing', # requests 'connections_http', # connections
'pub_cache_hits', 'private_cache_hits', # cache 'connections_https', # connections
'static_hits' # static 'requests', # requests
'requests_processing', # requests
'pub_cache_hits', # cache
'private_cache_hits', # cache
'static_hits', # static
] ]
CHARTS = { CHARTS = {

View File

@ -8,7 +8,13 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
priority = 59999 priority = 59999
disabled_by_default = True disabled_by_default = True
ORDER = ['sessions', 'users', 'seats'] LOGINCTL_COMMAND = 'loginctl list-sessions --no-legend'
ORDER = [
'sessions',
'users',
'seats',
]
CHARTS = { CHARTS = {
'sessions': { 'sessions': {
@ -39,9 +45,9 @@ CHARTS = {
class Service(ExecutableService): class Service(ExecutableService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name) ExecutableService.__init__(self, configuration=configuration, name=name)
self.command = 'loginctl list-sessions --no-legend'
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.command = LOGINCTL_COMMAND
def _get_data(self): def _get_data(self):
ret = { ret = {

View File

@ -66,7 +66,7 @@ def battery_charts(bats):
charts.update( charts.update(
{ {
'bbu_{0}_relative_charge'.format(b.id): { 'bbu_{0}_relative_charge'.format(b.id): {
'options': [None, 'Relative State of Charge', '%', 'battery', 'options': [None, 'Relative State of Charge', 'percentage', 'battery',
'megacli.bbu_relative_charge', 'line'], 'megacli.bbu_relative_charge', 'line'],
'lines': [ 'lines': [
['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)], ['bbu_{0}_relative_charge'.format(b.id), 'adapter {0}'.format(b.id)],
@ -180,8 +180,8 @@ class Service(ExecutableService):
ExecutableService.__init__(self, configuration=configuration, name=name) ExecutableService.__init__(self, configuration=configuration, name=name)
self.order = list() self.order = list()
self.definitions = dict() self.definitions = dict()
self.megacli = Megacli()
self.do_battery = self.configuration.get('do_battery') self.do_battery = self.configuration.get('do_battery')
self.megacli = Megacli()
def check_sudo(self): def check_sudo(self):
err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True) err = self._get_raw_data(command=self.megacli.sudo_check, stderr=True)

View File

@ -5,36 +5,37 @@
from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.SocketService import SocketService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# default job configuration (overridden by python.d.plugin) ORDER = [
# config = {'local': { 'cache',
# 'update_every': update_every, 'net',
# 'retries': retries, 'connections',
# 'priority': priority, 'items',
# 'host': 'localhost', 'evicted_reclaimed',
# 'port': 11211, 'get',
# 'unix_socket': None 'get_rate',
# }} 'set_rate',
'cas',
ORDER = ['cache', 'net', 'connections', 'items', 'evicted_reclaimed', 'delete',
'get', 'get_rate', 'set_rate', 'cas', 'delete', 'increment', 'decrement', 'touch', 'touch_rate'] 'increment',
'decrement',
'touch',
'touch_rate',
]
CHARTS = { CHARTS = {
'cache': { 'cache': {
'options': [None, 'Cache Size', 'megabytes', 'cache', 'memcached.cache', 'stacked'], 'options': [None, 'Cache Size', 'MiB', 'cache', 'memcached.cache', 'stacked'],
'lines': [ 'lines': [
['avail', 'available', 'absolute', 1, 1048576], ['avail', 'available', 'absolute', 1, 1 << 20],
['used', 'used', 'absolute', 1, 1048576] ['used', 'used', 'absolute', 1, 1 << 20]
] ]
}, },
'net': { 'net': {
'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'], 'options': [None, 'Network', 'kilobits/s', 'network', 'memcached.net', 'area'],
'lines': [ 'lines': [
['bytes_read', 'in', 'incremental', 8, 1024], ['bytes_read', 'in', 'incremental', 8, 1000],
['bytes_written', 'out', 'incremental', -8, 1024] ['bytes_written', 'out', 'incremental', -8, 1000],
] ]
}, },
'connections': { 'connections': {
@ -126,13 +127,13 @@ CHARTS = {
class Service(SocketService): class Service(SocketService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name) SocketService.__init__(self, configuration=configuration, name=name)
self.order = ORDER
self.definitions = CHARTS
self.request = 'stats\r\n' self.request = 'stats\r\n'
self.host = 'localhost' self.host = 'localhost'
self.port = 11211 self.port = 11211
self._keep_alive = True self._keep_alive = True
self.unix_socket = None self.unix_socket = None
self.order = ORDER
self.definitions = CHARTS
def _get_data(self): def _get_data(self):
""" """

View File

@ -16,10 +16,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
retries = 60
REPL_SET_STATES = [ REPL_SET_STATES = [
('1', 'primary'), ('1', 'primary'),
@ -209,21 +205,21 @@ CHARTS = {
] ]
}, },
'journaling_volume': { 'journaling_volume': {
'options': [None, 'Volume of data written to the journal', 'MB', 'database performance', 'options': [None, 'Volume of data written to the journal', 'MiB', 'database performance',
'mongodb.journaling_volume', 'line'], 'mongodb.journaling_volume', 'line'],
'lines': [ 'lines': [
['journaledMB', 'volume', 'absolute', 1, 100] ['journaledMB', 'volume', 'absolute', 1, 100]
] ]
}, },
'background_flush_average': { 'background_flush_average': {
'options': [None, 'Average time taken by flushes to execute', 'ms', 'database performance', 'options': [None, 'Average time taken by flushes to execute', 'milliseconds', 'database performance',
'mongodb.background_flush_average', 'line'], 'mongodb.background_flush_average', 'line'],
'lines': [ 'lines': [
['average_ms', 'time', 'absolute', 1, 100] ['average_ms', 'time', 'absolute', 1, 100]
] ]
}, },
'background_flush_last': { 'background_flush_last': {
'options': [None, 'Time taken by the last flush operation to execute', 'ms', 'database performance', 'options': [None, 'Time taken by the last flush operation to execute', 'milliseconds', 'database performance',
'mongodb.background_flush_last', 'line'], 'mongodb.background_flush_last', 'line'],
'lines': [ 'lines': [
['last_ms', 'time', 'absolute', 1, 100] ['last_ms', 'time', 'absolute', 1, 100]
@ -269,7 +265,7 @@ CHARTS = {
] ]
}, },
'memory': { 'memory': {
'options': [None, 'Memory metrics', 'MB', 'resource utilization', 'mongodb.memory', 'stacked'], 'options': [None, 'Memory metrics', 'MiB', 'resource utilization', 'mongodb.memory', 'stacked'],
'lines': [ 'lines': [
['virtual', None, 'absolute', 1, 1], ['virtual', None, 'absolute', 1, 1],
['resident', None, 'absolute', 1, 1], ['resident', None, 'absolute', 1, 1],
@ -313,7 +309,7 @@ CHARTS = {
}, },
'wiredtiger_cache': { 'wiredtiger_cache': {
'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes', 'options': [None, 'The percentage of the wiredTiger cache that is in use and cache with dirty bytes',
'percent', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'], 'percentage', 'resource utilization', 'mongodb.wiredtiger_cache', 'stacked'],
'lines': [ 'lines': [
['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000], ['wiredTiger_percent_clean', 'inuse', 'absolute', 1, 1000],
['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000] ['wiredTiger_percent_dirty', 'dirty', 'absolute', 1, 1000]
@ -333,14 +329,14 @@ CHARTS = {
'lines': [] 'lines': []
}, },
'tcmalloc_generic': { 'tcmalloc_generic': {
'options': [None, 'Tcmalloc generic metrics', 'MB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'], 'options': [None, 'Tcmalloc generic metrics', 'MiB', 'tcmalloc', 'mongodb.tcmalloc_generic', 'stacked'],
'lines': [ 'lines': [
['current_allocated_bytes', 'allocated', 'absolute', 1, 1048576], ['current_allocated_bytes', 'allocated', 'absolute', 1, 1 << 20],
['heap_size', 'heap_size', 'absolute', 1, 1048576] ['heap_size', 'heap_size', 'absolute', 1, 1 << 20]
] ]
}, },
'tcmalloc_metrics': { 'tcmalloc_metrics': {
'options': [None, 'Tcmalloc metrics', 'KB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'], 'options': [None, 'Tcmalloc metrics', 'KiB', 'tcmalloc', 'mongodb.tcmalloc_metrics', 'stacked'],
'lines': [ 'lines': [
['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024], ['central_cache_free_bytes', 'central_cache_free', 'absolute', 1, 1024],
['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024], ['current_total_thread_cache_bytes', 'current_total_thread_cache', 'absolute', 1, 1024],

View File

@ -6,12 +6,20 @@
import xml.etree.ElementTree as ET import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h) # see enum State_Type from monit.h (https://bitbucket.org/tildeslash/monit/src/master/src/monit.h)
MONIT_SERVICE_NAMES = ['Filesystem', 'Directory', 'File', 'Process', 'Host', 'System', 'Fifo', 'Program', 'Net'] MONIT_SERVICE_NAMES = [
'Filesystem',
'Directory',
'File',
'Process',
'Host',
'System',
'Fifo',
'Program',
'Net',
]
DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8] DEFAULT_SERVICES_IDS = [0, 1, 2, 3, 4, 6, 7, 8]
# charts order (can be overridden if you want less charts, or different order) # charts order (can be overridden if you want less charts, or different order)
@ -89,10 +97,10 @@ CHARTS = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
base_url = self.configuration.get('url', 'http://localhost:2812')
self.url = '{0}/_status?format=xml&level=full'.format(base_url)
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
base_url = self.configuration.get('url', 'http://localhost:2812')
self.url = '{0}/_status?format=xml&level=full'.format(base_url)
def parse(self, data): def parse(self, data):
try: try:
@ -104,15 +112,19 @@ class Service(UrlService):
def check(self): def check(self):
self._manager = self._build_manager() self._manager = self._build_manager()
raw_data = self._get_raw_data() raw_data = self._get_raw_data()
if not raw_data: if not raw_data:
return None return None
return bool(self.parse(raw_data)) return bool(self.parse(raw_data))
def _get_data(self): def _get_data(self):
raw_data = self._get_raw_data() raw_data = self._get_raw_data()
if not raw_data: if not raw_data:
return None return None
xml = self.parse(raw_data) xml = self.parse(raw_data)
if not xml: if not xml:
return None return None
@ -120,6 +132,7 @@ class Service(UrlService):
data = {} data = {}
for service_id in DEFAULT_SERVICES_IDS: for service_id in DEFAULT_SERVICES_IDS:
service_category = MONIT_SERVICE_NAMES[service_id].lower() service_category = MONIT_SERVICE_NAMES[service_id].lower()
if service_category == 'system': if service_category == 'system':
self.debug("Skipping service from 'System' category, because it's useless in graphs") self.debug("Skipping service from 'System' category, because it's useless in graphs")
continue continue

View File

@ -6,9 +6,6 @@
from bases.FrameworkServices.MySQLService import MySQLService from bases.FrameworkServices.MySQLService import MySQLService
# default module values (can be overridden per job in `config`)
# update_every = 3
priority = 60000
# query executed on MySQL server # query executed on MySQL server
QUERY_GLOBAL = 'SHOW GLOBAL STATUS;' QUERY_GLOBAL = 'SHOW GLOBAL STATUS;'
@ -207,8 +204,8 @@ CHARTS = {
'net': { 'net': {
'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'], 'options': [None, 'mysql Bandwidth', 'kilobits/s', 'bandwidth', 'mysql.net', 'area'],
'lines': [ 'lines': [
['Bytes_received', 'in', 'incremental', 8, 1024], ['Bytes_received', 'in', 'incremental', 8, 1000],
['Bytes_sent', 'out', 'incremental', -8, 1024] ['Bytes_sent', 'out', 'incremental', -8, 1000]
] ]
}, },
'queries': { 'queries': {
@ -320,7 +317,7 @@ CHARTS = {
] ]
}, },
'innodb_io': { 'innodb_io': {
'options': [None, 'mysql InnoDB I/O Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_io', 'area'], 'options': [None, 'mysql InnoDB I/O Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_io', 'area'],
'lines': [ 'lines': [
['Innodb_data_read', 'read', 'incremental', 1, 1024], ['Innodb_data_read', 'read', 'incremental', 1, 1024],
['Innodb_data_written', 'write', 'incremental', -1, 1024] ['Innodb_data_written', 'write', 'incremental', -1, 1024]
@ -360,7 +357,7 @@ CHARTS = {
] ]
}, },
'innodb_os_log_io': { 'innodb_os_log_io': {
'options': [None, 'mysql InnoDB OS Log Bandwidth', 'kilobytes/s', 'innodb', 'mysql.innodb_os_log_io', 'area'], 'options': [None, 'mysql InnoDB OS Log Bandwidth', 'KiB/s', 'innodb', 'mysql.innodb_os_log_io', 'area'],
'lines': [ 'lines': [
['Innodb_os_log_written', 'write', 'incremental', -1, 1024], ['Innodb_os_log_written', 'write', 'incremental', -1, 1024],
] ]
@ -394,7 +391,7 @@ CHARTS = {
] ]
}, },
'innodb_buffer_pool_bytes': { 'innodb_buffer_pool_bytes': {
'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'], 'options': [None, 'mysql InnoDB Buffer Pool Bytes', 'MiB', 'innodb', 'mysql.innodb_buffer_pool_bytes', 'area'],
'lines': [ 'lines': [
['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024], ['Innodb_buffer_pool_bytes_data', 'data', 'absolute', 1, 1024 * 1024],
['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024] ['Innodb_buffer_pool_bytes_dirty', 'dirty', 'absolute', -1, 1024 * 1024]
@ -441,7 +438,7 @@ CHARTS = {
] ]
}, },
'qcache_freemem': { 'qcache_freemem': {
'options': [None, 'mysql QCache Free Memory', 'MB', 'qcache', 'mysql.qcache_freemem', 'area'], 'options': [None, 'mysql QCache Free Memory', 'MiB', 'qcache', 'mysql.qcache_freemem', 'area'],
'lines': [ 'lines': [
['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024] ['Qcache_free_memory', 'free', 'absolute', 1, 1024 * 1024]
] ]
@ -529,7 +526,7 @@ CHARTS = {
] ]
}, },
'galera_bytes': { 'galera_bytes': {
'options': [None, 'Replicated bytes', 'KB/s', 'galera', 'mysql.galera_bytes', 'area'], 'options': [None, 'Replicated bytes', 'KiB/s', 'galera', 'mysql.galera_bytes', 'area'],
'lines': [ 'lines': [
['wsrep_received_bytes', 'rx', 'incremental', 1, 1024], ['wsrep_received_bytes', 'rx', 'incremental', 1, 1024],
['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024], ['wsrep_replicated_bytes', 'tx', 'incremental', -1, 1024],
@ -563,7 +560,11 @@ class Service(MySQLService):
MySQLService.__init__(self, configuration=configuration, name=name) MySQLService.__init__(self, configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.queries = dict(global_status=QUERY_GLOBAL, slave_status=QUERY_SLAVE, variables=QUERY_VARIABLES) self.queries = dict(
global_status=QUERY_GLOBAL,
slave_status=QUERY_SLAVE,
variables=QUERY_VARIABLES,
)
def _get_data(self): def _get_data(self):

View File

@ -5,37 +5,30 @@
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# default job configuration (overridden by python.d.plugin) ORDER = [
# config = {'local': { 'connections',
# 'update_every': update_every, 'requests',
# 'retries': retries, 'connection_status',
# 'priority': priority, 'connect_rate',
# 'url': 'http://localhost/stub_status' ]
# }}
# charts order (can be overridden if you want less charts, or different order)
ORDER = ['connections', 'requests', 'connection_status', 'connect_rate']
CHARTS = { CHARTS = {
'connections': { 'connections': {
'options': [None, 'nginx Active Connections', 'connections', 'active connections', 'options': [None, 'Active Connections', 'connections', 'active connections',
'nginx.connections', 'line'], 'nginx.connections', 'line'],
'lines': [ 'lines': [
['active'] ['active']
] ]
}, },
'requests': { 'requests': {
'options': [None, 'nginx Requests', 'requests/s', 'requests', 'nginx.requests', 'line'], 'options': [None, 'Requests', 'requests/s', 'requests', 'nginx.requests', 'line'],
'lines': [ 'lines': [
['requests', None, 'incremental'] ['requests', None, 'incremental']
] ]
}, },
'connection_status': { 'connection_status': {
'options': [None, 'nginx Active Connections by Status', 'connections', 'status', 'options': [None, 'Active Connections by Status', 'connections', 'status',
'nginx.connection_status', 'line'], 'nginx.connection_status', 'line'],
'lines': [ 'lines': [
['reading'], ['reading'],
@ -44,7 +37,7 @@ CHARTS = {
] ]
}, },
'connect_rate': { 'connect_rate': {
'options': [None, 'nginx Connections Rate', 'connections/s', 'connections rate', 'options': [None, 'Connections Rate', 'connections/s', 'connections rate',
'nginx.connect_rate', 'line'], 'nginx.connect_rate', 'line'],
'lines': [ 'lines': [
['accepts', 'accepted', 'incremental'], ['accepts', 'accepted', 'incremental'],
@ -57,9 +50,9 @@ CHARTS = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
self.url = self.configuration.get('url', 'http://localhost/stub_status')
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.url = self.configuration.get('url', 'http://localhost/stub_status')
def _get_data(self): def _get_data(self):
""" """

View File

@ -16,11 +16,7 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 1
priority = 60000
# charts order (can be overridden if you want less charts, or different order)
ORDER = [ ORDER = [
'requests_total', 'requests_total',
'requests_current', 'requests_current',
@ -75,7 +71,7 @@ CHARTS = {
] ]
}, },
'ssl_memory_usage': { 'ssl_memory_usage': {
'options': [None, 'Memory Usage', '%', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'], 'options': [None, 'Memory Usage', 'percentage', 'ssl', 'nginx_plus.ssl_memory_usage', 'area'],
'lines': [ 'lines': [
['ssl_memory_usage', 'usage', 'absolute', 1, 100] ['ssl_memory_usage', 'usage', 'absolute', 1, 100]
] ]
@ -94,7 +90,7 @@ def cache_charts(cache):
charts = OrderedDict() charts = OrderedDict()
charts['{0}_traffic'.format(cache.name)] = { charts['{0}_traffic'.format(cache.name)] = {
'options': [None, 'Traffic', 'KB', family, 'nginx_plus.cache_traffic', 'stacked'], 'options': [None, 'Traffic', 'KiB', family, 'nginx_plus.cache_traffic', 'stacked'],
'lines': [ 'lines': [
['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024], ['_'.join([cache.name, 'hit_bytes']), 'served', 'absolute', 1, 1024],
['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024], ['_'.join([cache.name, 'miss_bytes_written']), 'written', 'absolute', 1, 1024],
@ -102,7 +98,7 @@ def cache_charts(cache):
] ]
} }
charts['{0}_memory_usage'.format(cache.name)] = { charts['{0}_memory_usage'.format(cache.name)] = {
'options': [None, 'Memory Usage', '%', family, 'nginx_plus.cache_memory_usage', 'area'], 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.cache_memory_usage', 'area'],
'lines': [ 'lines': [
['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100], ['_'.join([cache.name, 'memory_usage']), 'usage', 'absolute', 1, 100],
] ]
@ -199,7 +195,8 @@ def web_upstream_charts(wu):
'lines': dimensions('active') 'lines': dimensions('active')
} }
charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = { charts['web_upstream_{name}_connections_usage'.format(name=wu.name)] = {
'options': [None, 'Peers Connections Usage', '%', family, 'nginx_plus.web_upstream_connections_usage', 'line'], 'options': [None, 'Peers Connections Usage', 'percentage', family,
'nginx_plus.web_upstream_connections_usage', 'line'],
'lines': dimensions('connections_usage', d=100) 'lines': dimensions('connections_usage', d=100)
} }
# Traffic # Traffic
@ -222,7 +219,7 @@ def web_upstream_charts(wu):
# Response Time # Response Time
for peer in wu: for peer in wu:
charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = { charts['web_upstream_{0}_{1}_timings'.format(wu.name, peer.server)] = {
'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'ms', family, 'options': [None, 'Peer "{0}" Timings'.format(peer.real_server), 'milliseconds', family,
'nginx_plus.web_upstream_peer_timings', 'line'], 'nginx_plus.web_upstream_peer_timings', 'line'],
'lines': [ 'lines': [
['_'.join([wu.name, peer.server, 'header_time']), 'header'], ['_'.join([wu.name, peer.server, 'header_time']), 'header'],
@ -231,7 +228,7 @@ def web_upstream_charts(wu):
} }
# Memory Usage # Memory Usage
charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = { charts['web_upstream_{name}_memory_usage'.format(name=wu.name)] = {
'options': [None, 'Memory Usage', '%', family, 'nginx_plus.web_upstream_memory_usage', 'area'], 'options': [None, 'Memory Usage', 'percentage', family, 'nginx_plus.web_upstream_memory_usage', 'area'],
'lines': [ 'lines': [
['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100] ['_'.join([wu.name, 'memory_usage']), 'usage', 'absolute', 1, 100]
] ]

View File

@ -7,12 +7,20 @@ import re
from bases.FrameworkServices.ExecutableService import ExecutableService from bases.FrameworkServices.ExecutableService import ExecutableService
# default module values (can be overridden per job in `config`)
priority = 60000
update_every = 30 update_every = 30
# charts order (can be overridden if you want less charts, or different order) NSD_CONTROL_COMMAND = 'nsd-control stats_noreset'
ORDER = ['queries', 'zones', 'protocol', 'type', 'transfer', 'rcode'] REGEX = re.compile(r'([A-Za-z0-9.]+)=(\d+)')
ORDER = [
'queries',
'zones',
'protocol',
'type',
'transfer',
'rcode',
]
CHARTS = { CHARTS = {
'queries': { 'queries': {
@ -78,22 +86,21 @@ CHARTS = {
class Service(ExecutableService): class Service(ExecutableService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
ExecutableService.__init__( ExecutableService.__init__(self, configuration=configuration, name=name)
self, configuration=configuration, name=name)
self.command = 'nsd-control stats_noreset'
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.regex = re.compile(r'([A-Za-z0-9.]+)=(\d+)') self.command = NSD_CONTROL_COMMAND
def _get_data(self): def _get_data(self):
lines = self._get_raw_data() lines = self._get_raw_data()
if not lines: if not lines:
return None return None
r = self.regex stats = dict(
stats = dict((k.replace('.', '_'), int(v)) (k.replace('.', '_'), int(v)) for k, v in REGEX.findall(''.join(lines))
for k, v in r.findall(''.join(lines))) )
stats.setdefault('num_opcode_NOTIFY', 0) stats.setdefault('num_opcode_NOTIFY', 0)
stats.setdefault('num_type_TYPE252', 0) stats.setdefault('num_type_TYPE252', 0)
stats.setdefault('num_type_TYPE255', 0) stats.setdefault('num_type_TYPE255', 0)
return stats return stats

View File

@ -9,9 +9,6 @@ import re
from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.SocketService import SocketService
# default module values
update_every = 1
priority = 60000
# NTP Control Message Protocol constants # NTP Control Message Protocol constants
MODE = 6 MODE = 6
@ -53,13 +50,15 @@ ORDER = [
CHARTS = { CHARTS = {
'sys_offset': { 'sys_offset': {
'options': [None, 'Combined offset of server relative to this host', 'ms', 'system', 'ntpd.sys_offset', 'area'], 'options': [None, 'Combined offset of server relative to this host', 'milliseconds',
'system', 'ntpd.sys_offset', 'area'],
'lines': [ 'lines': [
['offset', 'offset', 'absolute', 1, PRECISION] ['offset', 'offset', 'absolute', 1, PRECISION]
] ]
}, },
'sys_jitter': { 'sys_jitter': {
'options': [None, 'Combined system jitter and clock jitter', 'ms', 'system', 'ntpd.sys_jitter', 'line'], 'options': [None, 'Combined system jitter and clock jitter', 'milliseconds',
'system', 'ntpd.sys_jitter', 'line'],
'lines': [ 'lines': [
['sys_jitter', 'system', 'absolute', 1, PRECISION], ['sys_jitter', 'system', 'absolute', 1, PRECISION],
['clk_jitter', 'clock', 'absolute', 1, PRECISION] ['clk_jitter', 'clock', 'absolute', 1, PRECISION]
@ -78,14 +77,14 @@ CHARTS = {
] ]
}, },
'sys_rootdelay': { 'sys_rootdelay': {
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'system', 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'system',
'ntpd.sys_rootdelay', 'area'], 'ntpd.sys_rootdelay', 'area'],
'lines': [ 'lines': [
['rootdelay', 'delay', 'absolute', 1, PRECISION] ['rootdelay', 'delay', 'absolute', 1, PRECISION]
] ]
}, },
'sys_rootdisp': { 'sys_rootdisp': {
'options': [None, 'Total root dispersion to the primary reference clock', 'ms', 'system', 'options': [None, 'Total root dispersion to the primary reference clock', 'milliseconds', 'system',
'ntpd.sys_rootdisp', 'area'], 'ntpd.sys_rootdisp', 'area'],
'lines': [ 'lines': [
['rootdisp', 'dispersion', 'absolute', 1, PRECISION] ['rootdisp', 'dispersion', 'absolute', 1, PRECISION]
@ -114,27 +113,27 @@ CHARTS = {
PEER_CHARTS = { PEER_CHARTS = {
'peer_offset': { 'peer_offset': {
'options': [None, 'Filter offset', 'ms', 'peers', 'ntpd.peer_offset', 'line'], 'options': [None, 'Filter offset', 'milliseconds', 'peers', 'ntpd.peer_offset', 'line'],
'lines': [] 'lines': []
}, },
'peer_delay': { 'peer_delay': {
'options': [None, 'Filter delay', 'ms', 'peers', 'ntpd.peer_delay', 'line'], 'options': [None, 'Filter delay', 'milliseconds', 'peers', 'ntpd.peer_delay', 'line'],
'lines': [] 'lines': []
}, },
'peer_dispersion': { 'peer_dispersion': {
'options': [None, 'Filter dispersion', 'ms', 'peers', 'ntpd.peer_dispersion', 'line'], 'options': [None, 'Filter dispersion', 'milliseconds', 'peers', 'ntpd.peer_dispersion', 'line'],
'lines': [] 'lines': []
}, },
'peer_jitter': { 'peer_jitter': {
'options': [None, 'Filter jitter', 'ms', 'peers', 'ntpd.peer_jitter', 'line'], 'options': [None, 'Filter jitter', 'milliseconds', 'peers', 'ntpd.peer_jitter', 'line'],
'lines': [] 'lines': []
}, },
'peer_xleave': { 'peer_xleave': {
'options': [None, 'Interleave delay', 'ms', 'peers', 'ntpd.peer_xleave', 'line'], 'options': [None, 'Interleave delay', 'milliseconds', 'peers', 'ntpd.peer_xleave', 'line'],
'lines': [] 'lines': []
}, },
'peer_rootdelay': { 'peer_rootdelay': {
'options': [None, 'Total roundtrip delay to the primary reference clock', 'ms', 'peers', 'options': [None, 'Total roundtrip delay to the primary reference clock', 'milliseconds', 'peers',
'ntpd.peer_rootdelay', 'line'], 'ntpd.peer_rootdelay', 'line'],
'lines': [] 'lines': []
}, },
@ -234,7 +233,6 @@ class Service(SocketService):
SocketService.__init__(self, configuration=configuration, name=name) SocketService.__init__(self, configuration=configuration, name=name)
self.order = list(ORDER) self.order = list(ORDER)
self.definitions = dict(CHARTS) self.definitions = dict(CHARTS)
self.port = 'ntp' self.port = 'ntp'
self.dgram_socket = True self.dgram_socket = True
self.system = System() self.system = System()
@ -243,7 +241,6 @@ class Service(SocketService):
self.retries = 0 self.retries = 0
self.show_peers = self.configuration.get('show_peers', False) self.show_peers = self.configuration.get('show_peers', False)
self.peer_rescan = self.configuration.get('peer_rescan', 60) self.peer_rescan = self.configuration.get('peer_rescan', 60)
if self.show_peers: if self.show_peers:
self.definitions.update(PEER_CHARTS) self.definitions.update(PEER_CHARTS)

View File

@ -49,39 +49,39 @@ def gpu_charts(gpu):
charts = { charts = {
PCI_BANDWIDTH: { PCI_BANDWIDTH: {
'options': [None, 'PCI Express Bandwidth Utilization', 'KB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'], 'options': [None, 'PCI Express Bandwidth Utilization', 'KiB/s', fam, 'nvidia_smi.pci_bandwidth', 'area'],
'lines': [ 'lines': [
['rx_util', 'rx', 'absolute', 1, 1], ['rx_util', 'rx', 'absolute', 1, 1],
['tx_util', 'tx', 'absolute', 1, -1], ['tx_util', 'tx', 'absolute', 1, -1],
] ]
}, },
FAN_SPEED: { FAN_SPEED: {
'options': [None, 'Fan Speed', '%', fam, 'nvidia_smi.fan_speed', 'line'], 'options': [None, 'Fan Speed', 'percentage', fam, 'nvidia_smi.fan_speed', 'line'],
'lines': [ 'lines': [
['fan_speed', 'speed'], ['fan_speed', 'speed'],
] ]
}, },
GPU_UTIL: { GPU_UTIL: {
'options': [None, 'GPU Utilization', '%', fam, 'nvidia_smi.gpu_utilization', 'line'], 'options': [None, 'GPU Utilization', 'percentage', fam, 'nvidia_smi.gpu_utilization', 'line'],
'lines': [ 'lines': [
['gpu_util', 'utilization'], ['gpu_util', 'utilization'],
] ]
}, },
MEM_UTIL: { MEM_UTIL: {
'options': [None, 'Memory Bandwidth Utilization', '%', fam, 'nvidia_smi.mem_utilization', 'line'], 'options': [None, 'Memory Bandwidth Utilization', 'percentage', fam, 'nvidia_smi.mem_utilization', 'line'],
'lines': [ 'lines': [
['memory_util', 'utilization'], ['memory_util', 'utilization'],
] ]
}, },
ENCODER_UTIL: { ENCODER_UTIL: {
'options': [None, 'Encoder/Decoder Utilization', '%', fam, 'nvidia_smi.encoder_utilization', 'line'], 'options': [None, 'Encoder/Decoder Utilization', 'percentage', fam, 'nvidia_smi.encoder_utilization', 'line'],
'lines': [ 'lines': [
['encoder_util', 'encoder'], ['encoder_util', 'encoder'],
['decoder_util', 'decoder'], ['decoder_util', 'decoder'],
] ]
}, },
MEM_ALLOCATED: { MEM_ALLOCATED: {
'options': [None, 'Memory Allocated', 'MB', fam, 'nvidia_smi.memory_allocated', 'line'], 'options': [None, 'Memory Allocated', 'MiB', fam, 'nvidia_smi.memory_allocated', 'line'],
'lines': [ 'lines': [
['fb_memory_usage', 'used'], ['fb_memory_usage', 'used'],
] ]
@ -316,7 +316,6 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name) super(Service, self).__init__(configuration=configuration, name=name)
self.order = list() self.order = list()
self.definitions = dict() self.definitions = dict()
poll = int(configuration.get('poll_seconds', 1)) poll = int(configuration.get('poll_seconds', 1))
self.poller = NvidiaSMIPoller(poll) self.poller = NvidiaSMIPoller(poll)

View File

@ -11,8 +11,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
priority = 60000
DEFAULT_SERVER = 'localhost' DEFAULT_SERVER = 'localhost'
DEFAULT_PORT = '389' DEFAULT_PORT = '389'
@ -36,7 +34,7 @@ CHARTS = {
] ]
}, },
'bytes_sent': { 'bytes_sent': {
'options': [None, 'Traffic', 'KB/s', 'ldap', 'openldap.traffic_stats', 'line'], 'options': [None, 'Traffic', 'KiB/s', 'ldap', 'openldap.traffic_stats', 'line'],
'lines': [ 'lines': [
['bytes_sent', 'sent', 'incremental', 1, 1024] ['bytes_sent', 'sent', 'incremental', 1, 1024]
] ]
@ -136,13 +134,11 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name) SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.server = configuration.get('server', DEFAULT_SERVER) self.server = configuration.get('server', DEFAULT_SERVER)
self.port = configuration.get('port', DEFAULT_PORT) self.port = configuration.get('port', DEFAULT_PORT)
self.username = configuration.get('username') self.username = configuration.get('username')
self.password = configuration.get('password') self.password = configuration.get('password')
self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT) self.timeout = configuration.get('timeout', DEFAULT_TIMEOUT)
self.alive = False self.alive = False
self.conn = None self.conn = None

View File

@ -3,14 +3,18 @@
# Author: l2isbad # Author: l2isbad
# SPDX-License-Identifier: GPL-3.0-or-later # SPDX-License-Identifier: GPL-3.0-or-later
from re import compile as r_compile import re
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
priority = 60000
update_every = 10 update_every = 10
ORDER = ['users', 'traffic'] ORDER = [
'users',
'traffic',
]
CHARTS = { CHARTS = {
'users': { 'users': {
'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'], 'options': [None, 'OpenVPN Active Users', 'active users', 'users', 'openvpn_status.users', 'line'],
@ -19,15 +23,20 @@ CHARTS = {
] ]
}, },
'traffic': { 'traffic': {
'options': [None, 'OpenVPN Traffic', 'KB/s', 'traffic', 'openvpn_status.traffic', 'area'], 'options': [None, 'OpenVPN Traffic', 'KiB/s', 'traffic', 'openvpn_status.traffic', 'area'],
'lines': [ 'lines': [
['bytes_in', 'in', 'incremental', 1, 1 << 10], ['bytes_out', 'out', 'incremental', 1, -1 << 10] ['bytes_in', 'in', 'incremental', 1, 1 << 10],
['bytes_out', 'out', 'incremental', -1, 1 << 10]
] ]
} }
} }
TLS_REGEX = r_compile(r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)') TLS_REGEX = re.compile(
STATIC_KEY_REGEX = r_compile(r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)') r'(?:[0-9a-f]+:[0-9a-f:]+|(?:\d{1,3}(?:\.\d{1,3}){3}(?::\d+)?)) (?P<bytes_in>\d+) (?P<bytes_out>\d+)'
)
STATIC_KEY_REGEX = re.compile(
r'TCP/[A-Z]+ (?P<direction>(?:read|write)) bytes,(?P<bytes>\d+)'
)
class Service(SimpleService): class Service(SimpleService):

View File

@ -9,19 +9,8 @@ import re
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# default job configuration (overridden by python.d.plugin) REGEX = re.compile(r'([a-z][a-z ]+): ([\d.]+)')
# config = {'local': {
# 'update_every': update_every,
# 'retries': retries,
# 'priority': priority,
# 'url': 'http://localhost/status?full&json'
# }}
# charts order (can be overridden if you want less charts, or different order)
POOL_INFO = [ POOL_INFO = [
('active processes', 'active'), ('active processes', 'active'),
@ -49,7 +38,14 @@ CALC = [
('avg', average) ('avg', average)
] ]
ORDER = ['connections', 'requests', 'performance', 'request_duration', 'request_cpu', 'request_mem'] ORDER = [
'connections',
'requests',
'performance',
'request_duration',
'request_cpu',
'request_mem',
]
CHARTS = { CHARTS = {
'connections': { 'connections': {
@ -84,7 +80,7 @@ CHARTS = {
] ]
}, },
'request_cpu': { 'request_cpu': {
'options': [None, 'PHP-FPM Request CPU', 'percent', 'request CPU', 'phpfpm.request_cpu', 'line'], 'options': [None, 'PHP-FPM Request CPU', 'percentage', 'request CPU', 'phpfpm.request_cpu', 'line'],
'lines': [ 'lines': [
['minReqCpu', 'min'], ['minReqCpu', 'min'],
['maxReqCpu', 'max'], ['maxReqCpu', 'max'],
@ -92,7 +88,7 @@ CHARTS = {
] ]
}, },
'request_mem': { 'request_mem': {
'options': [None, 'PHP-FPM Request Memory', 'kilobytes', 'request memory', 'phpfpm.request_mem', 'line'], 'options': [None, 'PHP-FPM Request Memory', 'KB', 'request memory', 'phpfpm.request_mem', 'line'],
'lines': [ 'lines': [
['minReqMem', 'min', 'absolute', 1, 1024], ['minReqMem', 'min', 'absolute', 1, 1024],
['maxReqMem', 'max', 'absolute', 1, 1024], ['maxReqMem', 'max', 'absolute', 1, 1024],
@ -105,14 +101,14 @@ CHARTS = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.regex = re.compile(r'([a-z][a-z ]+): ([\d.]+)') self.url = self.configuration.get('url', 'http://localhost/status?full&json')
self.json = '&json' in self.url or '?json' in self.url self.json = '&json' in self.url or '?json' in self.url
self.json_full = self.url.endswith(('?full&json', '?json&full')) self.json_full = self.url.endswith(('?full&json', '?json&full'))
self.if_all_processes_running = dict([(c_name + p_name, 0) for c_name, func in CALC self.if_all_processes_running = dict(
for metric, p_name in PER_PROCESS_INFO]) [(c_name + p_name, 0) for c_name, func in CALC for metric, p_name in PER_PROCESS_INFO]
)
def _get_data(self): def _get_data(self):
""" """
@ -123,7 +119,7 @@ class Service(UrlService):
if not raw: if not raw:
return None return None
raw_json = parse_raw_data_(is_json=self.json, regex=self.regex, raw_data=raw) raw_json = parse_raw_data_(is_json=self.json, raw_data=raw)
# Per Pool info: active connections, requests and performance charts # Per Pool info: active connections, requests and performance charts
to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO) to_netdata = fetch_data_(raw_data=raw_json, metrics_list=POOL_INFO)
@ -159,7 +155,7 @@ def fetch_data_(raw_data, metrics_list, pid=''):
return result return result
def parse_raw_data_(is_json, regex, raw_data): def parse_raw_data_(is_json, raw_data):
""" """
:param is_json: bool :param is_json: bool
:param regex: compiled regular expr :param regex: compiled regular expr
@ -173,4 +169,4 @@ def parse_raw_data_(is_json, regex, raw_data):
return dict() return dict()
else: else:
raw_data = ' '.join(raw_data.split()) raw_data = ' '.join(raw_data.split())
return dict(regex.findall(raw_data)) return dict(REGEX.findall(raw_data))

View File

@ -12,8 +12,6 @@ except ImportError:
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
priority = 60000
PORT_LATENCY = 'connect' PORT_LATENCY = 'connect'
@ -25,7 +23,7 @@ ORDER = ['latency', 'status']
CHARTS = { CHARTS = {
'latency': { 'latency': {
'options': [None, 'TCP connect latency', 'ms', 'latency', 'portcheck.latency', 'line'], 'options': [None, 'TCP connect latency', 'milliseconds', 'latency', 'portcheck.latency', 'line'],
'lines': [ 'lines': [
[PORT_LATENCY, 'connect', 'absolute', 100, 1000] [PORT_LATENCY, 'connect', 'absolute', 100, 1000]
] ]

View File

@ -5,12 +5,12 @@
from bases.FrameworkServices.ExecutableService import ExecutableService from bases.FrameworkServices.ExecutableService import ExecutableService
# default module values (can be overridden per job in `config`) POSTQUEUE_COMMAND = 'postqueue -p'
# update_every = 2
priority = 60000
# charts order (can be overridden if you want less charts, or different order) ORDER = [
ORDER = ['qemails', 'qsize'] 'qemails',
'qsize',
]
CHARTS = { CHARTS = {
'qemails': { 'qemails': {
@ -20,7 +20,7 @@ CHARTS = {
] ]
}, },
'qsize': { 'qsize': {
'options': [None, 'Postfix Queue Emails Size', 'emails size in KB', 'queue', 'postfix.qsize', 'area'], 'options': [None, 'Postfix Queue Emails Size', 'KiB', 'queue', 'postfix.qsize', 'area'],
'lines': [ 'lines': [
['size', None, 'absolute'] ['size', None, 'absolute']
] ]
@ -31,9 +31,9 @@ CHARTS = {
class Service(ExecutableService): class Service(ExecutableService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
ExecutableService.__init__(self, configuration=configuration, name=name) ExecutableService.__init__(self, configuration=configuration, name=name)
self.command = 'postqueue -p'
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.command = POSTQUEUE_COMMAND
def _get_data(self): def _get_data(self):
""" """

View File

@ -636,7 +636,7 @@ CHARTS = {
] ]
}, },
'db_stat_temp_bytes': { 'db_stat_temp_bytes': {
'options': [None, 'Temp files written to disk', 'KB/s', 'db statistics', 'postgres.db_stat_temp_bytes', 'options': [None, 'Temp files written to disk', 'KiB/s', 'db statistics', 'postgres.db_stat_temp_bytes',
'line'], 'line'],
'lines': [ 'lines': [
['temp_bytes', 'size', 'incremental', 1, 1024] ['temp_bytes', 'size', 'incremental', 1, 1024]
@ -650,7 +650,7 @@ CHARTS = {
] ]
}, },
'database_size': { 'database_size': {
'options': [None, 'Database size', 'MB', 'database size', 'postgres.db_size', 'stacked'], 'options': [None, 'Database size', 'MiB', 'database size', 'postgres.db_size', 'stacked'],
'lines': [ 'lines': [
] ]
}, },
@ -669,7 +669,7 @@ CHARTS = {
] ]
}, },
'index_size': { 'index_size': {
'options': [None, 'Indexes size', 'MB', 'indexes', 'postgres.index_size', 'line'], 'options': [None, 'Indexes size', 'MiB', 'indexes', 'postgres.index_size', 'line'],
'lines': [ 'lines': [
['index_size', 'size', 'absolute', 1, 1024 * 1024] ['index_size', 'size', 'absolute', 1, 1024 * 1024]
] ]
@ -681,7 +681,7 @@ CHARTS = {
] ]
}, },
'table_size': { 'table_size': {
'options': [None, 'Tables size', 'MB', 'tables', 'postgres.table_size', 'line'], 'options': [None, 'Tables size', 'MiB', 'tables', 'postgres.table_size', 'line'],
'lines': [ 'lines': [
['table_size', 'size', 'absolute', 1, 1024 * 1024] ['table_size', 'size', 'absolute', 1, 1024 * 1024]
] ]
@ -695,7 +695,7 @@ CHARTS = {
] ]
}, },
'wal_writes': { 'wal_writes': {
'options': [None, 'Write-Ahead Logs', 'kilobytes/s', 'wal_writes', 'postgres.wal_writes', 'line'], 'options': [None, 'Write-Ahead Logs', 'KiB/s', 'wal_writes', 'postgres.wal_writes', 'line'],
'lines': [ 'lines': [
['wal_writes', 'writes', 'incremental', 1, 1024] ['wal_writes', 'writes', 'incremental', 1, 1024]
] ]
@ -716,20 +716,20 @@ CHARTS = {
] ]
}, },
'stat_bgwriter_alloc': { 'stat_bgwriter_alloc': {
'options': [None, 'Buffers allocated', 'kilobytes/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'], 'options': [None, 'Buffers allocated', 'KiB/s', 'bgwriter', 'postgres.stat_bgwriter_alloc', 'line'],
'lines': [ 'lines': [
['buffers_alloc', 'alloc', 'incremental', 1, 1024] ['buffers_alloc', 'alloc', 'incremental', 1, 1024]
] ]
}, },
'stat_bgwriter_checkpoint': { 'stat_bgwriter_checkpoint': {
'options': [None, 'Buffers written during checkpoints', 'kilobytes/s', 'bgwriter', 'options': [None, 'Buffers written during checkpoints', 'KiB/s', 'bgwriter',
'postgres.stat_bgwriter_checkpoint', 'line'], 'postgres.stat_bgwriter_checkpoint', 'line'],
'lines': [ 'lines': [
['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024] ['buffers_checkpoint', 'checkpoint', 'incremental', 1, 1024]
] ]
}, },
'stat_bgwriter_backend': { 'stat_bgwriter_backend': {
'options': [None, 'Buffers written directly by a backend', 'kilobytes/s', 'bgwriter', 'options': [None, 'Buffers written directly by a backend', 'KiB/s', 'bgwriter',
'postgres.stat_bgwriter_backend', 'line'], 'postgres.stat_bgwriter_backend', 'line'],
'lines': [ 'lines': [
['buffers_backend', 'backend', 'incremental', 1, 1024] ['buffers_backend', 'backend', 'incremental', 1, 1024]
@ -742,7 +742,7 @@ CHARTS = {
] ]
}, },
'stat_bgwriter_bgwriter': { 'stat_bgwriter_bgwriter': {
'options': [None, 'Buffers written by the background writer', 'kilobytes/s', 'bgwriter', 'options': [None, 'Buffers written by the background writer', 'KiB/s', 'bgwriter',
'postgres.bgwriter_bgwriter', 'line'], 'postgres.bgwriter_bgwriter', 'line'],
'lines': [ 'lines': [
['buffers_clean', 'clean', 'incremental', 1, 1024] ['buffers_clean', 'clean', 'incremental', 1, 1024]
@ -766,7 +766,7 @@ CHARTS = {
] ]
}, },
'standby_delta': { 'standby_delta': {
'options': [None, 'Standby delta', 'kilobytes', 'replication delta', 'postgres.standby_delta', 'line'], 'options': [None, 'Standby delta', 'KiB', 'replication delta', 'postgres.standby_delta', 'line'],
'lines': [ 'lines': [
['sent_delta', 'sent delta', 'absolute', 1, 1024], ['sent_delta', 'sent delta', 'absolute', 1, 1024],
['write_delta', 'write delta', 'absolute', 1, 1024], ['write_delta', 'write delta', 'absolute', 1, 1024],
@ -789,24 +789,19 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name) SimpleService.__init__(self, configuration=configuration, name=name)
self.order = list(ORDER) self.order = list(ORDER)
self.definitions = deepcopy(CHARTS) self.definitions = deepcopy(CHARTS)
self.do_table_stats = configuration.pop('table_stats', False) self.do_table_stats = configuration.pop('table_stats', False)
self.do_index_stats = configuration.pop('index_stats', False) self.do_index_stats = configuration.pop('index_stats', False)
self.databases_to_poll = configuration.pop('database_poll', None) self.databases_to_poll = configuration.pop('database_poll', None)
self.statement_timeout = configuration.pop('statement_timeout', DEFAULT_STATEMENT_TIMEOUT) self.statement_timeout = configuration.pop('statement_timeout', DEFAULT_STATEMENT_TIMEOUT)
self.configuration = configuration self.configuration = configuration
self.conn = None self.conn = None
self.server_version = None self.server_version = None
self.is_superuser = False self.is_superuser = False
self.alive = False self.alive = False
self.databases = list() self.databases = list()
self.secondaries = list() self.secondaries = list()
self.replication_slots = list() self.replication_slots = list()
self.queries = dict() self.queries = dict()
self.data = dict() self.data = dict()
def reconnect(self): def reconnect(self):

View File

@ -8,9 +8,14 @@ from json import loads
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
priority = 60000
ORDER = ['questions', 'cache_usage', 'cache_size', 'latency'] ORDER = [
'questions',
'cache_usage',
'cache_size',
'latency',
]
CHARTS = { CHARTS = {
'questions': { 'questions': {
'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'], 'options': [None, 'PowerDNS Queries and Answers', 'count', 'questions', 'powerdns.questions', 'line'],

View File

@ -5,10 +5,6 @@
from bases.FrameworkServices.MySQLService import MySQLService from bases.FrameworkServices.MySQLService import MySQLService
# default module values (can be overridden per job in `config`)
# update_every = 3
priority = 60000
def query(table, *params): def query(table, *params):
return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params)) return 'SELECT {params} FROM {table}'.format(table=table, params=', '.join(params))
@ -132,8 +128,8 @@ CHARTS = {
'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth', 'options': [None, 'ProxySQL Backend Overall Bandwidth', 'kilobits/s', 'overall_bandwidth',
'proxysql.pool_overall_net', 'area'], 'proxysql.pool_overall_net', 'area'],
'lines': [ 'lines': [
['bytes_data_recv', 'in', 'incremental', 8, 1024], ['bytes_data_recv', 'in', 'incremental', 8, 1000],
['bytes_data_sent', 'out', 'incremental', -8, 1024] ['bytes_data_sent', 'out', 'incremental', -8, 1000]
] ]
}, },
'questions': { 'questions': {
@ -155,7 +151,7 @@ CHARTS = {
] ]
}, },
'pool_latency': { 'pool_latency': {
'options': [None, 'ProxySQL Backend Latency', 'ms', 'latency', 'proxysql.latency', 'line'], 'options': [None, 'ProxySQL Backend Latency', 'milliseconds', 'latency', 'proxysql.latency', 'line'],
'lines': [] 'lines': []
}, },
'connections': { 'connections': {
@ -193,7 +189,7 @@ CHARTS = {
'lines': [] 'lines': []
}, },
'commands_duration': { 'commands_duration': {
'options': [None, 'ProxySQL Commands Duration', 'ms', 'commands', 'proxysql.commands_duration', 'line'], 'options': [None, 'ProxySQL Commands Duration', 'milliseconds', 'commands', 'proxysql.commands_duration', 'line'],
'lines': [] 'lines': []
} }
} }

View File

@ -11,28 +11,31 @@
# and tls_cert_file options then. # and tls_cert_file options then.
# #
from bases.FrameworkServices.UrlService import UrlService
from json import loads
import socket import socket
from json import loads
from bases.FrameworkServices.UrlService import UrlService
update_every = 5 update_every = 5
priority = 60000
MB = 1048576 MiB = 1 << 20
CPU_SCALE = 1000 CPU_SCALE = 1000
ORDER = [ ORDER = [
'jvm_heap', 'jvm_heap',
'jvm_nonheap', 'jvm_nonheap',
'cpu', 'cpu',
'fd_open', 'fd_open',
] ]
CHARTS = { CHARTS = {
'jvm_heap': { 'jvm_heap': {
'options': [None, 'JVM Heap', 'MB', 'resources', 'puppet.jvm', 'area'], 'options': [None, 'JVM Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
'lines': [ 'lines': [
['jvm_heap_committed', 'committed', 'absolute', 1, MB], ['jvm_heap_committed', 'committed', 'absolute', 1, MiB],
['jvm_heap_used', 'used', 'absolute', 1, MB], ['jvm_heap_used', 'used', 'absolute', 1, MiB],
], ],
'variables': [ 'variables': [
['jvm_heap_max'], ['jvm_heap_max'],
@ -40,10 +43,10 @@ CHARTS = {
], ],
}, },
'jvm_nonheap': { 'jvm_nonheap': {
'options': [None, 'JVM Non-Heap', 'MB', 'resources', 'puppet.jvm', 'area'], 'options': [None, 'JVM Non-Heap', 'MiB', 'resources', 'puppet.jvm', 'area'],
'lines': [ 'lines': [
['jvm_nonheap_committed', 'committed', 'absolute', 1, MB], ['jvm_nonheap_committed', 'committed', 'absolute', 1, MiB],
['jvm_nonheap_used', 'used', 'absolute', 1, MB], ['jvm_nonheap_used', 'used', 'absolute', 1, MiB],
], ],
'variables': [ 'variables': [
['jvm_nonheap_max'], ['jvm_nonheap_max'],
@ -72,9 +75,9 @@ CHARTS = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
self.url = 'https://{0}:8140'.format(socket.getfqdn())
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.url = 'https://{0}:8140'.format(socket.getfqdn())
def _get_data(self): def _get_data(self):
# NOTE: there are several ways to retrieve data # NOTE: there are several ways to retrieve data

View File

@ -14,9 +14,6 @@ except ImportError:
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 1
priority = 60000
METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats']) METHODS = namedtuple('METHODS', ['get_data', 'url', 'stats'])
@ -63,15 +60,15 @@ CHARTS = {
] ]
}, },
'memory': { 'memory': {
'options': [None, 'Memory', 'MB', 'overview', 'rabbitmq.memory', 'area'], 'options': [None, 'Memory', 'MiB', 'overview', 'rabbitmq.memory', 'area'],
'lines': [ 'lines': [
['mem_used', 'used', 'absolute', 1, 1024 << 10] ['mem_used', 'used', 'absolute', 1, 1 << 20]
] ]
}, },
'disk_space': { 'disk_space': {
'options': [None, 'Disk Space', 'GB', 'overview', 'rabbitmq.disk_space', 'area'], 'options': [None, 'Disk Space', 'GiB', 'overview', 'rabbitmq.disk_space', 'area'],
'lines': [ 'lines': [
['disk_free', 'free', 'absolute', 1, 1024 ** 3] ['disk_free', 'free', 'absolute', 1, 1 << 30]
] ]
}, },
'socket_descriptors': { 'socket_descriptors': {

View File

@ -47,13 +47,13 @@ CHARTS = {
] ]
}, },
'hit_rate': { 'hit_rate': {
'options': [None, 'Hit rate', 'percent', 'hits', 'redis.hit_rate', 'line'], 'options': [None, 'Hit rate', 'percentage', 'hits', 'redis.hit_rate', 'line'],
'lines': [ 'lines': [
['hit_rate', 'rate', 'absolute'] ['hit_rate', 'rate', 'absolute']
] ]
}, },
'memory': { 'memory': {
'options': [None, 'Memory utilization', 'kilobytes', 'memory', 'redis.memory', 'line'], 'options': [None, 'Memory utilization', 'KiB', 'memory', 'redis.memory', 'line'],
'lines': [ 'lines': [
['used_memory', 'total', 'absolute', 1, 1024], ['used_memory', 'total', 'absolute', 1, 1024],
['used_memory_lua', 'lua', 'absolute', 1, 1024] ['used_memory_lua', 'lua', 'absolute', 1, 1024]
@ -62,8 +62,8 @@ CHARTS = {
'net': { 'net': {
'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'], 'options': [None, 'Bandwidth', 'kilobits/s', 'network', 'redis.net', 'area'],
'lines': [ 'lines': [
['total_net_input_bytes', 'in', 'incremental', 8, 1024], ['total_net_input_bytes', 'in', 'incremental', 8, 1000],
['total_net_output_bytes', 'out', 'incremental', -8, 1024] ['total_net_output_bytes', 'out', 'incremental', -8, 1000]
] ]
}, },
'keys_redis': { 'keys_redis': {
@ -146,16 +146,13 @@ RE = re.compile(r'\n([a-z_0-9 ]+):(?:keys=)?([^,\r]+)')
class Service(SocketService): class Service(SocketService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
SocketService.__init__(self, configuration=configuration, name=name) SocketService.__init__(self, configuration=configuration, name=name)
self._keep_alive = True
self.order = list() self.order = list()
self.definitions = dict() self.definitions = dict()
self._keep_alive = True
self.host = self.configuration.get('host', 'localhost') self.host = self.configuration.get('host', 'localhost')
self.port = self.configuration.get('port', 6379) self.port = self.configuration.get('port', 6379)
self.unix_socket = self.configuration.get('socket') self.unix_socket = self.configuration.get('socket')
p = self.configuration.get('pass') p = self.configuration.get('pass')
self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None self.auth_request = 'AUTH {0} \r\n'.format(p).encode() if p else None
self.request = 'INFO\r\n'.encode() self.request = 'INFO\r\n'.encode()
self.bgsave_time = 0 self.bgsave_time = 0

View File

@ -136,13 +136,11 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name) SimpleService.__init__(self, configuration=configuration, name=name)
self.order = list(ORDER) self.order = list(ORDER)
self.definitions = cluster_charts() self.definitions = cluster_charts()
self.host = self.configuration.get('host', '127.0.0.1') self.host = self.configuration.get('host', '127.0.0.1')
self.port = self.configuration.get('port', 28015) self.port = self.configuration.get('port', 28015)
self.user = self.configuration.get('user', 'admin') self.user = self.configuration.get('user', 'admin')
self.password = self.configuration.get('password') self.password = self.configuration.get('password')
self.timeout = self.configuration.get('timeout', 2) self.timeout = self.configuration.get('timeout', 2)
self.conn = None self.conn = None
self.alive = True self.alive = True

View File

@ -7,25 +7,25 @@ import json
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
# charts order (can be overridden if you want less charts, or different order) ORDER = [
ORDER = ['bandwidth', 'peers', 'dht'] 'bandwidth',
'peers',
'dht',
]
CHARTS = { CHARTS = {
'bandwidth': { 'bandwidth': {
'options': [None, 'RetroShare Bandwidth', 'kB/s', 'RetroShare', 'retroshare.bandwidth', 'area'], 'options': [None, 'RetroShare Bandwidth', 'kilobits/s', 'RetroShare', 'retroshare.bandwidth', 'area'],
'lines': [ 'lines': [
['bandwidth_up_kb', 'Upload'], ['bandwidth_up_kb', 'Upload'],
['bandwidth_down_kb', 'Download'] ['bandwidth_down_kb', 'Download']
] ]
}, },
'peers': { 'peers': {
'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'], 'options': [None, 'RetroShare Peers', 'peers', 'RetroShare', 'retroshare.peers', 'line'],
'lines': [ 'lines': [
['peers_all', 'All friends'], ['peers_all', 'All friends'],
['peers_connected', 'Connected friends'] ['peers_connected', 'Connected friends']
] ]
}, },
@ -33,7 +33,7 @@ CHARTS = {
'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'], 'options': [None, 'Retroshare DHT', 'peers', 'RetroShare', 'retroshare.dht', 'line'],
'lines': [ 'lines': [
['dht_size_all', 'DHT nodes estimated'], ['dht_size_all', 'DHT nodes estimated'],
['dht_size_rs', 'RS nodes estimated'] ['dht_size_rs', 'RS nodes estimated']
] ]
} }
} }
@ -42,9 +42,9 @@ CHARTS = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
self.baseurl = self.configuration.get('url', 'http://localhost:9090')
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.baseurl = self.configuration.get('url', 'http://localhost:9090')
def _get_stats(self): def _get_stats(self):
""" """

View File

@ -24,9 +24,7 @@ from bases.FrameworkServices.ExecutableService import ExecutableService
disabled_by_default = True disabled_by_default = True
# default module values (can be overridden per job in `config`)
update_every = 5 update_every = 5
priority = 60000
ORDER = [ ORDER = [
'syscall_rw', 'syscall_rw',
@ -40,14 +38,14 @@ ORDER = [
CHARTS = { CHARTS = {
'syscall_rw': { 'syscall_rw': {
'options': [None, 'R/Ws', 'kilobytes/s', 'syscall', 'syscall.rw', 'area'], 'options': [None, 'R/Ws', 'KiB/s', 'syscall', 'syscall.rw', 'area'],
'lines': [ 'lines': [
['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024], ['syscall_sendfile_bytes', 'sendfile', 'incremental', 1, 1024],
['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024] ['syscall_recvfile_bytes', 'recvfile', 'incremental', -1, 1024]
] ]
}, },
'smb2_rw': { 'smb2_rw': {
'options': [None, 'R/Ws', 'kilobytes/s', 'smb2', 'smb2.rw', 'area'], 'options': [None, 'R/Ws', 'KiB/s', 'smb2', 'smb2.rw', 'area'],
'lines': [ 'lines': [
['smb2_read_outbytes', 'readout', 'incremental', 1, 1024], ['smb2_read_outbytes', 'readout', 'incremental', 1, 1024],
['smb2_write_inbytes', 'writein', 'incremental', -1, 1024], ['smb2_write_inbytes', 'writein', 'incremental', -1, 1024],

View File

@ -7,8 +7,6 @@ from third_party import lm_sensors as sensors
from bases.FrameworkServices.SimpleService import SimpleService from bases.FrameworkServices.SimpleService import SimpleService
# default module values (can be overridden per job in `config`)
# update_every = 2
ORDER = [ ORDER = [
'temperature', 'temperature',

View File

@ -268,7 +268,7 @@ CHARTS = {
'algo': INCREMENTAL, 'algo': INCREMENTAL,
}, },
'reserved_block_count': { 'reserved_block_count': {
'options': [None, 'Reserved Block Count', '%', 'wear', 'smartd_log.reserved_block_count', 'line'], 'options': [None, 'Reserved Block Count', 'percentage', 'wear', 'smartd_log.reserved_block_count', 'line'],
'lines': [], 'lines': [],
'attrs': [ATTR170], 'attrs': [ATTR170],
'algo': ABSOLUTE, 'algo': ABSOLUTE,
@ -321,7 +321,7 @@ CHARTS = {
}, },
'percent_lifetime_used': { 'percent_lifetime_used': {
'options': [None, 'Percent Lifetime Used', '%', 'wear', 'smartd_log.percent_lifetime_used', 'line'], 'options': [None, 'Percent Lifetime Used', 'percentage', 'wear', 'smartd_log.percent_lifetime_used', 'line'],
'lines': [], 'lines': [],
'attrs': [ATTR202], 'attrs': [ATTR202],
'algo': ABSOLUTE, 'algo': ABSOLUTE,
@ -586,11 +586,9 @@ class Service(SimpleService):
SimpleService.__init__(self, configuration=configuration, name=name) SimpleService.__init__(self, configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = deepcopy(CHARTS) self.definitions = deepcopy(CHARTS)
self.log_path = configuration.get('log_path', DEF_PATH) self.log_path = configuration.get('log_path', DEF_PATH)
self.age = configuration.get('age', DEF_AGE) self.age = configuration.get('age', DEF_AGE)
self.exclude = configuration.get('exclude_disks', str()).split() self.exclude = configuration.get('exclude_disks', str()).split()
self.disks = list() self.disks = list()
self.runs = 0 self.runs = 0

View File

@ -16,7 +16,10 @@ update_every = 5
PRECISION = 100 PRECISION = 100
ORDER = ['tps', 'users'] ORDER = [
'tps',
'users',
]
CHARTS = { CHARTS = {
'tps': { 'tps': {

View File

@ -6,12 +6,14 @@
import json import json
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
DEFAULT_ORDER = [
DEFAULT_ORDER = ['response_code', 'threads', 'gc_time', 'gc_ope', 'heap'] 'response_code',
'threads',
'gc_time',
'gc_ope',
'heap',
]
DEFAULT_CHARTS = { DEFAULT_CHARTS = {
'response_code': { 'response_code': {
@ -59,7 +61,7 @@ DEFAULT_CHARTS = {
] ]
}, },
'heap': { 'heap': {
'options': [None, "Heap Memory Usage", "KB", "heap memory", "springboot.heap", "area"], 'options': [None, "Heap Memory Usage", "KiB", "heap memory", "springboot.heap", "area"],
'lines': [ 'lines': [
["heap_committed", 'committed', "absolute"], ["heap_committed", 'committed', "absolute"],
["heap_used", 'used', "absolute"], ["heap_used", 'used', "absolute"],

View File

@ -6,12 +6,12 @@
from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.SocketService import SocketService
# default module values (can be overridden per job in `config`) ORDER = [
# update_every = 2 'clients_net',
priority = 60000 'clients_requests',
'servers_net',
# charts order (can be overridden if you want less charts, or different order) 'servers_requests',
ORDER = ['clients_net', 'clients_requests', 'servers_net', 'servers_requests'] ]
CHARTS = { CHARTS = {
'clients_net': { 'clients_net': {

View File

@ -8,12 +8,18 @@ import xml.etree.ElementTree as ET
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`) MiB = 1 << 20
# update_every = 2
priority = 60000
# charts order (can be overridden if you want less charts, or different order) ORDER = [
ORDER = ['accesses', 'bandwidth', 'processing_time', 'threads', 'jvm', 'jvm_eden', 'jvm_survivor', 'jvm_tenured'] 'accesses',
'bandwidth',
'processing_time',
'threads',
'jvm',
'jvm_eden',
'jvm_survivor',
'jvm_tenured',
]
CHARTS = { CHARTS = {
'accesses': { 'accesses': {
@ -24,7 +30,7 @@ CHARTS = {
] ]
}, },
'bandwidth': { 'bandwidth': {
'options': [None, 'Bandwidth', 'KB/s', 'statistics', 'tomcat.bandwidth', 'area'], 'options': [None, 'Bandwidth', 'KiB/s', 'statistics', 'tomcat.bandwidth', 'area'],
'lines': [ 'lines': [
['bytesSent', 'sent', 'incremental', 1, 1024], ['bytesSent', 'sent', 'incremental', 1, 1024],
['bytesReceived', 'received', 'incremental', 1, 1024], ['bytesReceived', 'received', 'incremental', 1, 1024],
@ -44,39 +50,39 @@ CHARTS = {
] ]
}, },
'jvm': { 'jvm': {
'options': [None, 'JVM Memory Pool Usage', 'MB', 'memory', 'tomcat.jvm', 'stacked'], 'options': [None, 'JVM Memory Pool Usage', 'MiB', 'memory', 'tomcat.jvm', 'stacked'],
'lines': [ 'lines': [
['free', 'free', 'absolute', 1, 1048576], ['free', 'free', 'absolute', 1, MiB],
['eden_used', 'eden', 'absolute', 1, 1048576], ['eden_used', 'eden', 'absolute', 1, MiB],
['survivor_used', 'survivor', 'absolute', 1, 1048576], ['survivor_used', 'survivor', 'absolute', 1, MiB],
['tenured_used', 'tenured', 'absolute', 1, 1048576], ['tenured_used', 'tenured', 'absolute', 1, MiB],
['code_cache_used', 'code cache', 'absolute', 1, 1048576], ['code_cache_used', 'code cache', 'absolute', 1, MiB],
['compressed_used', 'compressed', 'absolute', 1, 1048576], ['compressed_used', 'compressed', 'absolute', 1, MiB],
['metaspace_used', 'metaspace', 'absolute', 1, 1048576], ['metaspace_used', 'metaspace', 'absolute', 1, MiB],
] ]
}, },
'jvm_eden': { 'jvm_eden': {
'options': [None, 'Eden Memory Usage', 'MB', 'memory', 'tomcat.jvm_eden', 'area'], 'options': [None, 'Eden Memory Usage', 'MiB', 'memory', 'tomcat.jvm_eden', 'area'],
'lines': [ 'lines': [
['eden_used', 'used', 'absolute', 1, 1048576], ['eden_used', 'used', 'absolute', 1, MiB],
['eden_committed', 'committed', 'absolute', 1, 1048576], ['eden_committed', 'committed', 'absolute', 1, MiB],
['eden_max', 'max', 'absolute', 1, 1048576] ['eden_max', 'max', 'absolute', 1, MiB]
] ]
}, },
'jvm_survivor': { 'jvm_survivor': {
'options': [None, 'Survivor Memory Usage', 'MB', 'memory', 'tomcat.jvm_survivor', 'area'], 'options': [None, 'Survivor Memory Usage', 'MiB', 'memory', 'tomcat.jvm_survivor', 'area'],
'lines': [ 'lines': [
['survivor_used', 'used', 'absolute', 1, 1048576], ['survivor_used', 'used', 'absolute', 1, MiB],
['survivor_committed', 'committed', 'absolute', 1, 1048576], ['survivor_committed', 'committed', 'absolute', 1, MiB],
['survivor_max', 'max', 'absolute', 1, 1048576] ['survivor_max', 'max', 'absolute', 1, MiB],
] ]
}, },
'jvm_tenured': { 'jvm_tenured': {
'options': [None, 'Tenured Memory Usage', 'MB', 'memory', 'tomcat.jvm_tenured', 'area'], 'options': [None, 'Tenured Memory Usage', 'MiB', 'memory', 'tomcat.jvm_tenured', 'area'],
'lines': [ 'lines': [
['tenured_used', 'used', 'absolute', 1, 1048576], ['tenured_used', 'used', 'absolute', 1, MiB],
['tenured_committed', 'committed', 'absolute', 1, 1048576], ['tenured_committed', 'committed', 'absolute', 1, MiB],
['tenured_max', 'max', 'absolute', 1, 1048576] ['tenured_max', 'max', 'absolute', 1, MiB]
] ]
} }
} }
@ -85,10 +91,10 @@ CHARTS = {
class Service(UrlService): class Service(UrlService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
UrlService.__init__(self, configuration=configuration, name=name) UrlService.__init__(self, configuration=configuration, name=name)
self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
self.connector_name = self.configuration.get('connector_name', None)
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.url = self.configuration.get('url', 'http://127.0.0.1:8080/manager/status?XML=true')
self.connector_name = self.configuration.get('connector_name', None)
def _get_data(self): def _get_data(self):
""" """

View File

@ -24,7 +24,7 @@ ORDER = [
CHARTS = { CHARTS = {
'traffic': { 'traffic': {
'options': [None, 'Tor Traffic', 'KB/s', 'traffic', 'tor.traffic', 'area'], 'options': [None, 'Tor Traffic', 'KiB/s', 'traffic', 'tor.traffic', 'area'],
'lines': [ 'lines': [
['read', 'read', 'incremental', 1, 1024], ['read', 'read', 'incremental', 1, 1024],
['write', 'write', 'incremental', 1, -1024], ['write', 'write', 'incremental', 1, -1024],
@ -39,10 +39,8 @@ class Service(SimpleService):
super(Service, self).__init__(configuration=configuration, name=name) super(Service, self).__init__(configuration=configuration, name=name)
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.port = self.configuration.get('control_port', DEF_PORT) self.port = self.configuration.get('control_port', DEF_PORT)
self.password = self.configuration.get('password') self.password = self.configuration.get('password')
self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit() self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
self.conn = None self.conn = None
self.alive = False self.alive = False

View File

@ -3,15 +3,13 @@
# Author: Alexandre Menezes (@ale_menezes) # Author: Alexandre Menezes (@ale_menezes)
# SPDX-License-Identifier: GPL-3.0-or-later # SPDX-License-Identifier: GPL-3.0-or-later
from json import loads
from collections import defaultdict from collections import defaultdict
from json import loads
from bases.FrameworkServices.UrlService import UrlService from bases.FrameworkServices.UrlService import UrlService
# default module values (can be overridden per job in `config`)
update_every = 1
priority = 60000
# charts order (can be overridden if you want less charts, or different order)
ORDER = [ ORDER = [
'response_statuses', 'response_statuses',
'response_codes', 'response_codes',
@ -98,14 +96,22 @@ class Service(UrlService):
self.url = self.configuration.get('url', 'http://localhost:8080/health') self.url = self.configuration.get('url', 'http://localhost:8080/health')
self.order = ORDER self.order = ORDER
self.definitions = CHARTS self.definitions = CHARTS
self.data = {
'successful_requests': 0, 'redirects': 0, 'bad_requests': 0,
'server_errors': 0, 'other_requests': 0, '1xx': 0, '2xx': 0,
'3xx': 0, '4xx': 0, '5xx': 0, 'other': 0,
'average_response_time_per_iteration_sec': 0
}
self.last_total_response_time = 0 self.last_total_response_time = 0
self.last_total_count = 0 self.last_total_count = 0
self.data = {
'successful_requests': 0,
'redirects': 0,
'bad_requests': 0,
'server_errors': 0,
'other_requests': 0,
'1xx': 0,
'2xx': 0,
'3xx': 0,
'4xx': 0,
'5xx': 0,
'other': 0,
'average_response_time_per_iteration_sec': 0,
}
def _get_data(self): def _get_data(self):
data = self._get_raw_data() data = self._get_raw_data()

View File

@ -13,7 +13,11 @@ from bases.loaders import YamlOrderedLoader
PRECISION = 1000 PRECISION = 1000
ORDER = ['queries', 'recursion', 'reqlist'] ORDER = [
'queries',
'recursion',
'reqlist',
]
CHARTS = { CHARTS = {
'queries': { 'queries': {

View File

@ -7,9 +7,6 @@ import json
from copy import deepcopy from copy import deepcopy
from bases.FrameworkServices.SocketService import SocketService from bases.FrameworkServices.SocketService import SocketService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
ORDER = [ ORDER = [
'requests', 'requests',
@ -39,27 +36,27 @@ CHARTS = {
] ]
}, },
'tx': { 'tx': {
'options': [None, 'Transmitted data', 'KB/s', 'requests', 'uwsgi.tx', 'stacked'], 'options': [None, 'Transmitted data', 'KiB/s', 'requests', 'uwsgi.tx', 'stacked'],
'lines': [ 'lines': [
['tx', 'tx', 'incremental'] ['tx', 'tx', 'incremental']
] ]
}, },
'avg_rt': { 'avg_rt': {
'options': [None, 'Average request time', 'ms', 'requests', 'uwsgi.avg_rt', 'line'], 'options': [None, 'Average request time', 'milliseconds', 'requests', 'uwsgi.avg_rt', 'line'],
'lines': [ 'lines': [
['avg_rt', 'avg_rt', 'absolute'] ['avg_rt', 'avg_rt', 'absolute']
] ]
}, },
'memory_rss': { 'memory_rss': {
'options': [None, 'RSS (Resident Set Size)', 'MB', 'memory', 'uwsgi.memory_rss', 'stacked'], 'options': [None, 'RSS (Resident Set Size)', 'MiB', 'memory', 'uwsgi.memory_rss', 'stacked'],
'lines': [ 'lines': [
['memory_rss', 'memory_rss', 'absolute', 1, 1024 * 1024] ['memory_rss', 'memory_rss', 'absolute', 1, 1 << 20]
] ]
}, },
'memory_vsz': { 'memory_vsz': {
'options': [None, 'VSZ (Virtual Memory Size)', 'MB', 'memory', 'uwsgi.memory_vsz', 'stacked'], 'options': [None, 'VSZ (Virtual Memory Size)', 'MiB', 'memory', 'uwsgi.memory_vsz', 'stacked'],
'lines': [ 'lines': [
['memory_vsz', 'memory_vsz', 'absolute', 1, 1024 * 1024] ['memory_vsz', 'memory_vsz', 'absolute', 1, 1 << 20]
] ]
}, },
'exceptions': { 'exceptions': {
@ -86,15 +83,13 @@ CHARTS = {
class Service(SocketService): class Service(SocketService):
def __init__(self, configuration=None, name=None): def __init__(self, configuration=None, name=None):
super(Service, self).__init__(configuration=configuration, name=name) super(Service, self).__init__(configuration=configuration, name=name)
self.url = self.configuration.get('host', 'localhost')
self.port = self.configuration.get('port', 1717)
self.order = ORDER self.order = ORDER
self.definitions = deepcopy(CHARTS) self.definitions = deepcopy(CHARTS)
self.url = self.configuration.get('host', 'localhost')
self.port = self.configuration.get('port', 1717)
# Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time # Clear dynamic dimensions, these are added during `_get_data()` to allow adding workers at run-time
for chart in DYNAMIC_CHARTS: for chart in DYNAMIC_CHARTS:
self.definitions[chart]['lines'] = [] self.definitions[chart]['lines'] = []
self.last_result = {} self.last_result = {}
self.workers = [] self.workers = []

View File

@ -8,9 +8,6 @@ import re
from bases.collection import find_binary from bases.collection import find_binary
from bases.FrameworkServices.ExecutableService import ExecutableService from bases.FrameworkServices.ExecutableService import ExecutableService
# default module values (can be overridden per job in `config`)
# update_every = 2
priority = 60000
ORDER = [ ORDER = [
'session_connections', 'session_connections',
@ -46,7 +43,7 @@ CHARTS = {
] ]
}, },
'all_time_hit_rate': { 'all_time_hit_rate': {
'options': [None, 'All History Hit Rate Ratio', 'percent', 'cache performance', 'options': [None, 'All History Hit Rate Ratio', 'percentage', 'cache performance',
'varnish.all_time_hit_rate', 'stacked'], 'varnish.all_time_hit_rate', 'stacked'],
'lines': [ 'lines': [
['cache_hit', 'hit', 'percentage-of-absolute-row'], ['cache_hit', 'hit', 'percentage-of-absolute-row'],
@ -54,7 +51,7 @@ CHARTS = {
['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']] ['cache_hitpass', 'hitpass', 'percentage-of-absolute-row']]
}, },
'current_poll_hit_rate': { 'current_poll_hit_rate': {
'options': [None, 'Current Poll Hit Rate Ratio', 'percent', 'cache performance', 'options': [None, 'Current Poll Hit Rate Ratio', 'percentage', 'cache performance',
'varnish.current_poll_hit_rate', 'stacked'], 'varnish.current_poll_hit_rate', 'stacked'],
'lines': [ 'lines': [
['cache_hit', 'hit', 'percentage-of-incremental-row'], ['cache_hit', 'hit', 'percentage-of-incremental-row'],
@ -126,7 +123,7 @@ CHARTS = {
] ]
}, },
'memory_usage': { 'memory_usage': {
'options': [None, 'Memory Usage', 'MB', 'memory usage', 'varnish.memory_usage', 'stacked'], 'options': [None, 'Memory Usage', 'MiB', 'memory usage', 'varnish.memory_usage', 'stacked'],
'lines': [ 'lines': [
['memory_free', 'free', 'absolute', 1, 1 << 20], ['memory_free', 'free', 'absolute', 1, 1 << 20],
['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]] ['memory_allocated', 'allocated', 'absolute', 1, 1 << 20]]

View File

@ -16,7 +16,9 @@ W1_DIR = '/sys/bus/w1/devices/'
# Lines matching the following regular expression contain a temperature value # Lines matching the following regular expression contain a temperature value
RE_TEMP = re.compile(r' t=(\d+)') RE_TEMP = re.compile(r' t=(\d+)')
ORDER = ['temp'] ORDER = [
'temp',
]
CHARTS = { CHARTS = {
'temp': { 'temp': {

View File

@ -25,7 +25,9 @@ from bases.collection import read_last_line
from bases.FrameworkServices.LogService import LogService from bases.FrameworkServices.LogService import LogService
ORDER_APACHE_CACHE = ['apache_cache'] ORDER_APACHE_CACHE = [
'apache_cache',
]
ORDER_WEB = [ ORDER_WEB = [
'response_statuses', 'response_statuses',
@ -182,7 +184,7 @@ CHARTS_WEB = {
CHARTS_APACHE_CACHE = { CHARTS_APACHE_CACHE = {
'apache_cache': { 'apache_cache': {
'options': [None, 'Apache Cached Responses', 'percent cached', 'cached', 'web_log.apache_cache_cache', 'options': [None, 'Apache Cached Responses', 'percentage', 'cached', 'web_log.apache_cache_cache',
'stacked'], 'stacked'],
'lines': [ 'lines': [
['hit', 'cache', 'percentage-of-absolute-row'], ['hit', 'cache', 'percentage-of-absolute-row'],