Remove node.d.plugin and relevant files (#12769)

* Remove node.d.plugin and relevant files

* fix build packages

* remove node.d related words/phrases from docs and tests
This commit is contained in:
Suraj Neupane 2022-05-03 09:16:21 +03:00 committed by GitHub
parent 4c88988ed3
commit 22863c42b9
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
44 changed files with 24 additions and 7207 deletions

View File

@ -4,7 +4,6 @@ exclude_paths:
- collectors/python.d.plugin/python_modules/pyyaml3/**
- collectors/python.d.plugin/python_modules/urllib3/**
- collectors/python.d.plugin/python_modules/third_party/**
- collectors/node.d.plugin/node_modules/**
- contrib/**
- packaging/makeself/**
- web/gui/css/**

View File

@ -91,8 +91,3 @@ exclude_patterns:
- "collectors/python.d.plugin/python_modules/pyyaml2/"
- "collectors/python.d.plugin/python_modules/pyyaml3/"
- "collectors/python.d.plugin/python_modules/urllib3/"
- "collectors/node.d.plugin/node_modules/lib/"
- "collectors/node.d.plugin/node_modules/asn1-ber.js"
- "collectors/node.d.plugin/node_modules/extend.js"
- "collectors/node.d.plugin/node_modules/pixl-xml.js"
- "collectors/node.d.plugin/node_modules/net-snmp.js"

2
.github/CODEOWNERS vendored
View File

@ -15,8 +15,6 @@ collectors/ebpf.plugin/ @thiagoftsm @vlvkobal
collectors/charts.d.plugin/ @ilyam8 @surajnpn @Ferroin
collectors/freebsd.plugin/ @vlvkobal @thiagoftsm @surajnpn
collectors/macos.plugin/ @vlvkobal @thiagoftsm @surajnpn
collectors/node.d.plugin/ @jacekkolasa
collectors/node.d.plugin/snmp/ @jacekkolasa
collectors/python.d.plugin/ @ilyam8
collectors/cups.plugin/ @simonnagl @vlvkobal @thiagoftsm
exporting/ @vlvkobal @thiagoftsm

4
.github/labeler.yml vendored
View File

@ -116,10 +116,6 @@ collectors/nfacct:
- collectors/nfacct.plugin/*
- collectors/nfacct.plugin/**/*
collectors/node.d:
- collectors/node.d.plugin/*
- collectors/node.d.plugin/**/*
collectors/perf:
- collectors/perf.plugin/*
- collectors/perf.plugin/**/*

1
.gitignore vendored
View File

@ -136,7 +136,6 @@ health/notifications/alarm-notify.sh
claim/netdata-claim.sh
collectors/tc.plugin/tc-qos-helper.sh
collectors/charts.d.plugin/charts.d.plugin
collectors/node.d.plugin/node.d.plugin
collectors/python.d.plugin/python.d.plugin
collectors/fping.plugin/fping.plugin
collectors/ioping.plugin/ioping.plugin

View File

@ -12,11 +12,6 @@ path_classifiers:
- collectors/python.d.plugin/python_modules/urllib3/
- collectors/python.d.plugin/python_modules/pyyaml2/
- collectors/python.d.plugin/python_modules/pyyaml3/
- collectors/node.d.plugin/node_modules/lib/
- collectors/node.d.plugin/node_modules/asn1-ber.js
- collectors/node.d.plugin/node_modules/extend.js
- collectors/node.d.plugin/node_modules/net-snmp.js
- collectors/node.d.plugin/node_modules/pixl-xml.js
- ml/kmeans/dlib/
- ml/json/
- web/gui/lib/

View File

@ -96,7 +96,6 @@
# check for new plugins every = 60
# slabinfo = no
# go.d = yes
# node.d = yes
# apps = yes
# charts.d = yes
# fping = yes
@ -265,10 +264,6 @@
# update every = 1
# command options =
[plugin:node.d]
# update every = 1
# command options =
[plugin:apps]
# update every = 1
# command options =

View File

@ -28,7 +28,6 @@ errors flood protection period = 0
fping = no
python.d = no
charts.d = no
node.d = no
nfacct = no
cups = no
freeipmi = no

View File

@ -250,13 +250,15 @@ configure any of these collectors according to your setup and infrastructure.
- [AM2320](/collectors/python.d.plugin/am2320/README.md): Monitor sensor temperature and humidity.
- [Access point](/collectors/charts.d.plugin/ap/README.md): Monitor client, traffic and signal metrics using the `aw`
tool.
tool.
- [APC UPS](/collectors/charts.d.plugin/apcupsd/README.md): Capture status information using the `apcaccess` tool.
- [Energi Core](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/energid): Monitor
blockchain indexes, memory usage, network usage, and transactions of wallet instances.
- [Energi Core (Go)](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/energid): Monitor
blockchain indexes, memory usage, network usage, and transactions of wallet instances.
- [Energi Core (Python)](/collectors/python.d.plugin/energid/README.md): Monitor blockchain, memory, network, and
unspent transactions statistics.
- [UPS/PDU](/collectors/charts.d.plugin/nut/README.md): Read the status of UPS/PDU devices using the `upsc` tool.
- [SNMP devices](/collectors/node.d.plugin/snmp/README.md): Gather data using the SNMP protocol.
- [1-Wire sensors](/collectors/python.d.plugin/w1sensor/README.md): Monitor sensor temperature.
- [SNMP devices](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/modules/snmp): Gather data using the SNMP protocol.
- [1-Wire sensors](/collectors/python.d.plugin/w1sensor/README.md): Monitor sensor temperature.
### Search
@ -481,10 +483,9 @@ Plugin orchestrators organize and run many of the above collectors.
If you're interested in developing a new collector that you'd like to contribute to Netdata, we highly recommend using
the `go.d.plugin`.
- [go.d.plugin](https://github.com/netdata/go.d.plugin): An orchestrator for data collection modules written in `go`.
- [python.d.plugin](python.d.plugin/README.md): An orchestrator for data collection modules written in `python` v2/v3.
- [charts.d.plugin](charts.d.plugin/README.md): An orchestrator for data collection modules written in `bash` v4+.
- [node.d.plugin](node.d.plugin/README.md): An orchestrator for data collection modules written in `node.js`.
- [go.d.plugin](https://github.com/netdata/go.d.plugin): An orchestrator for data collection modules written in `go`.
- [python.d.plugin](python.d.plugin/README.md): An orchestrator for data collection modules written in `python` v2/v3.
- [charts.d.plugin](charts.d.plugin/README.md): An orchestrator for data collection modules written in `bash` v4+.
## Third-party collectors

View File

@ -20,7 +20,6 @@ SUBDIRS = \
nfacct.plugin \
xenstat.plugin \
perf.plugin \
node.d.plugin \
proc.plugin \
python.d.plugin \
slabinfo.plugin \

View File

@ -67,9 +67,6 @@ field contains `go.d`, that collector uses the Go orchestrator.
# Python orchestrator (python.d.plugin)
./python.d.plugin <MODULE_NAME> debug trace
# Node orchestrator (node.d.plugin)
./node.d.plugin debug 1 <MODULE_NAME>
# Bash orchestrator (bash.d.plugin)
./charts.d.plugin debug 1 <MODULE_NAME>
```
@ -100,7 +97,6 @@ This section features a list of Netdata's plugins, with a boolean setting to ena
# slabinfo = no
# fping = yes
# ioping = yes
# node.d = yes
# python.d = yes
# go.d = yes
# apps = yes

View File

@ -82,7 +82,6 @@ cups.plugin: cups.plugin
xenstat.plugin: xenstat.plugin
perf.plugin: perf.plugin
charts.d.plugin: *charts.d.plugin*
node.d.plugin: *node.d.plugin*
python.d.plugin: *python.d.plugin*
tc-qos-helper: *tc-qos-helper.sh*
fping: fping

View File

@ -1,57 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
CLEANFILES = \
node.d.plugin \
$(NULL)
include $(top_srcdir)/build/subst.inc
SUFFIXES = .in
dist_libconfig_DATA = \
node.d.conf \
$(NULL)
dist_plugins_SCRIPTS = \
node.d.plugin \
$(NULL)
dist_noinst_DATA = \
node.d.plugin.in \
README.md \
$(NULL)
usernodeconfigdir=$(configdir)/node.d
dist_usernodeconfig_DATA = \
$(NULL)
# Explicitly install directories to avoid permission issues due to umask
install-exec-local:
$(INSTALL) -d $(DESTDIR)$(usernodeconfigdir)
nodeconfigdir=$(libconfigdir)/node.d
dist_nodeconfig_DATA = \
$(NULL)
dist_node_DATA = \
$(NULL)
include snmp/Makefile.inc
nodemodulesdir=$(nodedir)/node_modules
dist_nodemodules_DATA = \
node_modules/netdata.js \
node_modules/extend.js \
node_modules/pixl-xml.js \
node_modules/net-snmp.js \
node_modules/asn1-ber.js \
$(NULL)
nodemoduleslibberdir=$(nodedir)/node_modules/lib/ber
dist_nodemoduleslibber_DATA = \
node_modules/lib/ber/index.js \
node_modules/lib/ber/errors.js \
node_modules/lib/ber/reader.js \
node_modules/lib/ber/types.js \
node_modules/lib/ber/writer.js \
$(NULL)

View File

@ -1,236 +0,0 @@
<!--
title: "node.d.plugin"
custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/README.md
-->
# node.d.plugin
`node.d.plugin` is a Netdata external plugin. It is an **orchestrator** for data collection modules written in `node.js`.
1. It runs as an independent process `ps fax` shows it
2. It is started and stopped automatically by Netdata
3. It communicates with Netdata via a unidirectional pipe (sending data to the `netdata` daemon)
4. Supports any number of data collection **modules**
5. Allows each **module** to have one or more data collection **jobs**
6. Each **job** is collecting one or more metrics from a single data source
## Pull Request Checklist for Node.js Plugins
This is a generic checklist for submitting a new Node.js plugin for Netdata. It is by no means comprehensive.
At minimum, to be buildable and testable, the PR needs to include:
- The module itself, following proper naming conventions: `node.d/<module_dir>/<module_name>.node.js`
- A README.md file for the plugin.
- The configuration file for the module
- A basic configuration for the plugin in the appropriate global config file: `conf.d/node.d.conf`, which is also in JSON format. If the module should be enabled by default, add a section for it in the `modules` dictionary.
- A line for the plugin in the appropriate `Makefile.am` file: `node.d/Makefile.am` under `dist_node_DATA`.
- A line for the plugin configuration file in `conf.d/Makefile.am`: under `dist_nodeconfig_DATA`
- Optionally, chart information in `web/dashboard_info.js`. This generally involves specifying a name and icon for the section, and may include descriptions for the section or individual charts.
## Motivation
Node.js is perfect for asynchronous operations. It is very fast and quite common (actually the whole web is based on it).
Since data collection is not a CPU intensive task, node.js is an ideal solution for it.
`node.d.plugin` is a Netdata plugin that provides an abstraction layer to allow easy and quick development of data
collectors in node.js. It also manages all its data collectors (placed in `/usr/libexec/netdata/node.d`) using a single
instance of node, thus lowering the memory footprint of data collection.
Of course, there can be independent plugins written in node.js (placed in `/usr/libexec/netdata/plugins`).
These will have to be developed using the guidelines of **[External Plugins](/collectors/plugins.d/README.md)**.
To run `node.js` plugins you need to have `node` installed in your system.
In some older systems, the package named `node` is not node.js. It is a terminal emulation program called `ax25-node`.
In this case the node.js package may be referred to as `nodejs`. Once you install `nodejs`, we suggest linking
`/usr/bin/nodejs` to `/usr/bin/node`, so that typing `node` in your terminal, opens node.js.
## configuring `node.d.plugin`
`node.d.plugin` can work even without any configuration. Its default configuration file is
`node.d.conf`. To edit it on your system, run `/etc/netdata/edit-config node.d.conf`.
## configuring `node.d.plugin` modules
`node.d.plugin` modules accept configuration in `JSON` format.
Unfortunately, `JSON` files do not accept comments. So, the best way to describe them is to have markdown text files
with instructions.
`JSON` has a very strict formatting. If you get errors from Netdata at `/var/log/netdata/error.log` that a certain
configuration file cannot be loaded, we suggest to verify it at <http://jsonlint.com/>.
The files in this directory, provide usable examples for configuring each `node.d.plugin` module.
## debugging modules written for node.d.plugin
To test `node.d.plugin` modules, which are placed in `/usr/libexec/netdata/node.d`, you can run `node.d.plugin` by hand,
like this:
```sh
# become user netdata
sudo su -s /bin/sh netdata
# run the plugin in debug mode
/usr/libexec/netdata/plugins.d/node.d.plugin debug 1 X Y Z
```
`node.d.plugin` will run in `debug` mode (lots of debug info), with an update frequency of `1` second, evaluating only
the collector scripts `X` (i.e. `/usr/libexec/netdata/node.d/X.node.js`), `Y` and `Z`.
You can define zero or more modules. If none is defined, `node.d.plugin` will evaluate all modules available.
Keep in mind that if your configs are not in `/etc/netdata`, you should do the following before running `node.d.plugin`:
```sh
export NETDATA_USER_CONFIG_DIR="/path/to/etc/netdata"
```
---
## developing `node.d.plugin` modules
Your data collection module should be split in 3 parts:
- a function to fetch the data from its source. `node.d.plugin` already can fetch data from web sources,
so you don't need to do anything about it for http.
- a function to process/manipulate the fetched data. This function will make a number of calls
to create charts and dimensions and pass the collected values to Netdata.
This is the only function you need to write for collecting http JSON data.
- a `configure` and an `update` function, which take care of your module configuration and data refresh
respectively. You can use the supplied ones.
Your module will automatically be able to process any number of servers, with different settings (even different
data collection frequencies). You will write just the work needed for one and `node.d.plugin` will do the rest.
For each server you are going to fetch data from, you will have to create a `service` (more later).
### writing the data collection module
To provide a module called `mymodule`, you have to create the file `/usr/libexec/netdata/node.d/mymodule.node.js`, with this structure:
```js
// the processor is needed only
// if you need a custom processor
// other than http
netdata.processors.myprocessor = {
name: 'myprocessor',
process: function(service, callback) {
/* do data collection here */
callback(data);
}
};
// this is the mymodule definition
var mymodule = {
processResponse: function(service, data) {
/* send information to the Netdata server here */
},
configure: function(config) {
var eligible_services = 0;
if(typeof(config.servers) === 'undefined' || config.servers.length === 0) {
/*
* create a service using internal defaults;
* this is used for auto-detecting the settings
* if possible
*/
netdata.service({
name: 'a name for this service',
update_every: this.update_every,
module: this,
processor: netdata.processors.myprocessor,
// any other information your processor needs
}).execute(this.processResponse);
eligible_services++;
}
else {
/*
* create a service for each server in the
* configuration file
*/
var len = config.servers.length;
while(len--) {
var server = config.servers[len];
netdata.service({
name: server.name,
update_every: server.update_every,
module: this,
processor: netdata.processors.myprocessor,
// any other information your processor needs
}).execute(this.processResponse);
eligible_services++;
}
}
return eligible_services;
},
update: function(service, callback) {
/*
* this function is called when each service
* created by the configure function, needs to
* collect updated values.
*
* You normally will not need to change it.
*/
service.execute(function(service, data) {
mymodule.processResponse(service, data);
callback();
});
},
};
module.exports = mymodule;
```
#### configure(config)
`configure(config)` is called just once, when `node.d.plugin` starts.
The config file will contain the contents of `/etc/netdata/node.d/mymodule.conf`.
This file should have the following format:
```js
{
"enable_autodetect": false,
"update_every": 5,
"servers": [ { /* server 1 */ }, { /* server 2 */ } ]
}
```
If the config file `/etc/netdata/node.d/mymodule.conf` does not give a `enable_autodetect` or `update_every`, these
will be added by `node.d.plugin`. So your module will always have them.
The configuration file `/etc/netdata/node.d/mymodule.conf` may contain whatever else is needed for `mymodule`.
#### processResponse(data)
`data` may be `null` or whatever the processor specified in the `service` returned.
The `service` object defines a set of functions to allow you send information to the Netdata core about:
1. Charts and dimension definitions
2. Updated values, from the collected values
---
_FIXME: document an operational node.d.plugin data collector - the best example is the
[snmp collector](https://raw.githubusercontent.com/netdata/netdata/master/collectors/node.d.plugin/snmp/snmp.node.js)_

View File

@ -1,33 +0,0 @@
{
"___help_1": "Default options for node.d.plugin - this is a JSON file.",
"___help_2": "Use http://jsonlint.com/ to verify it is valid JSON.",
"___help_3": "------------------------------------------------------------",
"___help_update_every": "Minimum data collection frequency for all node.d/*.node.js modules. Set it to 0 to inherit it from netdata.",
"update_every": 0,
"___help_modules_enable_autodetect": "Enable/disable auto-detection for node.d/*.node.js modules that support it.",
"modules_enable_autodetect": true,
"___help_modules_enable_all": "Enable all node.d/*.node.js modules by default.",
"modules_enable_all": true,
"___help_modules": "Enable/disable the following modules. Give only XXX for node.d/XXX.node.js",
"modules": {
"snmp": {
"enabled": true
}
},
"___help_paths": "Paths that control the operation of node.d.plugin",
"paths": {
"___help_plugins": "The full path to the modules javascript node.d/ directory",
"plugins": null,
"___help_config": "The full path to the modules configs node.d/ directory",
"config": null,
"___help_modules": "Array of paths to add to node.js when searching for node_modules",
"modules": []
}
}

View File

@ -1,303 +0,0 @@
#!/usr/bin/env bash
':' //; exec "$(command -v nodejs || command -v node || echo "ERROR node IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@"
// shebang hack from:
// http://unix.stackexchange.com/questions/65235/universal-node-js-shebang
// Initially this is run as a shell script.
// Then, the second line, finds nodejs or node or js in the system path
// and executes it with the shell parameters.
// netdata
// real-time performance and health monitoring, done right!
// (C) 2017 Costa Tsaousis <costa@tsaousis.gr>
// SPDX-License-Identifier: GPL-3.0-or-later
// --------------------------------------------------------------------------------------------------------------------
'use strict';
// --------------------------------------------------------------------------------------------------------------------
// get NETDATA environment variables
var NETDATA_PLUGINS_DIR = process.env.NETDATA_PLUGINS_DIR || __dirname;
var NETDATA_USER_CONFIG_DIR = process.env.NETDATA_USER_CONFIG_DIR || '@configdir_POST@';
var NETDATA_STOCK_CONFIG_DIR = process.env.NETDATA_STOCK_CONFIG_DIR || '@libconfigdir_POST@';
var NETDATA_UPDATE_EVERY = process.env.NETDATA_UPDATE_EVERY || 1;
var NODE_D_DIR = NETDATA_PLUGINS_DIR + '/../node.d';
// make sure the modules are found
process.mainModule.paths.unshift(NODE_D_DIR + '/node_modules');
process.mainModule.paths.unshift(NODE_D_DIR);
// --------------------------------------------------------------------------------------------------------------------
// load required modules
var fs = require('fs');
var url = require('url');
var util = require('util');
var http = require('http');
var path = require('path');
var extend = require('extend');
var netdata = require('netdata');
// --------------------------------------------------------------------------------------------------------------------
// configuration
// Load the JSON configuration for a module (or for the plugin itself).
// Tries the user configuration first, then the stock configuration, and
// finally falls back to an empty object so callers use internal defaults.
//
// module_filename: its basename is matched against '.plugin' (the plugin's
// own config, <name>.conf directly in the config dirs) or '.node.js'
// (a module config, node.d/<name>.conf).
//
// NOTE(review): if the filename matches neither suffix, ufilename and
// sfilename stay undefined and both reads fail, returning {} -- confirm
// callers only ever pass *.plugin or *.node.js names.
function netdata_read_json_config_file(module_filename) {
var f = path.basename(module_filename);
var ufilename, sfilename;
// plugin config: <name>.conf directly under the config directories
var m = f.match('.plugin' + '$');
if(m !== null) {
ufilename = netdata.options.paths.config + '/' + f.substring(0, m.index) + '.conf';
sfilename = netdata.options.paths.stock_config + '/' + f.substring(0, m.index) + '.conf';
}
// module config: node.d/<name>.conf under the config directories
m = f.match('.node.js' + '$');
if(m !== null) {
ufilename = netdata.options.paths.config + '/node.d/' + f.substring(0, m.index) + '.conf';
sfilename = netdata.options.paths.stock_config + '/node.d/' + f.substring(0, m.index) + '.conf';
}
// the user configuration wins when it exists and parses
try {
netdata.debug('loading module\'s ' + module_filename + ' user-config ' + ufilename);
return JSON.parse(fs.readFileSync(ufilename, 'utf8'));
}
catch(e) {
netdata.error('Cannot read user-configuration file ' + ufilename + ': ' + e.message + '.');
dumpError(e);
}
// fall back to the stock configuration shipped with netdata
try {
netdata.debug('loading module\'s ' + module_filename + ' stock-config ' + sfilename);
return JSON.parse(fs.readFileSync(sfilename, 'utf8'));
}
catch(e) {
netdata.error('Cannot read stock-configuration file ' + sfilename + ': ' + e.message + ', using internal defaults.');
dumpError(e);
}
// nothing readable: signal "use internal defaults"
return {};
}
// internal defaults
extend(true, netdata.options, {
filename: path.basename(__filename),
update_every: NETDATA_UPDATE_EVERY,
paths: {
plugins: NETDATA_PLUGINS_DIR,
config: NETDATA_USER_CONFIG_DIR,
stock_config: NETDATA_STOCK_CONFIG_DIR,
modules: []
},
modules_enable_autodetect: true,
modules_enable_all: true,
modules: {}
});
// load configuration file
netdata.options_loaded = netdata_read_json_config_file(__filename);
extend(true, netdata.options, netdata.options_loaded);
if(!netdata.options.paths.plugins)
netdata.options.paths.plugins = NETDATA_PLUGINS_DIR;
if(!netdata.options.paths.config)
netdata.options.paths.config = NETDATA_USER_CONFIG_DIR;
if(!netdata.options.paths.stock_config)
netdata.options.paths.stock_config = NETDATA_STOCK_CONFIG_DIR;
// console.error('merged netdata object:');
// console.error(util.inspect(netdata, {depth: 10}));
// apply module paths to node.js process
// Prepend every user-configured module search path to node's resolution
// paths. Iterating from the last entry down and unshifting each one keeps
// the configured order: the first configured path ends up searched first.
function applyModulePaths() {
    var extra = netdata.options.paths.modules;
    for (var i = extra.length - 1; i >= 0; i--) {
        process.mainModule.paths.unshift(extra[i]);
    }
}
// --------------------------------------------------------------------------------------------------------------------
// tracing
// Log an exception's stack trace via netdata.debug(), when one is present.
// Safe to call with any caught value: non-objects are ignored, and so are
// objects without a .stack property.
//
// Bug fix: the original only checked `typeof err === 'object'`, which is
// also true for null, so `dumpError(null)` crashed dereferencing
// `null.stack`. The truthiness guard on `err` closes that hole.
function dumpError(err) {
    if (err && typeof err === 'object' && err.stack) {
        netdata.debug(err.stack);
    }
}
// --------------------------------------------------------------------------------------------------------------------
// get command line arguments
{
var found_myself = false;
var found_number = false;
var found_modules = false;
process.argv.forEach(function (val, index, array) {
netdata.debug('PARAM: ' + val);
if(!found_myself) {
if(val === __filename)
found_myself = true;
}
else {
switch(val) {
case 'debug':
netdata.options.DEBUG = true;
netdata.debug('DEBUG enabled');
break;
default:
if(found_number === true) {
if(found_modules === false) {
for(var i in netdata.options.modules)
netdata.options.modules[i].enabled = false;
}
if(typeof netdata.options.modules[val] === 'undefined')
netdata.options.modules[val] = {};
netdata.options.modules[val].enabled = true;
netdata.options.modules_enable_all = false;
netdata.debug('enabled module ' + val);
}
else {
try {
var x = parseInt(val);
if(x > 0) {
netdata.options.update_every = x;
if(netdata.options.update_every < NETDATA_UPDATE_EVERY) {
netdata.options.update_every = NETDATA_UPDATE_EVERY;
netdata.debug('Update frequency ' + x + 's is too low');
}
found_number = true;
netdata.debug('Update frequency set to ' + netdata.options.update_every + ' seconds');
}
else netdata.error('Ignoring parameter: ' + val);
}
catch(e) {
netdata.error('Cannot get value of parameter: ' + val);
dumpError(e);
}
}
break;
}
}
});
}
if(netdata.options.update_every < 1) {
netdata.debug('Adjusting update frequency to 1 second');
netdata.options.update_every = 1;
}
// --------------------------------------------------------------------------------------------------------------------
// find modules
function findModules() {
var found = 0;
var files = fs.readdirSync(NODE_D_DIR);
var len = files.length;
while(len--) {
var m = files[len].match('.node.js' + '$');
if(m !== null) {
var n = files[len].substring(0, m.index);
if(typeof(netdata.options.modules[n]) === 'undefined')
netdata.options.modules[n] = { name: n, enabled: netdata.options.modules_enable_all };
if(netdata.options.modules[n].enabled === true) {
netdata.options.modules[n].name = n;
netdata.options.modules[n].filename = NODE_D_DIR + '/' + files[len];
netdata.options.modules[n].loaded = false;
// load the module
try {
netdata.debug('loading module ' + netdata.options.modules[n].filename);
netdata.options.modules[n].module = require(netdata.options.modules[n].filename);
netdata.options.modules[n].module.name = n;
netdata.debug('loaded module ' + netdata.options.modules[n].name + ' from ' + netdata.options.modules[n].filename);
}
catch(e) {
netdata.options.modules[n].enabled = false;
netdata.error('Cannot load module: ' + netdata.options.modules[n].filename + ' exception: ' + e);
dumpError(e);
continue;
}
// load its configuration
var c = {
enable_autodetect: netdata.options.modules_enable_autodetect,
update_every: netdata.options.update_every
};
var c2 = netdata_read_json_config_file(files[len]);
extend(true, c, c2);
// call module auto-detection / configuration
try {
netdata.modules_configuring++;
netdata.debug('Configuring module ' + netdata.options.modules[n].name);
var serv = netdata.configure(netdata.options.modules[n].module, c, function() {
netdata.debug('Configured module ' + netdata.options.modules[n].name);
netdata.modules_configuring--;
});
netdata.debug('Configuring module ' + netdata.options.modules[n].name + ' reports ' + serv + ' eligible services.');
}
catch(e) {
netdata.modules_configuring--;
netdata.options.modules[n].enabled = false;
netdata.error('Failed module auto-detection: ' + netdata.options.modules[n].name + ' exception: ' + e + ', disabling module.');
dumpError(e);
continue;
}
netdata.options.modules[n].loaded = true;
found++;
}
}
}
// netdata.debug(netdata.options.modules);
return found;
}
if(findModules() === 0) {
netdata.error('Cannot load any .node.js module from: ' + NODE_D_DIR);
netdata.disableNodePlugin();
process.exit(1);
}
// --------------------------------------------------------------------------------------------------------------------
// start
// Poll every 500ms until all modules have finished their asynchronous
// configure() callbacks, then hand control to the netdata main loop.
function start_when_configuring_ends() {
    if (netdata.modules_configuring <= 0) {
        netdata.modules_configuring = 0;
        netdata.start();
    }
    else {
        netdata.debug('Waiting modules configuration, still running ' + netdata.modules_configuring);
        setTimeout(start_when_configuring_ends, 500);
    }
}
//netdata.debug('netdata object:')
//netdata.debug(netdata);

View File

@ -1,7 +0,0 @@
// SPDX-License-Identifier: MIT
// Thin facade over the bundled BER implementation: expose the whole
// namespace plus convenience aliases for its Reader and Writer classes.
var ber = require('./lib/ber/index');

exports.Ber = ber;
exports.BerReader = ber.Reader;
exports.BerWriter = ber.Writer;

View File

@ -1,88 +0,0 @@
// https://github.com/justmoon/node-extend
// SPDX-License-Identifier: MIT
'use strict';
var hasOwn = Object.prototype.hasOwnProperty;
var toStr = Object.prototype.toString;
// Detect arrays, delegating to the native Array.isArray when the runtime
// provides it (ES5+) and falling back to an Object.prototype.toString
// probe on older engines.
var isArray = function isArray(arr) {
    return typeof Array.isArray === 'function'
        ? Array.isArray(arr)
        : toStr.call(arr) === '[object Array]';
};
// Return true only for "plain" objects (object literals and the like),
// rejecting null, arrays, functions, and instances of custom classes.
// Used by extend() to decide which values are safe to merge recursively.
var isPlainObject = function isPlainObject(obj) {
if (!obj || toStr.call(obj) !== '[object Object]') {
return false;
}
// Distinguish literals from class instances: a plain object either has
// its own 'constructor' property or inherits Object's prototype (which
// carries isPrototypeOf).
var hasOwnConstructor = hasOwn.call(obj, 'constructor');
var hasIsPrototypeOf = obj.constructor && obj.constructor.prototype && hasOwn.call(obj.constructor.prototype, 'isPrototypeOf');
// Not own constructor property must be Object
if (obj.constructor && !hasOwnConstructor && !hasIsPrototypeOf) {
return false;
}
// Own properties are enumerated firstly, so to speed up,
// if last one is own, then all properties are own.
var key;
for (key in obj) { /**/ }
return typeof key === 'undefined' || hasOwn.call(obj, key);
};
// jQuery-style extend: copies the enumerable properties of each source
// argument onto the target object and returns the modified target.
// Pass `true` as the first argument for a deep merge, in which plain
// objects and arrays are cloned recursively instead of assigned by
// reference. (Vendored from https://github.com/justmoon/node-extend.)
module.exports = function extend() {
var options, name, src, copy, copyIsArray, clone;
var target = arguments[0];
var i = 1;
var length = arguments.length;
var deep = false;
// Handle a deep copy situation
if (typeof target === 'boolean') {
deep = target;
target = arguments[1] || {};
// skip the boolean and the target
i = 2;
} else if ((typeof target !== 'object' && typeof target !== 'function') || target == null) {
// non-object targets (primitives, null) are replaced by a fresh object
target = {};
}
// every remaining argument is a source to merge into target
for (; i < length; ++i) {
options = arguments[i];
// Only deal with non-null/undefined values
if (options != null) {
// Extend the base object
for (name in options) {
src = target[name];
copy = options[name];
// Prevent never-ending loop
if (target !== copy) {
// Recurse if we're merging plain objects or arrays
if (deep && copy && (isPlainObject(copy) || (copyIsArray = isArray(copy)))) {
if (copyIsArray) {
copyIsArray = false;
clone = src && isArray(src) ? src : [];
} else {
clone = src && isPlainObject(src) ? src : {};
}
// Never move original objects, clone them
target[name] = extend(deep, clone, copy);
// Don't bring in undefined values
} else if (typeof copy !== 'undefined') {
target[name] = copy;
}
}
}
}
}
// Return the modified object
return target;
};

View File

@ -1,10 +0,0 @@
// SPDX-License-Identifier: MIT
module.exports = {
InvalidAsn1Error: function(msg) {
var e = new Error()
e.name = 'InvalidAsn1Error'
e.message = msg || ''
return e
}
}

View File

@ -1,18 +0,0 @@
// SPDX-License-Identifier: MIT
// Aggregate the BER submodules into one flat namespace: every own
// enumerable property of `types` and `errors` is re-exported, plus the
// Reader and Writer classes.
var errors = require('./errors');
var types = require('./types');
var Reader = require('./reader');
var Writer = require('./writer');

Object.keys(types).forEach(function (name) {
    exports[name] = types[name];
});

Object.keys(errors).forEach(function (name) {
    exports[name] = errors[name];
});

exports.Reader = Reader;
exports.Writer = Writer;

View File

@ -1,270 +0,0 @@
// SPDX-License-Identifier: MIT
var assert = require('assert');
var ASN1 = require('./types');
var errors = require('./errors');
///--- Globals
var InvalidAsn1Error = errors.InvalidAsn1Error;
///--- API
// A cursor over a BER-encoded Buffer. Tracks the current read offset and
// the content length decoded by the most recent length-parsing call.
function Reader(data) {
    if (!data || !Buffer.isBuffer(data))
        throw new TypeError('data must be a node Buffer');

    this._buf = data;          // the raw bytes being parsed
    this._size = data.length;

    // "current" parse state
    this._len = 0;             // length of the most recently parsed element
    this._offset = 0;          // index of the next byte to read
}

// Read-only accessors over the internal cursor state.
Object.defineProperties(Reader.prototype, {
    length: { enumerable: true, get: function () { return this._len; } },
    offset: { enumerable: true, get: function () { return this._offset; } },
    remain: { get: function () { return this._size - this._offset; } },
    buffer: { get: function () { return this._buf.slice(this._offset); } }
});

/**
 * Return the next byte as an unsigned number, advancing the offset unless
 * `peek` is truthy. Returns null when no bytes remain.
 *
 * @param {Boolean} peek true means don't move offset.
 * @return {Number} the next byte, null if not enough data.
 */
Reader.prototype.readByte = function (peek) {
    if (this.remain < 1)
        return null;

    var value = this._buf[this._offset] & 0xff;
    if (!peek)
        this._offset += 1;

    return value;
};

// Non-consuming variant of readByte().
Reader.prototype.peek = function () {
    return this.readByte(true);
};
/**
* Reads a (potentially) variable length off the BER buffer. This call is
* not really meant to be called directly, as callers have to manipulate
* the internal buffer afterwards.
*
* As a result of this call, you can call `Reader.length`, until the
* next thing called that does a readLength.
*
* @return {Number} the amount of offset to advance the buffer.
* @throws {InvalidAsn1Error} on bad ASN.1
*/
Reader.prototype.readLength = function(offset) {
if (offset === undefined)
offset = this._offset;
// does NOT advance this._offset; callers consume the field themselves
if (offset >= this._size)
return null;
var lenB = this._buf[offset++] & 0xff;
// NOTE(review): this check can never fire -- (x & 0xff) is always a number
if (lenB === null)
return null;
// high bit set: long form, low 7 bits give the number of length octets
if ((lenB & 0x80) == 0x80) {
lenB &= 0x7f;
if (lenB == 0)
throw InvalidAsn1Error('Indefinite length not supported');
// this reader caps lengths at 4 octets (fits a 32-bit value)
if (lenB > 4)
throw InvalidAsn1Error('encoding too long');
if (this._size - offset < lenB)
return null;
// accumulate the big-endian length octets into this._len
this._len = 0;
for (var i = 0; i < lenB; i++)
this._len = (this._len << 8) + (this._buf[offset++] & 0xff);
} else {
// Wasn't a variable length
this._len = lenB;
}
// offset of the first byte AFTER the length field
return offset;
};
/**
* Parses the next sequence in this BER buffer.
*
* To get the length of the sequence, call `Reader.length`.
*
* @return {Number} the sequence's tag.
*/
/**
 * Parse the header of the next BER sequence: verify the (optional)
 * expected tag, consume the tag and length octets, and leave the offset
 * at the start of the sequence contents. The content length is then
 * available via `Reader.length`.
 *
 * @return {Number} the sequence's tag, or null on incomplete data.
 * @throws {InvalidAsn1Error} when `tag` is given and does not match.
 */
Reader.prototype.readSequence = function (tag) {
    var actual = this.peek();
    if (actual === null)
        return null;

    if (tag !== undefined && tag !== actual)
        throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
            ': got 0x' + actual.toString(16));

    var next = this.readLength(this._offset + 1); // stored in `length`
    if (next === null)
        return null;

    this._offset = next;
    return actual;
};
/**
 * Reads an integer value. The optional tag (default ASN1.Integer) is
 * verified against the next byte in the buffer.
 *
 * @param {Number} tag optional expected tag.
 * @return {Number} the decoded integer, or null if not enough data.
 */
Reader.prototype.readInt = function (tag) {
    if (typeof (tag) !== 'number')
        tag = ASN1.Integer;

    // BUG FIX: previously passed the hard-coded ASN1.Integer to _readTag,
    // silently ignoring a caller-supplied tag.
    return this._readTag(tag);
};

/**
 * Reads a boolean. Any non-zero content octet decodes as true.
 *
 * @param {Number} tag optional expected tag (default ASN1.Boolean).
 * @return {Boolean} the decoded value.
 */
Reader.prototype.readBoolean = function (tag) {
    if (typeof (tag) !== 'number')
        tag = ASN1.Boolean;

    return (this._readTag(tag) === 0 ? false : true);
};

/**
 * Reads an enumeration value (an integer with the Enumeration tag).
 *
 * @param {Number} tag optional expected tag (default ASN1.Enumeration).
 * @return {Number} the decoded value.
 */
Reader.prototype.readEnumeration = function (tag) {
    if (typeof (tag) !== 'number')
        tag = ASN1.Enumeration;

    // BUG FIX: previously passed the hard-coded ASN1.Enumeration to
    // _readTag, silently ignoring a caller-supplied tag.
    return this._readTag(tag);
};
/**
 * Reads an octet string. Returns a UTF-8 decoded string by default, or
 * the raw bytes when `retbuf` is truthy.
 *
 * @param {Number} tag optional expected tag (default ASN1.OctetString).
 * @param {Boolean} retbuf return a Buffer instead of a decoded string.
 * @return {String|Buffer} the content, or null if not enough data.
 * @throws {InvalidAsn1Error} when the tag does not match.
 */
Reader.prototype.readString = function (tag, retbuf) {
    if (!tag)
        tag = ASN1.OctetString;

    var head = this.peek();
    if (head === null)
        return null;

    if (head !== tag)
        throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
                               ': got 0x' + head.toString(16));

    var after = this.readLength(this._offset + 1); // stored in `length`
    if (after === null)
        return null;

    if (this.length > this._size - after)
        return null;

    this._offset = after;

    if (this.length === 0) {
        // NOTE(review): `new Buffer` is deprecated in modern Node.js
        // (Buffer.alloc is the replacement) — kept as-is for runtime
        // compatibility with the old Node versions this file targets.
        return retbuf ? new Buffer(0) : '';
    }

    var bytes = this._buf.slice(this._offset, this._offset + this.length);
    this._offset += this.length;

    return retbuf ? bytes : bytes.toString('utf8');
};
/**
 * Reads an OBJECT IDENTIFIER and returns it in dotted-decimal form
 * (e.g. "1.3.6.1"). Returns null when there is not enough data.
 *
 * @param {Number} tag optional expected tag (default ASN1.OID).
 */
Reader.prototype.readOID = function(tag) {
    if (!tag)
        tag = ASN1.OID;

    // Fetch the content octets as a raw buffer.
    var b = this.readString(tag, true);
    if (b === null)
        return null;

    var values = [];
    var value = 0;

    // Each sub-identifier is base-128 encoded: high bit set on every
    // octet except the last one of the group.
    for (var i = 0; i < b.length; i++) {
        var byte = b[i] & 0xff;

        value <<= 7;
        value += byte & 0x7f;
        if ((byte & 0x80) == 0) {
            // `>>> 0` coerces the accumulator to an unsigned 32-bit value.
            values.push(value >>> 0);
            value = 0;
        }
    }

    // The first encoded sub-identifier packs the first two arcs as
    // (first * 40 + second); unpack them here.
    value = values.shift();
    values.unshift(value % 40);
    values.unshift((value / 40) >> 0);

    return values.join('.');
};
/**
 * Shared helper for the fixed-tag readers: verifies the tag, reads the
 * length, then decodes a big-endian signed integer of up to four bytes.
 * Returns null when the buffer does not (yet) hold the whole value; the
 * read offset is only advanced on success.
 *
 * @param {Number} tag required expected tag.
 * @return {Number} the decoded signed value, or null.
 * @throws {InvalidAsn1Error} on a tag mismatch or a value longer than
 *         four bytes.
 */
Reader.prototype._readTag = function(tag) {
    assert.ok(tag !== undefined);

    var b = this.peek();

    if (b === null)
        return null;

    if (b !== tag)
        throw InvalidAsn1Error('Expected 0x' + tag.toString(16) +
                               ': got 0x' + b.toString(16));

    var o = this.readLength(this._offset + 1); // stored in `length`
    if (o === null)
        return null;

    if (this.length > 4)
        throw InvalidAsn1Error('Integer too long: ' + this.length);

    if (this.length > this._size - o)
        return null;
    this._offset = o;

    // First content byte carries the sign bit.
    var fb = this._buf[this._offset];
    var value = 0;

    for (var i = 0; i < this.length; i++) {
        value <<= 8;
        value |= (this._buf[this._offset++] & 0xff);
    }

    // Sign-extend values shorter than 4 bytes. `i` still holds the byte
    // count after the loop; full 4-byte values are already sign-correct
    // through 32-bit overflow, hence the `i !== 4` guard.
    if ((fb & 0x80) == 0x80 && i !== 4)
        value -= (1 << (i * 8));

    return value >> 0;
};
///--- Exported API
module.exports = Reader;

View File

@ -1,35 +0,0 @@
// SPDX-License-Identifier: MIT

// Universal ASN.1 tag numbers (ITU-T X.680) used by the BER
// reader/writer in this package.
module.exports = {
    EOC: 0,
    Boolean: 1,
    Integer: 2,
    BitString: 3,
    OctetString: 4,
    Null: 5,
    OID: 6,                 // OBJECT IDENTIFIER
    ObjectDescriptor: 7,
    External: 8,
    Real: 9,
    Enumeration: 10,
    PDV: 11,
    Utf8String: 12,
    RelativeOID: 13,
    Sequence: 16,
    Set: 17,
    NumericString: 18,
    PrintableString: 19,
    T61String: 20,
    VideotexString: 21,
    IA5String: 22,
    UTCTime: 23,
    GeneralizedTime: 24,
    GraphicString: 25,
    VisibleString: 26,
    // NOTE(review): per X.680 GeneralString is tag 27, UniversalString 28
    // and BMPString 30 — the values below appear off by one but are kept
    // unchanged for compatibility with existing consumers of these
    // constants.
    GeneralString: 28,
    UniversalString: 29,
    CharacterString: 30,
    BMPString: 31,
    Constructor: 32,        // bit flag: constructed encoding
    Context: 128            // bit flag: context-specific class
}

View File

@ -1,318 +0,0 @@
// SPDX-License-Identifier: MIT
var assert = require('assert');
var ASN1 = require('./types');
var errors = require('./errors');
///--- Globals
var InvalidAsn1Error = errors.InvalidAsn1Error;
var DEFAULT_OPTS = {
size: 1024,
growthFactor: 8
};
///--- Helpers
function merge(from, to) {
assert.ok(from);
assert.equal(typeof(from), 'object');
assert.ok(to);
assert.equal(typeof(to), 'object');
var keys = Object.getOwnPropertyNames(from);
keys.forEach(function(key) {
if (to[key])
return;
var value = Object.getOwnPropertyDescriptor(from, key);
Object.defineProperty(to, key, value);
});
return to;
}
///--- API

/**
 * BER writer over a growable Buffer.
 *
 * @param {Object} options optional; merged over the defaults
 *        ({size: 1024, growthFactor: 8}).
 */
function Writer(options) {
    options = merge(DEFAULT_OPTS, options || {});

    // NOTE(review): `new Buffer` is deprecated in modern Node.js
    // (Buffer.alloc is the replacement) — kept for old-runtime compat.
    this._buf = new Buffer(options.size || 1024);
    this._size = this._buf.length;
    this._offset = 0;
    this._options = options;

    // A list of offsets in the buffer where we need to insert
    // sequence tag/len pairs.
    this._seq = [];
}

// The finished BER encoding; throws if any sequence is still open.
Object.defineProperty(Writer.prototype, 'buffer', {
    get: function () {
        if (this._seq.length)
            throw new InvalidAsn1Error(this._seq.length + ' unended sequence(s)');

        return (this._buf.slice(0, this._offset));
    }
});
// Append one raw byte to the output, growing the buffer if needed.
Writer.prototype.writeByte = function (b) {
    if (typeof (b) !== 'number')
        throw new TypeError('argument must be a Number');

    this._ensure(1);
    this._buf[this._offset++] = b;
};
/**
 * Write a signed 32-bit integer in minimal two's-complement BER form.
 *
 * @param {Number} i the value to encode.
 * @param {Number} tag optional tag (default ASN1.Integer).
 * @throws {TypeError} when `i` is not a number.
 */
Writer.prototype.writeInt = function (i, tag) {
    if (typeof (i) !== 'number')
        throw new TypeError('argument must be a Number');
    if (typeof (tag) !== 'number')
        tag = ASN1.Integer;

    // Shrink to the minimal byte count: drop a leading byte while the top
    // 9 bits are all zeros (small positive) or all ones (small negative).
    var sz = 4;
    while ((((i & 0xff800000) === 0) || ((i & 0xff800000) === 0xff800000 >> 0)) &&
           (sz > 1)) {
        sz--;
        i <<= 8;
    }
    // (A dead `if (sz > 4) throw` guard was removed here: sz starts at 4
    // and is only ever decremented, so it could never exceed 4.)

    this._ensure(2 + sz);
    this._buf[this._offset++] = tag;
    this._buf[this._offset++] = sz;

    // Emit big-endian; the value was left-shifted above so the next byte
    // to write is always in the top octet.
    while (sz-- > 0) {
        this._buf[this._offset++] = ((i & 0xff000000) >>> 24);
        i <<= 8;
    }
};
// Emit an ASN.1 NULL: tag byte followed by a zero length.
Writer.prototype.writeNull = function () {
    this.writeByte(ASN1.Null);
    this.writeByte(0x00);
};

// An enumeration is encoded exactly like an integer, just with a
// different default tag.
Writer.prototype.writeEnumeration = function (i, tag) {
    if (typeof (i) !== 'number')
        throw new TypeError('argument must be a Number');

    if (typeof (tag) !== 'number')
        tag = ASN1.Enumeration;

    return this.writeInt(i, tag);
};

// Booleans use a single content octet: 0xff for true, 0x00 for false.
Writer.prototype.writeBoolean = function (b, tag) {
    if (typeof (b) !== 'boolean')
        throw new TypeError('argument must be a Boolean');

    if (typeof (tag) !== 'number')
        tag = ASN1.Boolean;

    this._ensure(3);
    this._buf[this._offset++] = tag;
    this._buf[this._offset++] = 0x01;
    this._buf[this._offset++] = b ? 0xff : 0x00;
};
// Write a UTF-8 string with a tag and length prefix.
Writer.prototype.writeString = function (s, tag) {
    if (typeof (s) !== 'string')
        throw new TypeError('argument must be a string (was: ' + typeof (s) + ')');

    if (typeof (tag) !== 'number')
        tag = ASN1.OctetString;

    var len = Buffer.byteLength(s);
    this.writeByte(tag);
    this.writeLength(len);

    if (len) {
        this._ensure(len);
        this._buf.write(s, this._offset);
        this._offset += len;
    }
};

// Copy a raw buffer into the output. With a numeric tag, a tag/length
// header is emitted first; without one, `buf` is assumed to already be a
// complete tag-length-value unit.
Writer.prototype.writeBuffer = function (buf, tag) {
    if (!Buffer.isBuffer(buf))
        throw new TypeError('argument must be a buffer');

    // If no tag is specified we will assume `buf` already contains tag and length
    if (typeof (tag) === 'number') {
        this.writeByte(tag);
        this.writeLength(buf.length);
    }

    this._ensure(buf.length);
    buf.copy(this._buf, this._offset, 0, buf.length);
    this._offset += buf.length;
};

// Write each string in `strings` as its own tagged value.
Writer.prototype.writeStringArray = function (strings, tag) {
    if (!(strings instanceof Array))
        throw new TypeError('argument must be an Array[String]');

    var writer = this;
    strings.forEach(function (s) {
        writer.writeString(s, tag);
    });
};
// This is really to solve DER cases, but whatever for now

/**
 * Write an OBJECT IDENTIFIER given in dotted-decimal form. The first two
 * arcs are packed into a single octet (first * 40 + second); remaining
 * arcs are base-128 encoded with the high bit set on every octet but the
 * last.
 *
 * NOTE(review): the validation regex requires at least four dot-separated
 * components, so shorter (but technically valid) OIDs are rejected.
 */
Writer.prototype.writeOID = function(s, tag) {
    if (typeof(s) !== 'string')
        throw new TypeError('argument must be a string');
    if (typeof(tag) !== 'number')
        tag = ASN1.OID;

    if (!/^([0-9]+\.){3,}[0-9]+$/.test(s))
        throw new Error('argument is not a valid OID string');

    // Emit one sub-identifier in base-128, most significant group first.
    function encodeOctet(bytes, octet) {
        if (octet < 128) {
            bytes.push(octet);
        } else if (octet < 16384) {         // fits in 2 groups of 7 bits
            bytes.push((octet >>> 7) | 0x80);
            bytes.push(octet & 0x7F);
        } else if (octet < 2097152) {       // 3 groups
            bytes.push((octet >>> 14) | 0x80);
            bytes.push(((octet >>> 7) | 0x80) & 0xFF);
            bytes.push(octet & 0x7F);
        } else if (octet < 268435456) {     // 4 groups
            bytes.push((octet >>> 21) | 0x80);
            bytes.push(((octet >>> 14) | 0x80) & 0xFF);
            bytes.push(((octet >>> 7) | 0x80) & 0xFF);
            bytes.push(octet & 0x7F);
        } else {                            // 5 groups (full 32-bit range)
            bytes.push(((octet >>> 28) | 0x80) & 0xFF);
            bytes.push(((octet >>> 21) | 0x80) & 0xFF);
            bytes.push(((octet >>> 14) | 0x80) & 0xFF);
            bytes.push(((octet >>> 7) | 0x80) & 0xFF);
            bytes.push(octet & 0x7F);
        }
    }

    var tmp = s.split('.');
    var bytes = [];

    // First two arcs share a single octet.
    bytes.push(parseInt(tmp[0], 10) * 40 + parseInt(tmp[1], 10));
    tmp.slice(2).forEach(function(b) {
        encodeOctet(bytes, parseInt(b, 10));
    });

    var self = this;
    this._ensure(2 + bytes.length);
    this.writeByte(tag);
    this.writeLength(bytes.length);

    bytes.forEach(function(b) {
        self.writeByte(b);
    });
};
// Write a BER length field: short form up to 0x7f, otherwise 0x80 | count
// followed by `count` big-endian length bytes. Lengths above 0xffffff
// throw InvalidAsn1Error.
Writer.prototype.writeLength = function (len) {
    if (typeof (len) !== 'number')
        throw new TypeError('argument must be a Number');

    this._ensure(4);

    if (len <= 0x7f) {
        // Short form: the length fits in a single octet.
        this._buf[this._offset++] = len;
    } else {
        // Long form: figure out how many length octets are needed.
        var count;
        if (len <= 0xff)
            count = 1;
        else if (len <= 0xffff)
            count = 2;
        else if (len <= 0xffffff)
            count = 3;
        else
            throw new InvalidAsn1Error('Length too long (> 4 bytes)');

        this._buf[this._offset++] = 0x80 | count;
        for (var shift = (count - 1) * 8; shift >= 0; shift -= 8)
            this._buf[this._offset++] = len >> shift;
    }
};
// Open a constructed TLV: emit the tag, remember where its length field
// belongs, and reserve three bytes for it (fixed up by endSequence()).
Writer.prototype.startSequence = function (tag) {
    if (typeof (tag) !== 'number')
        tag = ASN1.Sequence | ASN1.Constructor;

    this.writeByte(tag);
    this._seq.push(this._offset);
    this._ensure(3);
    this._offset += 3;
};
// Close the most recently opened sequence: compute the content length,
// resize the reserved 3-byte gap to the encoded length's actual size
// (shifting the content that follows), and write the length field.
Writer.prototype.endSequence = function() {
    var seq = this._seq.pop();
    var start = seq + 3;            // content begins after the reserved gap
    var len = this._offset - start;

    if (len <= 0x7f) {
        // 1-byte length: pull the content back two bytes.
        this._shift(start, len, -2);

        this._buf[seq] = len;
    } else if (len <= 0xff) {
        // 2-byte length: pull the content back one byte.
        this._shift(start, len, -1);

        this._buf[seq] = 0x81;
        this._buf[seq + 1] = len;
    } else if (len <= 0xffff) {
        // 3-byte length: fits the reserved gap exactly; no shift needed.
        this._buf[seq] = 0x82;
        this._buf[seq + 1] = len >> 8;
        this._buf[seq + 2] = len;
    } else if (len <= 0xffffff) {
        // 4-byte length: push the content forward one byte.
        this._shift(start, len, 1);

        this._buf[seq] = 0x83;
        this._buf[seq + 1] = len >> 16;
        this._buf[seq + 2] = len >> 8;
        this._buf[seq + 3] = len;
    } else {
        throw new InvalidAsn1Error('Sequence too long');
    }
};
// Move `len` bytes starting at `start` by `shift` positions (which may be
// negative) and adjust the write offset by the same amount.
Writer.prototype._shift = function (start, len, shift) {
    assert.ok(start !== undefined);
    assert.ok(len !== undefined);
    assert.ok(shift);

    this._buf.copy(this._buf, start + shift, start, start + len);
    this._offset += shift;
};

// Guarantee at least `len` writable bytes, reallocating into a larger
// buffer (size * growthFactor, plus `len` if still short) when needed.
Writer.prototype._ensure = function (len) {
    assert.ok(len);

    if (this._size - this._offset < len) {
        var grown = this._size * this._options.growthFactor;
        if (grown - this._offset < len)
            grown += len;

        var replacement = new Buffer(grown);
        this._buf.copy(replacement, 0, 0, this._offset);
        this._buf = replacement;
        this._size = grown;
    }
};
///--- Exported API
module.exports = Writer;

File diff suppressed because it is too large Load Diff

View File

@ -1,654 +0,0 @@
'use strict';
// netdata
// real-time performance and health monitoring, done right!
// (C) 2016 Costa Tsaousis <costa@tsaousis.gr>
// SPDX-License-Identifier: GPL-3.0-or-later
var url = require('url');
var http = require('http');
var util = require('util');
/*
var netdata = require('netdata');
var example_chart = {
id: 'id', // the unique id of the chart
name: 'name', // the name of the chart
title: 'title', // the title of the chart
units: 'units', // the units of the chart dimensions
family: 'family', // the family of the chart
context: 'context', // the context of the chart
type: netdata.chartTypes.line, // the type of the chart
priority: 0, // the priority relative to others in the same family
update_every: 1, // the expected update frequency of the chart
dimensions: {
'dim1': {
id: 'dim1', // the unique id of the dimension
name: 'name', // the name of the dimension
algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
multiplier: 1, // the multiplier
divisor: 1, // the divisor
hidden: false, // is hidden (boolean)
},
'dim2': {
id: 'dim2', // the unique id of the dimension
name: 'name', // the name of the dimension
algorithm: 'absolute', // the id of the netdata algorithm
multiplier: 1, // the multiplier
divisor: 1, // the divisor
hidden: false, // is hidden (boolean)
}
// add as many dimensions as needed
}
};
*/
// The plugin helper object exported by this module: chart registration,
// scheduling and logging for node.d modules all hang off of it.
var netdata = {
    options: {
        filename: __filename,   // used as a prefix in log/debug lines
        DEBUG: false,
        update_every: 1         // default collection frequency (seconds)
    },

    // Dimension algorithm ids understood by the netdata daemon.
    chartAlgorithms: {
        incremental: 'incremental',
        absolute: 'absolute',
        percentage_of_absolute_row: 'percentage-of-absolute-row',
        percentage_of_incremental_row: 'percentage-of-incremental-row'
    },

    // Chart rendering types understood by the netdata daemon.
    chartTypes: {
        line: 'line',
        area: 'area',
        stacked: 'stacked'
    },

    services: new Array(),      // committed services, run by runAllServices()
    modules_configuring: 0,
    charts: {},                 // all charts ever created, keyed by chart id
    // Response processors: fetch data for a service and hand the body to a
    // callback. Only an HTTP processor is provided.
    processors: {
        http: {
            name: 'http',

            // Perform `service.request` via node's http module. Invokes
            // `callback(body)` on success or `callback(null)` on any
            // failure (non-200 status, read error, request error).
            process: function(service, callback) {
                var __DEBUG = netdata.options.DEBUG;

                if(__DEBUG === true)
                    netdata.debug(service.module.name + ': ' + service.name + ': making ' + this.name + ' request: ' + netdata.stringify(service.request));

                var req = http.request(service.request, function(response) {
                    if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': got server response...');

                    // `end` guards against invoking the callback twice.
                    var end = false;
                    var data = '';
                    response.setEncoding('utf8');

                    if(response.statusCode !== 200) {
                        if(end === false) {
                            service.error('Got HTTP code ' + response.statusCode + ', failed to get data.');
                            end = true;
                            return callback(null);
                        }
                    }

                    response.on('data', function(chunk) {
                        if(end === false) data += chunk;
                    });

                    response.on('error', function() {
                        if(end === false) {
                            service.error(': Read error, failed to get data.');
                            end = true;
                            return callback(null);
                        }
                    });

                    response.on('end', function() {
                        if(end === false) {
                            if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': read completed.');
                            end = true;
                            return callback(data);
                        }
                    });
                });

                req.on('error', function(e) {
                    if(__DEBUG === true) netdata.debug('Failed to make request: ' + netdata.stringify(service.request) + ', message: ' + e.message);
                    service.error('Failed to make request, message: ' + e.message);
                    return callback(null);
                });

                // write data to request body
                if(typeof service.postData !== 'undefined' && service.request.method === 'POST') {
                    if(__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': posting data: ' + service.postData);
                    req.write(service.postData);
                }

                req.end();
            }
        }
    },
    // Render any object as a string (depth-limited util.inspect).
    stringify: function(obj) {
        return util.inspect(obj, {depth: 10});
    },

    // Left-pad a value with zeros to two characters.
    zeropad2: function(s) {
        return ("00" + s).slice(-2);
    },

    // Format a Date (default: now) as "YYYY-MM-DD HH:MM:SS" for log lines.
    logdate: function(d) {
        if(typeof d === 'undefined') d = new Date();
        return d.getFullYear().toString() + '-' + this.zeropad2(d.getMonth() + 1) + '-' + this.zeropad2(d.getDate())
            + ' ' + this.zeropad2(d.getHours()) + ':' + this.zeropad2(d.getMinutes()) + ':' + this.zeropad2(d.getSeconds());
    },

    // show debug info, if debug is enabled
    debug: function(msg) {
        if(this.options.DEBUG === true) {
            console.error(this.logdate() + ': ' + netdata.options.filename + ': DEBUG: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString());
        }
    },

    // log an error (always goes to stderr)
    error: function(msg) {
        console.error(this.logdate() + ': ' + netdata.options.filename + ': ERROR: ' + ((typeof(msg) === 'object')?netdata.stringify(msg):msg).toString());
    },

    // send data to netdata (the plugin protocol goes over stdout)
    send: function(msg) {
        console.log(msg.toString());
    },
    // Factory: decorate `service` with the state and methods needed to
    // collect and emit chart data, and align its first run to the
    // update_every boundary. The caller must later invoke commit() to get
    // the service scheduled.
    service: function(service) {
        if(typeof service === 'undefined')
            service = {};

        var now = Date.now();

        service._current_chart = null;  // the current chart we work on
        service._queue = '';            // data to be sent to netdata

        service.error_reported = false; // error log flood control

        service.added = false;          // added to netdata.services
        service.enabled = true;
        service.updates = 0;
        service.running = false;
        service.started = 0;
        service.ended = 0;

        if(typeof service.module === 'undefined') {
            service.module = { name: 'not-defined-module' };
            service.error('Attempted to create service without a module.');
            service.enabled = false;
        }

        if(typeof service.name === 'undefined') {
            service.name = 'unnamed@' + service.module.name + '/' + now;
        }

        if(typeof service.processor === 'undefined')
            service.processor = netdata.processors.http;

        // update_every falls back service -> module -> global option, and
        // is never allowed to be faster than the global option.
        if(typeof service.update_every === 'undefined')
            service.update_every = service.module.update_every;

        if(typeof service.update_every === 'undefined')
            service.update_every = netdata.options.update_every;

        if(service.update_every < netdata.options.update_every)
            service.update_every = netdata.options.update_every;

        // align the runs
        service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);

        // Register this service with the scheduler (idempotent).
        service.commit = function() {
            if(this.added !== true) {
                this.added = true;

                var now = Date.now();
                this.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);

                netdata.services.push(this);
                if(netdata.options.DEBUG === true) netdata.debug(this.module.name + ': ' + this.name + ': service committed.');
            }
        };
service.execute = function(responseProcessor) {
var __DEBUG = netdata.options.DEBUG;
if(service.enabled === false)
return responseProcessor(null);
this.module.active++;
this.running = true;
this.started = Date.now();
this.updates++;
if(__DEBUG === true)
netdata.debug(this.module.name + ': ' + this.name + ': making ' + this.processor.name + ' request: ' + netdata.stringify(this));
this.processor.process(this, function(response) {
service.ended = Date.now();
service.duration = service.ended - service.started;
if(typeof response === 'undefined')
response = null;
if(response !== null)
service.errorClear();
if(__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': processing ' + service.processor.name + ' response (received in ' + (service.ended - service.started).toString() + ' ms)');
try {
responseProcessor(service, response);
}
catch(e) {
netdata.error(e);
service.error("responseProcessor failed process response data.");
}
service.running = false;
service.module.active--;
if(service.module.active < 0) {
service.module.active = 0;
if(__DEBUG === true)
netdata.debug(service.module.name + ': active module counter below zero.');
}
if(service.module.active === 0) {
// check if we run under configure
if(service.module.configure_callback !== null) {
if(__DEBUG === true)
netdata.debug(service.module.name + ': configuration finish callback called from processResponse().');
var configure_callback = service.module.configure_callback;
service.module.configure_callback = null;
configure_callback();
}
}
});
};
service.update = function() {
if(netdata.options.DEBUG === true)
netdata.debug(this.module.name + ': ' + this.name + ': starting data collection...');
this.module.update(this, function() {
if(netdata.options.DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': data collection ended in ' + service.duration.toString() + ' ms.');
});
};
service.error = function(message) {
if(this.error_reported === false) {
netdata.error(this.module.name + ': ' + this.name + ': ' + message);
this.error_reported = true;
}
else if(netdata.options.DEBUG === true)
netdata.debug(this.module.name + ': ' + this.name + ': ' + message);
};
service.errorClear = function() {
this.error_reported = false;
};
service.queue = function(txt) {
this._queue += txt + '\n';
};
service._send_chart_to_netdata = function(chart) {
// internal function to send a chart to netdata
this.queue('CHART "' + chart.id + '" "' + chart.name + '" "' + chart.title + '" "' + chart.units + '" "' + chart.family + '" "' + chart.context + '" "' + chart.type + '" ' + chart.priority.toString() + ' ' + chart.update_every.toString());
if(typeof(chart.dimensions) !== 'undefined') {
var dims = Object.keys(chart.dimensions);
var len = dims.length;
while(len--) {
var d = chart.dimensions[dims[len]];
this.queue('DIMENSION "' + d.id + '" "' + d.name + '" "' + d.algorithm + '" ' + d.multiplier.toString() + ' ' + d.divisor.toString() + ' ' + ((d.hidden === true) ? 'hidden' : '').toString());
d._created = true;
d._updated = false;
}
}
chart._created = true;
chart._updated = false;
};
// begin data collection for a chart
service.begin = function(chart) {
if(this._current_chart !== null && this._current_chart !== chart) {
this.error('Called begin() for chart ' + chart.id + ' while chart ' + this._current_chart.id + ' is still open. Closing it.');
this.end();
}
if(typeof(chart.id) === 'undefined' || netdata.charts[chart.id] !== chart) {
this.error('Called begin() for chart ' + chart.id + ' that is not mine. Where did you find it? Ignoring it.');
return false;
}
if(netdata.options.DEBUG === true) netdata.debug('setting current chart to ' + chart.id);
this._current_chart = chart;
this._current_chart._began = true;
if(this._current_chart._dimensions_count !== 0) {
if(this._current_chart._created === false || this._current_chart._updated === true)
this._send_chart_to_netdata(this._current_chart);
var now = this.ended;
this.queue('BEGIN ' + this._current_chart.id + ' ' + ((this._current_chart._last_updated > 0)?((now - this._current_chart._last_updated) * 1000):'').toString());
}
// else this.error('Called begin() for chart ' + chart.id + ' which is empty.');
this._current_chart._last_updated = now;
this._current_chart._began = true;
this._current_chart._counter++;
return true;
};
// set a collected value for a chart
// we do most things on the first value we attempt to set
service.set = function(dimension, value) {
if(this._current_chart === null) {
this.error('Called set(' + dimension + ', ' + value + ') without an open chart.');
return false;
}
if(typeof(this._current_chart.dimensions[dimension]) === 'undefined') {
this.error('Called set(' + dimension + ', ' + value + ') but dimension "' + dimension + '" does not exist in chart "' + this._current_chart.id + '".');
return false;
}
if(typeof value === 'undefined' || value === null)
return false;
if(this._current_chart._dimensions_count !== 0)
this.queue('SET ' + dimension + ' = ' + value.toString());
return true;
};
// end data collection for the current chart - after calling begin()
service.end = function() {
if(this._current_chart !== null && this._current_chart._began === false) {
this.error('Called end() without an open chart.');
return false;
}
if(this._current_chart._dimensions_count !== 0) {
this.queue('END');
netdata.send(this._queue);
}
this._queue = '';
this._current_chart._began = false;
if(netdata.options.DEBUG === true) netdata.debug('sent chart ' + this._current_chart.id);
this._current_chart = null;
return true;
};
// discard the collected values for the current chart - after calling begin()
service.flush = function() {
if(this._current_chart === null || this._current_chart._began === false) {
this.error('Called flush() without an open chart.');
return false;
}
this._queue = '';
this._current_chart._began = false;
this._current_chart = null;
return true;
};
// create a netdata chart
service.chart = function(id, chart) {
var __DEBUG = netdata.options.DEBUG;
if(typeof(netdata.charts[id]) === 'undefined') {
netdata.charts[id] = {
_created: false,
_updated: true,
_began: false,
_counter: 0,
_last_updated: 0,
_dimensions_count: 0,
id: id,
name: id,
title: 'untitled chart',
units: 'a unit',
family: '',
context: '',
type: netdata.chartTypes.line,
priority: 50000,
update_every: netdata.options.update_every,
dimensions: {}
};
}
var c = netdata.charts[id];
if(typeof(chart.name) !== 'undefined' && chart.name !== c.name) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its name');
c.name = chart.name;
c._updated = true;
}
if(typeof(chart.title) !== 'undefined' && chart.title !== c.title) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its title');
c.title = chart.title;
c._updated = true;
}
if(typeof(chart.units) !== 'undefined' && chart.units !== c.units) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its units');
c.units = chart.units;
c._updated = true;
}
if(typeof(chart.family) !== 'undefined' && chart.family !== c.family) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its family');
c.family = chart.family;
c._updated = true;
}
if(typeof(chart.context) !== 'undefined' && chart.context !== c.context) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its context');
c.context = chart.context;
c._updated = true;
}
if(typeof(chart.type) !== 'undefined' && chart.type !== c.type) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its type');
c.type = chart.type;
c._updated = true;
}
if(typeof(chart.priority) !== 'undefined' && chart.priority !== c.priority) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its priority');
c.priority = chart.priority;
c._updated = true;
}
if(typeof(chart.update_every) !== 'undefined' && chart.update_every !== c.update_every) {
if(__DEBUG === true) netdata.debug('chart ' + id + ' updated its update_every from ' + c.update_every + ' to ' + chart.update_every);
c.update_every = chart.update_every;
c._updated = true;
}
if(typeof(chart.dimensions) !== 'undefined') {
var dims = Object.keys(chart.dimensions);
var len = dims.length;
while(len--) {
var x = dims[len];
if(typeof(c.dimensions[x]) === 'undefined') {
c._dimensions_count++;
c.dimensions[x] = {
_created: false,
_updated: false,
id: x, // the unique id of the dimension
name: x, // the name of the dimension
algorithm: netdata.chartAlgorithms.absolute, // the id of the netdata algorithm
multiplier: 1, // the multiplier
divisor: 1, // the divisor
hidden: false // is hidden (boolean)
};
if(__DEBUG === true) netdata.debug('chart ' + id + ' created dimension ' + x);
c._updated = true;
}
var dim = chart.dimensions[x];
var d = c.dimensions[x];
if(typeof(dim.name) !== 'undefined' && d.name !== dim.name) {
if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its name');
d.name = dim.name;
d._updated = true;
}
if(typeof(dim.algorithm) !== 'undefined' && d.algorithm !== dim.algorithm) {
if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its algorithm from ' + d.algorithm + ' to ' + dim.algorithm);
d.algorithm = dim.algorithm;
d._updated = true;
}
if(typeof(dim.multiplier) !== 'undefined' && d.multiplier !== dim.multiplier) {
if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its multiplier');
d.multiplier = dim.multiplier;
d._updated = true;
}
if(typeof(dim.divisor) !== 'undefined' && d.divisor !== dim.divisor) {
if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its divisor');
d.divisor = dim.divisor;
d._updated = true;
}
if(typeof(dim.hidden) !== 'undefined' && d.hidden !== dim.hidden) {
if(__DEBUG === true) netdata.debug('chart ' + id + ', dimension ' + x + ' updated its hidden status');
d.hidden = dim.hidden;
d._updated = true;
}
if(d._updated) c._updated = true;
}
}
//if(netdata.options.DEBUG === true) netdata.debug(netdata.charts);
return netdata.charts[id];
};
return service;
},
    // Scheduler loop: run every due, idle service, then re-arm a timer at
    // one tenth of the global update_every.
    runAllServices: function() {
        if(netdata.options.DEBUG === true) netdata.debug('runAllServices()');

        var now = Date.now();
        var len = netdata.services.length;
        while(len--) {
            var service = netdata.services[len];

            if(service.enabled === false || service.running === true) continue;
            if(now <= service.next_run) continue;

            service.update();

            // service.update() may take time; re-read the clock before
            // computing the next aligned run.
            now = Date.now();
            service.next_run = now - (now % (service.update_every * 1000)) + (service.update_every * 1000);
        }

        // 1/10th of update_every in pause
        setTimeout(netdata.runAllServices, netdata.options.update_every * 100);
    },
start: function() {
if(netdata.options.DEBUG === true) this.debug('started, services: ' + netdata.stringify(this.services));
if(this.services.length === 0) {
this.disableNodePlugin();
// eslint suggested way to exit
var exit = process.exit;
exit(1);
}
else this.runAllServices();
},
// disable the whole node.js plugin
disableNodePlugin: function() {
this.send('DISABLE');
// eslint suggested way to exit
var exit = process.exit;
exit(1);
},
requestFromParams: function(protocol, hostname, port, path, method) {
return {
protocol: protocol,
hostname: hostname,
port: port,
path: path,
//family: 4,
method: method,
headers: {
'Content-Type': 'application/x-www-form-urlencoded',
'Connection': 'keep-alive'
},
agent: new http.Agent({
keepAlive: true,
keepAliveMsecs: netdata.options.update_every * 1000,
maxSockets: 2, // it must be 2 to work
maxFreeSockets: 1
})
};
},
requestFromURL: function(a_url) {
var u = url.parse(a_url);
return netdata.requestFromParams(u.protocol, u.hostname, u.port, u.path, 'GET');
},
    // Configure a module: apply update_every/enable_autodetect from
    // `config`, run the module's own configure(), and arrange for
    // `callback` to fire when configuration finishes (immediately when the
    // module registered no services, otherwise from the last in-flight
    // service's execute()). Returns the number of eligible services.
    configure: function(module, config, callback) {
        if(netdata.options.DEBUG === true) this.debug(module.name + ': configuring (update_every: ' + this.options.update_every + ')...');

        module.active = 0;
        module.update_every = this.options.update_every;

        if(typeof config.update_every !== 'undefined')
            module.update_every = config.update_every;

        module.enable_autodetect = (config.enable_autodetect)?true:false;

        if(typeof(callback) === 'function')
            module.configure_callback = callback;
        else
            module.configure_callback = null;

        var added = module.configure(config);

        if(netdata.options.DEBUG === true) this.debug(module.name + ': configured, reporting ' + added + ' eligible services.');

        if(module.configure_callback !== null && added === 0) {
            if(netdata.options.DEBUG === true) this.debug(module.name + ': configuration finish callback called from configure().');

            var configure_callback = module.configure_callback;
            module.configure_callback = null;
            configure_callback();
        }

        return added;
    }
if(netdata.options.DEBUG === true) netdata.debug('loaded netdata from:', __filename);
module.exports = netdata;

View File

@ -1,607 +0,0 @@
// SPDX-License-Identifier: MIT
/*
JavaScript XML Library
Plus a bunch of object utility functions
Usage:
var XML = require('pixl-xml');
var myxmlstring = '<?xml version="1.0"?><Document>' +
'<Simple>Hello</Simple>' +
'<Node Key="Value">Content</Node>' +
'</Document>';
var tree = XML.parse( myxmlstring, { preserveAttributes: true });
console.log( tree );
tree.Simple = "Hello2";
tree.Node._Attribs.Key = "Value2";
tree.Node._Data = "Content2";
tree.New = "I added this";
console.log( XML.stringify( tree, 'Document' ) );
Copyright (c) 2004 - 2015 Joseph Huckaby
Released under the MIT License
This version is for Node.JS, converted in 2012.
*/
var fs = require('fs');
var indent_string = "\t";
var xml_header = '<?xml version="1.0"?>';
var sort_args = null;
var re_valid_tag_name = /^\w[\w\-\:]*$/;
// Class constructor for the XML parser: accepts an args hash (whose keys
// are copied onto the instance), a raw XML string/Buffer, or a file path
// to read. Parsing happens immediately when there is text.
var XML = exports.XML = function XML(args) {
    // class constructor for XML parser class
    // pass in args hash or text to parse
    if (!args) args = '';
    if (isa_hash(args)) {
        for (var key in args) this[key] = args[key];
    }
    else this.text = args || '';

    // stringify buffers
    if (this.text instanceof Buffer) {
        this.text = this.text.toString();
    }

    if (!this.text.match(/^\s*</)) {
        // Doesn't look like markup: try the value as a file path.
        // (readFileSync itself throws when the file is missing; the check
        // below catches an empty result.)
        var file = this.text;
        this.text = fs.readFileSync(file, { encoding: 'utf8' });
        if (!this.text) throw new Error("File not found: " + file);
    }

    this.tree = {};
    this.errors = [];
    this.piNodeList = [];
    this.dtdNodeList = [];
    this.documentNodeName = '';

    // Fold the tree keys when case-insensitive parsing was requested.
    if (this.lowerCase) {
        this.attribsKey = this.attribsKey.toLowerCase();
        this.dataKey = this.dataKey.toLowerCase();
    }

    this.patTag.lastIndex = 0;
    if (this.text) this.parse();
}
// Parser defaults and precompiled patterns (shared on the prototype).
XML.prototype.preserveAttributes = false;   // put attribs in a sub-hash when true
XML.prototype.lowerCase = false;            // fold tag/attrib names to lower case

XML.prototype.patTag = /([^<]*?)<([^>]+)>/g;            // text before a tag + the tag body
XML.prototype.patSpecialTag = /^\s*([\!\?])/;           // any "!" or "?" tag
XML.prototype.patPITag = /^\s*\?/;                      // processing instruction
XML.prototype.patCommentTag = /^\s*\!--/;
XML.prototype.patDTDTag = /^\s*\!DOCTYPE/;
XML.prototype.patCDATATag = /^\s*\!\s*\[\s*CDATA/;
XML.prototype.patStandardTag = /^\s*(\/?)([\w\-\:\.]+)\s*(.*)$/;  // closing slash, name, raw attribs
XML.prototype.patSelfClosing = /\/\s*$/;
XML.prototype.patAttrib = new RegExp("([\\w\\-\\:\\.]+)\\s*=\\s*([\\\"\\'])([^\\2]*?)\\2", "g");
XML.prototype.patPINode = /^\s*\?\s*([\w\-\:]+)\s*(.*)$/;
XML.prototype.patEndComment = /--$/;
XML.prototype.patNextClose = /([^>]*?)>/g;
XML.prototype.patExternalDTDNode = new RegExp("^\\s*\\!DOCTYPE\\s+([\\w\\-\\:]+)\\s+(SYSTEM|PUBLIC)\\s+\\\"([^\\\"]+)\\\"");
XML.prototype.patInlineDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[/;
XML.prototype.patEndDTD = /\]$/;
XML.prototype.patDTDNode = /^\s*\!DOCTYPE\s+([\w\-\:]+)\s+\[(.*)\]/;
XML.prototype.patEndCDATA = /\]\]$/;
XML.prototype.patCDATANode = /^\s*\!\s*\[\s*CDATA\s*\[([^]*)\]\]/;

// Keys used in the parsed tree for attributes and text content
// (lower-cased in the constructor when `lowerCase` is set).
XML.prototype.attribsKey = '_Attribs';
XML.prototype.dataKey = '_Data';
XML.prototype.parse = function(branch, name) {
// parse text into XML tree, recurse for nested nodes
if (!branch) branch = this.tree;
if (!name) name = null;
var foundClosing = false;
var matches = null;
// match each tag, plus preceding text
while ( matches = this.patTag.exec(this.text) ) {
var before = matches[1];
var tag = matches[2];
// text leading up to tag = content of parent node
if (before.match(/\S/)) {
if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = '';
branch[this.dataKey] += trim(decode_entities(before));
}
// parse based on tag type
if (tag.match(this.patSpecialTag)) {
// special tag
if (tag.match(this.patPITag)) tag = this.parsePINode(tag);
else if (tag.match(this.patCommentTag)) tag = this.parseCommentNode(tag);
else if (tag.match(this.patDTDTag)) tag = this.parseDTDNode(tag);
else if (tag.match(this.patCDATATag)) {
tag = this.parseCDATANode(tag);
if (typeof(branch[this.dataKey]) != 'undefined') branch[this.dataKey] += ' '; else branch[this.dataKey] = '';
branch[this.dataKey] += trim(decode_entities(tag));
} // cdata
else {
this.throwParseError( "Malformed special tag", tag );
break;
} // error
if (tag == null) break;
continue;
} // special tag
else {
// Tag is standard, so parse name and attributes (if any)
var matches = tag.match(this.patStandardTag);
if (!matches) {
this.throwParseError( "Malformed tag", tag );
break;
}
var closing = matches[1];
var nodeName = this.lowerCase ? matches[2].toLowerCase() : matches[2];
var attribsRaw = matches[3];
// If this is a closing tag, make sure it matches its opening tag
if (closing) {
if (nodeName == (name || '')) {
foundClosing = 1;
break;
}
else {
this.throwParseError( "Mismatched closing tag (expected </" + name + ">)", tag );
break;
}
} // closing tag
else {
// Not a closing tag, so parse attributes into hash. If tag
// is self-closing, no recursive parsing is needed.
var selfClosing = !!attribsRaw.match(this.patSelfClosing);
var leaf = {};
var attribs = leaf;
// preserve attributes means they go into a sub-hash named "_Attribs"
// the XML composer honors this for restoring the tree back into XML
if (this.preserveAttributes) {
leaf[this.attribsKey] = {};
attribs = leaf[this.attribsKey];
}
// parse attributes
this.patAttrib.lastIndex = 0;
while ( matches = this.patAttrib.exec(attribsRaw) ) {
var key = this.lowerCase ? matches[1].toLowerCase() : matches[1];
attribs[ key ] = decode_entities( matches[3] );
} // foreach attrib
// if no attribs found, but we created the _Attribs subhash, clean it up now
if (this.preserveAttributes && !num_keys(attribs)) {
delete leaf[this.attribsKey];
}
// Recurse for nested nodes
if (!selfClosing) {
this.parse( leaf, nodeName );
if (this.error()) break;
}
// Compress into simple node if text only
var num_leaf_keys = num_keys(leaf);
if ((typeof(leaf[this.dataKey]) != 'undefined') && (num_leaf_keys == 1)) {
leaf = leaf[this.dataKey];
}
else if (!num_leaf_keys) {
leaf = '';
}
// Add leaf to parent branch
if (typeof(branch[nodeName]) != 'undefined') {
if (isa_array(branch[nodeName])) {
branch[nodeName].push( leaf );
}
else {
var temp = branch[nodeName];
branch[nodeName] = [ temp, leaf ];
}
}
else {
branch[nodeName] = leaf;
}
if (this.error() || (branch == this.tree)) break;
} // not closing
} // standard tag
} // main reg exp
// Make sure we found the closing tag
if (name && !foundClosing) {
this.throwParseError( "Missing closing tag (expected </" + name + ">)", name );
}
// If we are the master node, finish parsing and setup our doc node
if (branch == this.tree) {
if (typeof(this.tree[this.dataKey]) != 'undefined') delete this.tree[this.dataKey];
if (num_keys(this.tree) > 1) {
this.throwParseError( 'Only one top-level node is allowed in document', first_key(this.tree) );
return;
}
this.documentNodeName = first_key(this.tree);
if (this.documentNodeName) {
this.tree = this.tree[this.documentNodeName];
}
}
};
XML.prototype.throwParseError = function(key, tag) {
// log error and locate current line number in source XML document
var parsedSource = this.text.substring(0, this.patTag.lastIndex);
var eolMatch = parsedSource.match(/\n/g);
var lineNum = (eolMatch ? eolMatch.length : 0) + 1;
lineNum -= tag.match(/\n/) ? tag.match(/\n/g).length : 0;
this.errors.push({
type: 'Parse',
key: key,
text: '<' + tag + '>',
line: lineNum
});
// Throw actual error (must wrap parse in try/catch)
throw new Error( this.getLastError() );
};
XML.prototype.error = function() {
// return number of errors
return this.errors.length;
};
XML.prototype.getError = function(error) {
// get formatted error
var text = '';
if (!error) return '';
text = (error.type || 'General') + ' Error';
if (error.code) text += ' ' + error.code;
text += ': ' + error.key;
if (error.line) text += ' on line ' + error.line;
if (error.text) text += ': ' + error.text;
return text;
};
XML.prototype.getLastError = function() {
// Get most recently thrown error in plain text format
if (!this.error()) return '';
return this.getError( this.errors[this.errors.length - 1] );
};
XML.prototype.parsePINode = function(tag) {
// Parse Processor Instruction Node, e.g. <?xml version="1.0"?>
if (!tag.match(this.patPINode)) {
this.throwParseError( "Malformed processor instruction", tag );
return null;
}
this.piNodeList.push( tag );
return tag;
};
XML.prototype.parseCommentNode = function(tag) {
// Parse Comment Node, e.g. <!-- hello -->
var matches = null;
this.patNextClose.lastIndex = this.patTag.lastIndex;
while (!tag.match(this.patEndComment)) {
if (matches = this.patNextClose.exec(this.text)) {
tag += '>' + matches[1];
}
else {
this.throwParseError( "Unclosed comment tag", tag );
return null;
}
}
this.patTag.lastIndex = this.patNextClose.lastIndex;
return tag;
};
XML.prototype.parseDTDNode = function(tag) {
// Parse Document Type Descriptor Node, e.g. <!DOCTYPE ... >
var matches = null;
if (tag.match(this.patExternalDTDNode)) {
// tag is external, and thus self-closing
this.dtdNodeList.push( tag );
}
else if (tag.match(this.patInlineDTDNode)) {
// Tag is inline, so check for nested nodes.
this.patNextClose.lastIndex = this.patTag.lastIndex;
while (!tag.match(this.patEndDTD)) {
if (matches = this.patNextClose.exec(this.text)) {
tag += '>' + matches[1];
}
else {
this.throwParseError( "Unclosed DTD tag", tag );
return null;
}
}
this.patTag.lastIndex = this.patNextClose.lastIndex;
// Make sure complete tag is well-formed, and push onto DTD stack.
if (tag.match(this.patDTDNode)) {
this.dtdNodeList.push( tag );
}
else {
this.throwParseError( "Malformed DTD tag", tag );
return null;
}
}
else {
this.throwParseError( "Malformed DTD tag", tag );
return null;
}
return tag;
};
XML.prototype.parseCDATANode = function(tag) {
// Parse CDATA Node, e.g. <![CDATA[Brooks & Shields]]>
var matches = null;
this.patNextClose.lastIndex = this.patTag.lastIndex;
while (!tag.match(this.patEndCDATA)) {
if (matches = this.patNextClose.exec(this.text)) {
tag += '>' + matches[1];
}
else {
this.throwParseError( "Unclosed CDATA tag", tag );
return null;
}
}
this.patTag.lastIndex = this.patNextClose.lastIndex;
if (matches = tag.match(this.patCDATANode)) {
return matches[1];
}
else {
this.throwParseError( "Malformed CDATA tag", tag );
return null;
}
};
XML.prototype.getTree = function() {
// get reference to parsed XML tree
return this.tree;
};
XML.prototype.compose = function() {
// compose tree back into XML
var raw = compose_xml( this.tree, this.documentNodeName );
var body = raw.substring( raw.indexOf("\n") + 1, raw.length );
var xml = '';
if (this.piNodeList.length) {
for (var idx = 0, len = this.piNodeList.length; idx < len; idx++) {
xml += '<' + this.piNodeList[idx] + '>' + "\n";
}
}
else {
xml += xml_header + "\n";
}
if (this.dtdNodeList.length) {
for (var idx = 0, len = this.dtdNodeList.length; idx < len; idx++) {
xml += '<' + this.dtdNodeList[idx] + '>' + "\n";
}
}
xml += body;
return xml;
};
//
// Static Utility Functions:
//
var parse_xml = exports.parse = function parse_xml(text, opts) {
// turn text into XML tree quickly
if (!opts) opts = {};
opts.text = text;
var parser = new XML(opts);
return parser.error() ? parser.getLastError() : parser.getTree();
};
var trim = exports.trim = function trim(text) {
// strip whitespace from beginning and end of string
if (text == null) return '';
if (text && text.replace) {
text = text.replace(/^\s+/, "");
text = text.replace(/\s+$/, "");
}
return text;
};
var encode_entities = exports.encodeEntities = function encode_entities(text) {
// Simple entitize exports.for = function for composing XML
if (text == null) return '';
if (text && text.replace) {
text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST
text = text.replace(/</g, "&lt;");
text = text.replace(/>/g, "&gt;");
}
return text;
};
var encode_attrib_entities = exports.encodeAttribEntities = function encode_attrib_entities(text) {
// Simple entitize exports.for = function for composing XML attributes
if (text == null) return '';
if (text && text.replace) {
text = text.replace(/\&/g, "&amp;"); // MUST BE FIRST
text = text.replace(/</g, "&lt;");
text = text.replace(/>/g, "&gt;");
text = text.replace(/\"/g, "&quot;");
text = text.replace(/\'/g, "&apos;");
}
return text;
};
var decode_entities = exports.decodeEntities = function decode_entities(text) {
// Decode XML entities into raw ASCII
if (text == null) return '';
if (text && text.replace && text.match(/\&/)) {
text = text.replace(/\&lt\;/g, "<");
text = text.replace(/\&gt\;/g, ">");
text = text.replace(/\&quot\;/g, '"');
text = text.replace(/\&apos\;/g, "'");
text = text.replace(/\&amp\;/g, "&"); // MUST BE LAST
}
return text;
};
var compose_xml = exports.stringify = function compose_xml(node, name, indent) {
// Compose node into XML including attributes
// Recurse for child nodes
var xml = "";
// If this is the root node, set the indent to 0
// and setup the XML header (PI node)
if (!indent) {
indent = 0;
xml = xml_header + "\n";
if (!name) {
// no name provided, assume content is wrapped in it
name = first_key(node);
node = node[name];
}
}
// Setup the indent text
var indent_text = "";
for (var k = 0; k < indent; k++) indent_text += indent_string;
if ((typeof(node) == 'object') && (node != null)) {
// node is object -- now see if it is an array or hash
if (!node.length) { // what about zero-length array?
// node is hash
xml += indent_text + "<" + name;
var num_keys = 0;
var has_attribs = 0;
for (var key in node) num_keys++; // there must be a better way...
if (node["_Attribs"]) {
has_attribs = 1;
var sorted_keys = hash_keys_to_array(node["_Attribs"]).sort();
for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
var key = sorted_keys[idx];
xml += " " + key + "=\"" + encode_attrib_entities(node["_Attribs"][key]) + "\"";
}
} // has attribs
if (num_keys > has_attribs) {
// has child elements
xml += ">";
if (node["_Data"]) {
// simple text child node
xml += encode_entities(node["_Data"]) + "</" + name + ">\n";
} // just text
else {
xml += "\n";
var sorted_keys = hash_keys_to_array(node).sort();
for (var idx = 0, len = sorted_keys.length; idx < len; idx++) {
var key = sorted_keys[idx];
if ((key != "_Attribs") && key.match(re_valid_tag_name)) {
// recurse for node, with incremented indent value
xml += compose_xml( node[key], key, indent + 1 );
} // not _Attribs key
} // foreach key
xml += indent_text + "</" + name + ">\n";
} // real children
}
else {
// no child elements, so self-close
xml += "/>\n";
}
} // standard node
else {
// node is array
for (var idx = 0; idx < node.length; idx++) {
// recurse for node in array with same indent
xml += compose_xml( node[idx], name, indent );
}
} // array of nodes
} // complex node
else {
// node is simple string
xml += indent_text + "<" + name + ">" + encode_entities(node) + "</" + name + ">\n";
} // simple text node
return xml;
};
var always_array = exports.alwaysArray = function always_array(obj, key) {
// if object is not array, return array containing object
// if key is passed, work like XMLalwaysarray() instead
if (key) {
if ((typeof(obj[key]) != 'object') || (typeof(obj[key].length) == 'undefined')) {
var temp = obj[key];
delete obj[key];
obj[key] = new Array();
obj[key][0] = temp;
}
return null;
}
else {
if ((typeof(obj) != 'object') || (typeof(obj.length) == 'undefined')) { return [ obj ]; }
else return obj;
}
};
var hash_keys_to_array = exports.hashKeysToArray = function hash_keys_to_array(hash) {
// convert hash keys to array (discard values)
var array = [];
for (var key in hash) array.push(key);
return array;
};
var isa_hash = exports.isaHash = function isa_hash(arg) {
// determine if arg is a hash
return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) == 'undefined') );
};
var isa_array = exports.isaArray = function isa_array(arg) {
// determine if arg is an array or is array-like
if (typeof(arg) == 'array') return true;
return( !!arg && (typeof(arg) == 'object') && (typeof(arg.length) != 'undefined') );
};
var first_key = exports.firstKey = function first_key(hash) {
// return first key from hash (unordered)
for (var key in hash) return key;
return null; // no keys in hash
};
var num_keys = exports.numKeys = function num_keys(hash) {
// count the number of keys in a hash
var count = 0;
for (var a in hash) count++;
return count;
};

View File

@ -1,13 +0,0 @@
# SPDX-License-Identifier: GPL-3.0-or-later
# THIS IS NOT A COMPLETE Makefile
# IT IS INCLUDED BY ITS PARENT'S Makefile.am
# IT IS REQUIRED TO REFERENCE ALL FILES RELATIVE TO THE PARENT
# install these files
dist_node_DATA += snmp/snmp.node.js
# dist_nodeconfig_DATA += snmp/snmp.conf
# do not install these files, but include them in the distribution
dist_noinst_DATA += snmp/README.md snmp/Makefile.inc

View File

@ -1,445 +0,0 @@
<!--
title: "SNMP device monitoring with Netdata"
custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/node.d.plugin/snmp/README.md
sidebar_label: "SNMP"
-->
# SNMP device monitoring with Netdata
Collects data from any SNMP device and uses the [net-snmp](https://github.com/markabrahams/node-net-snmp) module.
It supports:
- all SNMP versions: SNMPv1, SNMPv2c and SNMPv3
- any number of SNMP devices
- each SNMP device can be used to collect data for any number of charts
- each chart may have any number of dimensions
- each SNMP device may have a different update frequency
- each SNMP device will accept one or more batches to report values (you can set `max_request_size` per SNMP server, to control the size of batches).
## Requirements
- `nodejs` minimum required version 4
## Configuration
You will need to create the file `/etc/netdata/node.d/snmp.conf` with data like the following.
In this example:
- the SNMP device is `10.11.12.8`.
- the SNMP community is `public`.
- we will update the values every 10 seconds (`update_every: 10` under the server `10.11.12.8`).
- we define 2 charts `snmp_switch.bandwidth_port1` and `snmp_switch.bandwidth_port2`, each having 2 dimensions: `in` and `out`. Note that the charts and dimensions must not contain any white space or special characters, other than `.` and `_`.
```json
{
"enable_autodetect": false,
"update_every": 5,
"max_request_size": 100,
"servers": [
{
"hostname": "10.11.12.8",
"community": "public",
"update_every": 10,
"max_request_size": 50,
"options": {
"timeout": 10000
},
"charts": {
"snmp_switch.bandwidth_port1": {
"title": "Switch Bandwidth for port 1",
"units": "kilobits/s",
"type": "area",
"priority": 1,
"family": "ports",
"dimensions": {
"in": {
"oid": "1.3.6.1.2.1.2.2.1.10.1",
"algorithm": "incremental",
"multiplier": 8,
"divisor": 1024,
"offset": 0
},
"out": {
"oid": "1.3.6.1.2.1.2.2.1.16.1",
"algorithm": "incremental",
"multiplier": -8,
"divisor": 1024,
"offset": 0
}
}
},
"snmp_switch.bandwidth_port2": {
"title": "Switch Bandwidth for port 2",
"units": "kilobits/s",
"type": "area",
"priority": 1,
"family": "ports",
"dimensions": {
"in": {
"oid": "1.3.6.1.2.1.2.2.1.10.2",
"algorithm": "incremental",
"multiplier": 8,
"divisor": 1024,
"offset": 0
},
"out": {
"oid": "1.3.6.1.2.1.2.2.1.16.2",
"algorithm": "incremental",
"multiplier": -8,
"divisor": 1024,
"offset": 0
}
}
}
}
}
]
}
```
`update_every` is the update frequency for each server, in seconds.
`max_request_size` limits the maximum number of OIDs that will be requested in a single call. The default is 50. Lower this number of you get `TooBig` errors in Netdata's `error.log`.
`family` sets the name of the submenu of the dashboard each chart will appear under.
`multiplier` and `divisor` are passed by the plugin to the Netdata daemon and are applied to the metric to convert it properly to `units`. For incremental counters with the exception of Counter64 type metrics, `offset` is added to the metric from within the SNMP plugin. This means that the value you will see in debug mode in the `DEBUG: setting current chart to... SET` line for a metric will not have been multiplied or divided, but it will have had the offset added to it.
<details markdown="1"><summary><b>Caution: Counter64 metrics do not support `offset` (issue #5028).</b></summary>
The SNMP plugin supports Counter64 metrics with the only limitation that the `offset` parameter should not be defined. Due to the way Javascript handles large numbers and the fact that the offset is applied to metrics inside the plugin, the offset will be ignored silently.
</details>
If you need to define many charts using incremental OIDs, you can use something like this:
```json
{
"enable_autodetect": false,
"update_every": 10,
"servers": [
{
"hostname": "10.11.12.8",
"community": "public",
"update_every": 10,
"options": {
"timeout": 20000
},
"charts": {
"snmp_switch.bandwidth_port": {
"title": "Switch Bandwidth for port ",
"units": "kilobits/s",
"type": "area",
"priority": 1,
"family": "ports",
"multiply_range": [
1,
24
],
"dimensions": {
"in": {
"oid": "1.3.6.1.2.1.2.2.1.10.",
"algorithm": "incremental",
"multiplier": 8,
"divisor": 1024,
"offset": 0
},
"out": {
"oid": "1.3.6.1.2.1.2.2.1.16.",
"algorithm": "incremental",
"multiplier": -8,
"divisor": 1024,
"offset": 0
}
}
}
}
}
]
}
```
This is like the previous, but the option `multiply_range` given, will multiply the current chart from `1` to `24` inclusive, producing 24 charts in total for the 24 ports of the switch `10.11.12.8`.
Each of the 24 new charts will have its id (1-24) appended at:
1. its chart unique id, i.e. `snmp_switch.bandwidth_port1` to `snmp_switch.bandwidth_port24`
2. its `title`, i.e. `Switch Bandwidth for port 1` to `Switch Bandwidth for port 24`
3. its `oid` (for all dimensions), i.e. dimension `in` will be `1.3.6.1.2.1.2.2.1.10.1` to `1.3.6.1.2.1.2.2.1.10.24`
4. its priority (which will be incremented for each chart so that the charts will appear on the dashboard in this order)
The `options` given for each server, are:
- `port` - UDP port to send requests too. Defaults to `161`.
- `retries` - number of times to re-send a request. Defaults to `1`.
- `sourceAddress` - IP address from which SNMP requests should originate, there is no default for this option, the operating system will select an appropriate source address when the SNMP request is sent.
- `sourcePort` - UDP port from which SNMP requests should originate, defaults to an ephemeral port selected by the operation system.
- `timeout` - number of milliseconds to wait for a response before re-trying or failing. Defaults to `5000`.
- `transport` - specify the transport to use, can be either `udp4` or `udp6`. Defaults to `udp4`.
- `version` - either `0` (v1) or `1` (v2) or `3` (v3). Defaults to `0`.
- `idBitsSize` - either `16` or `32`. Defaults to `32`. Used to reduce the size of the generated id for compatibility with some older devices.
## SNMPv3
To use SNMPv3:
- use `user` instead of `community`
- set `version` to 3
User syntax:
```json
{
"enable_autodetect": false,
"update_every": 10,
"servers": [
{
"hostname": "10.11.12.8",
"user": {
"name": "userName",
"level": 3,
"authProtocol": "3",
"authKey": "authKey",
"privProtocol": "2",
"privKey": "privKey"
},
"update_every": 10,
"options": {
"version": 3
},
"charts": {
}
}
]
}
```
Security levels (`level`):
- 1 is `noAuthNoPriv`
- 2 is `authNoPriv`
- 3 is `authPriv`
Authentication protocols (`authProtocol`):
- "1" is `none`
- "2" is `md5`
- "3" is `sha`
Privacy protocols (`privProtocol`):
- "1" is `none`
- "2" is `des`
For additional details please see [net-snmp module readme](https://github.com/markabrahams/node-net-snmp#snmpcreatev3session-target-user-options).
## Retrieving names from snmp
You can append a value retrieved from SNMP to the title, by adding `titleoid` to the chart.
You can set a dimension name to a value retrieved from SNMP, by adding `oidname` to the dimension.
Both of the above will participate in `multiply_range`.
## Testing the configuration
To test it, you can run:
```sh
/usr/libexec/netdata/plugins.d/node.d.plugin 1 snmp
```
The above will run it on your console and you will be able to see what Netdata sees, but also errors. You can get a very detailed output by appending `debug` to the command line.
If it works, restart Netdata to activate the snmp collector and refresh the dashboard (if your SNMP device responds with a delay, you may need to refresh the dashboard in a few seconds).
## Data collection speed
Keep in mind that many SNMP switches and routers are very slow. They may not be able to report values per second. If you run `node.d.plugin` in `debug` mode, it will report the time it took for the SNMP device to respond. My switch, for example, needs 7-8 seconds to respond for the traffic on 24 ports (48 OIDs, in/out).
Also, if you use many SNMP clients on the same SNMP device at the same time, values may be skipped. This is a problem of the SNMP device, not this collector.
## Finding OIDs
Use `snmpwalk`, like this:
```sh
snmpwalk -t 20 -v 1 -O fn -c public 10.11.12.8
```
- `-t 20` is the timeout in seconds
- `-v 1` is the SNMP version
- `-O fn` will display full OIDs in numeric format (you may want to run it also without this option to see human readable output of OIDs)
- `-c public` is the SNMP community
- `10.11.12.8` is the SNMP device
Keep in mind that `snmpwalk` outputs the OIDs with a dot in front them. You should remove this dot when adding OIDs to the configuration file of this collector.
## Example: Linksys SRW2024P
This is what I use for my Linksys SRW2024P. It creates:
1. A chart for power consumption (it is a PoE switch)
2. Two charts for packets received (total packets received and packets received with errors)
3. One chart for packets output
4. 24 charts, one for each port of the switch. It also appends the port names, as defined at the switch, to the chart titles.
This switch also reports various other metrics, like snmp, packets per port, etc. Unfortunately it does not report CPU utilization or backplane utilization.
This switch has a very slow SNMP processors. To respond, it needs about 8 seconds, so I have set the refresh frequency (`update_every`) to 15 seconds.
```json
{
"enable_autodetect": false,
"update_every": 5,
"servers": [
{
"hostname": "10.11.12.8",
"community": "public",
"update_every": 15,
"options": {
"timeout": 20000,
"version": 1
},
"charts": {
"snmp_switch.power": {
"title": "Switch Power Supply",
"units": "watts",
"type": "line",
"priority": 10,
"family": "power",
"dimensions": {
"supply": {
"oid": ".1.3.6.1.2.1.105.1.3.1.1.2.1",
"algorithm": "absolute",
"multiplier": 1,
"divisor": 1,
"offset": 0
},
"used": {
"oid": ".1.3.6.1.2.1.105.1.3.1.1.4.1",
"algorithm": "absolute",
"multiplier": 1,
"divisor": 1,
"offset": 0
}
}
},
"snmp_switch.input": {
"title": "Switch Packets Input",
"units": "packets/s",
"type": "area",
"priority": 20,
"family": "IP",
"dimensions": {
"receives": {
"oid": ".1.3.6.1.2.1.4.3.0",
"algorithm": "incremental",
"multiplier": 1,
"divisor": 1,
"offset": 0
},
"discards": {
"oid": ".1.3.6.1.2.1.4.8.0",
"algorithm": "incremental",
"multiplier": 1,
"divisor": 1,
"offset": 0
}
}
},
"snmp_switch.input_errors": {
"title": "Switch Received Packets with Errors",
"units": "packets/s",
"type": "line",
"priority": 30,
"family": "IP",
"dimensions": {
"bad_header": {
"oid": ".1.3.6.1.2.1.4.4.0",
"algorithm": "incremental",
"multiplier": 1,
"divisor": 1,
"offset": 0
},
"bad_address": {
"oid": ".1.3.6.1.2.1.4.5.0",
"algorithm": "incremental",
"multiplier": 1,
"divisor": 1,
"offset": 0
},
"unknown_protocol": {
"oid": ".1.3.6.1.2.1.4.7.0",
"algorithm": "incremental",
"multiplier": 1,
"divisor": 1,
"offset": 0
}
}
},
"snmp_switch.output": {
"title": "Switch Output Packets",
"units": "packets/s",
"type": "line",
"priority": 40,
"family": "IP",
"dimensions": {
"requests": {
"oid": ".1.3.6.1.2.1.4.10.0",
"algorithm": "incremental",
"multiplier": 1,
"divisor": 1,
"offset": 0
},
"discards": {
"oid": ".1.3.6.1.2.1.4.11.0",
"algorithm": "incremental",
"multiplier": -1,
"divisor": 1,
"offset": 0
},
"no_route": {
"oid": ".1.3.6.1.2.1.4.12.0",
"algorithm": "incremental",
"multiplier": -1,
"divisor": 1,
"offset": 0
}
}
},
"snmp_switch.bandwidth_port": {
"title": "Switch Bandwidth for port ",
"titleoid": ".1.3.6.1.2.1.31.1.1.1.18.",
"units": "kilobits/s",
"type": "area",
"priority": 100,
"family": "ports",
"multiply_range": [
1,
24
],
"dimensions": {
"in": {
"oid": ".1.3.6.1.2.1.2.2.1.10.",
"algorithm": "incremental",
"multiplier": 8,
"divisor": 1024,
"offset": 0
},
"out": {
"oid": ".1.3.6.1.2.1.2.2.1.16.",
"algorithm": "incremental",
"multiplier": -8,
"divisor": 1024,
"offset": 0
}
}
}
}
}
]
}
```

View File

@ -1,527 +0,0 @@
'use strict';
// SPDX-License-Identifier: GPL-3.0-or-later
// netdata snmp module
// This program will connect to one or more SNMP Agents
//
// example configuration in /etc/netdata/node.d/snmp.conf
/*
{
"enable_autodetect": false,
"update_every": 5,
"max_request_size": 50,
"servers": [
{
"hostname": "10.11.12.8",
"community": "public",
"update_every": 10,
"max_request_size": 50,
"options": { "timeout": 10000 },
"charts": {
"snmp_switch.bandwidth_port1": {
"title": "Switch Bandwidth for port 1",
"units": "kilobits/s",
"type": "area",
"priority": 1,
"dimensions": {
"in": {
"oid": ".1.3.6.1.2.1.2.2.1.10.1",
"algorithm": "incremental",
"multiplier": 8,
"divisor": 1024,
"offset": 0
},
"out": {
"oid": ".1.3.6.1.2.1.2.2.1.16.1",
"algorithm": "incremental",
"multiplier": -8,
"divisor": 1024,
"offset": 0
}
}
},
"snmp_switch.bandwidth_port2": {
"title": "Switch Bandwidth for port 2",
"units": "kilobits/s",
"type": "area",
"priority": 1,
"dimensions": {
"in": {
"oid": ".1.3.6.1.2.1.2.2.1.10.2",
"algorithm": "incremental",
"multiplier": 8,
"divisor": 1024,
"offset": 0
},
"out": {
"oid": ".1.3.6.1.2.1.2.2.1.16.2",
"algorithm": "incremental",
"multiplier": -8,
"divisor": 1024,
"offset": 0
}
}
}
}
}
]
}
*/
// You can also give ranges of charts like the following.
// This will append 1-24 to id, title, oid (on each dimension)
// so that 24 charts will be created.
/*
{
"enable_autodetect": false,
"update_every": 10,
"max_request_size": 50,
"servers": [
{
"hostname": "10.11.12.8",
"community": "public",
"update_every": 10,
"max_request_size": 50,
"options": { "timeout": 20000 },
"charts": {
"snmp_switch.bandwidth_port": {
"title": "Switch Bandwidth for port ",
"units": "kilobits/s",
"type": "area",
"priority": 1,
"multiply_range": [ 1, 24 ],
"dimensions": {
"in": {
"oid": ".1.3.6.1.2.1.2.2.1.10.",
"algorithm": "incremental",
"multiplier": 8,
"divisor": 1024,
"offset": 0
},
"out": {
"oid": ".1.3.6.1.2.1.2.2.1.16.",
"algorithm": "incremental",
"multiplier": -8,
"divisor": 1024,
"offset": 0
}
}
}
}
}
]
}
*/
var net_snmp = require('net-snmp');
var extend = require('extend');
var netdata = require('netdata');
if (netdata.options.DEBUG === true) netdata.debug('loaded', __filename, ' plugin');
netdata.processors.snmp = {
name: 'snmp',
fixoid: function (oid) {
if (typeof oid !== 'string')
return oid;
if (oid.charAt(0) === '.')
return oid.substring(1, oid.length);
return oid;
},
prepare: function (service) {
var __DEBUG = netdata.options.DEBUG;
if (typeof service.snmp_oids === 'undefined' || service.snmp_oids === null || service.snmp_oids.length === 0) {
// this is the first time we see this service
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': preparing ' + this.name + ' OIDs');
// build an index of all OIDs
service.snmp_oids_index = {};
var chart_keys = Object.keys(service.request.charts);
var chart_keys_len = chart_keys.length;
while (chart_keys_len--) {
var c = chart_keys[chart_keys_len];
var chart = service.request.charts[c];
// for each chart
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c);
if (typeof chart.titleoid !== 'undefined') {
service.snmp_oids_index[this.fixoid(chart.titleoid)] = {
type: 'title',
link: chart
};
}
var dim_keys = Object.keys(chart.dimensions);
var dim_keys_len = dim_keys.length;
while (dim_keys_len--) {
var d = dim_keys[dim_keys_len];
var dim = chart.dimensions[d];
// for each dimension in the chart
var oid = this.fixoid(dim.oid);
var oidname = this.fixoid(dim.oidname);
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': indexing ' + this.name + ' chart: ' + c + ', dimension: ' + d + ', OID: ' + oid + ", OID name: " + oidname);
// link it to the point we need to set the value to
service.snmp_oids_index[oid] = {
type: 'value',
link: dim
};
if (typeof oidname !== 'undefined')
service.snmp_oids_index[oidname] = {
type: 'name',
link: dim
};
// and set the value to null
dim.value = null;
}
}
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': indexed ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids_index));
// now create the array of OIDs needed by net-snmp
service.snmp_oids = Object.keys(service.snmp_oids_index);
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': final list of ' + this.name + ' OIDs: ' + netdata.stringify(service.snmp_oids));
service.snmp_oids_cleaned = 0;
} else if (service.snmp_oids_cleaned === 0) {
service.snmp_oids_cleaned = 1;
// the second time, keep only values
service.snmp_oids = new Array();
var oid_keys = Object.keys(service.snmp_oids_index);
var oid_keys_len = oid_keys.length;
while (oid_keys_len--) {
if (service.snmp_oids_index[oid_keys[oid_keys_len]].type === 'value')
service.snmp_oids.push(oid_keys[oid_keys_len]);
}
}
},
getdata: function (service, index, ok, failed, callback) {
var __DEBUG = netdata.options.DEBUG;
var that = this;
if (index >= service.snmp_oids.length) {
callback((ok > 0) ? {ok: ok, failed: failed} : null);
return;
}
var slice;
if (service.snmp_oids.length <= service.request.max_request_size) {
slice = service.snmp_oids;
index = service.snmp_oids.length;
} else if (service.snmp_oids.length - index <= service.request.max_request_size) {
slice = service.snmp_oids.slice(index, service.snmp_oids.length);
index = service.snmp_oids.length;
} else {
slice = service.snmp_oids.slice(index, index + service.request.max_request_size);
index += service.request.max_request_size;
}
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': making ' + slice.length + ' entries request, max is: ' + service.request.max_request_size);
service.snmp_session.get(slice, function (error, varbinds) {
if (error) {
service.error('Received error = ' + netdata.stringify(error) + ' varbinds = ' + netdata.stringify(varbinds));
// make all values null
var len = slice.length;
while (len--)
service.snmp_oids_index[slice[len]].value = null;
} else {
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': got valid ' + service.module.name + ' response: ' + netdata.stringify(varbinds));
var varbinds_len = varbinds.length;
for (var i = 0; i < varbinds_len; i++) {
var value = null;
if (net_snmp.isVarbindError(varbinds[i])) {
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': failed ' + service.module.name + ' get for OIDs ' + varbinds[i].oid);
service.error('OID ' + varbinds[i].oid + ' gave error: ' + net_snmp.varbindError(varbinds[i]));
value = null;
failed++;
} else {
// test fom Counter64
// varbinds[i].type = net_snmp.ObjectType.Counter64;
// varbinds[i].value = new Buffer([0x34, 0x49, 0x2e, 0xdc, 0xd1]);
switch (varbinds[i].type) {
case net_snmp.ObjectType.OctetString:
if (service.snmp_oids_index[varbinds[i].oid].type !== 'title' && service.snmp_oids_index[varbinds[i].oid].type !== 'name') {
// parse floating point values, exposed as strings
value = parseFloat(varbinds[i].value) * 1000;
if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as float in string)");
} else {
// just use the string
value = varbinds[i].value;
if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as string)");
}
break;
case net_snmp.ObjectType.Counter64:
// copy the buffer
value = '0x' + varbinds[i].value.toString('hex');
if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as buffer)");
break;
case net_snmp.ObjectType.Integer:
case net_snmp.ObjectType.Counter:
case net_snmp.ObjectType.Gauge:
default:
value = varbinds[i].value;
if (__DEBUG === true) netdata.debug(service.module.name + ': ' + service.name + ': found ' + service.module.name + ' value of OIDs ' + varbinds[i].oid + ", ObjectType " + net_snmp.ObjectType[varbinds[i].type] + " (" + netdata.stringify(varbinds[i].type) + "), typeof(" + typeof (varbinds[i].value) + "), in JSON: " + netdata.stringify(varbinds[i].value) + ", value = " + value.toString() + " (parsed as number)");
break;
}
ok++;
}
if (value !== null) {
switch (service.snmp_oids_index[varbinds[i].oid].type) {
case 'title':
service.snmp_oids_index[varbinds[i].oid].link.title += ' ' + value;
break;
case 'name' :
service.snmp_oids_index[varbinds[i].oid].link.name = value.toString().replace(/\W/g, '_');
break;
case 'value':
service.snmp_oids_index[varbinds[i].oid].link.value = value;
break;
}
}
}
if (__DEBUG === true)
netdata.debug(service.module.name + ': ' + service.name + ': finished ' + service.module.name + ' with ' + ok + ' successful and ' + failed + ' failed values');
}
that.getdata(service, index, ok, failed, callback);
});
},
// module.process()
// called by the plugin framework to collect one round of SNMP values
// for this service; lazily creates the net-snmp session on the first
// call, then delegates the actual fetching to this.getdata()
process: function (service, callback) {
    var __DEBUG = netdata.options.DEBUG;

    // build service.snmp_oids / service.snmp_oids_index from the chart config
    this.prepare(service);

    if (service.snmp_oids.length === 0) {
        // no OIDs found for this service
        if (__DEBUG === true)
            service.error('no OIDs to process.');

        // nothing to collect - report "no data" to the framework
        callback(null);
        return;
    }

    if (typeof service.snmp_session === 'undefined' || service.snmp_session === null) {
        // no SNMP session has been created for this service
        // the SNMP session is just the initialization of NET-SNMP

        // default to SNMP v1 when the config does not specify a version
        var snmp_version = (service.request.options && service.request.options.version)
            ? service.request.options.version
            : net_snmp.Version1;

        if (snmp_version === net_snmp.Version3) {
            // SNMPv3 authenticates with a user object instead of a community string
            if (__DEBUG === true)
                netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' user ' + service.request.user + ' options ' + netdata.stringify(service.request.options));

            // create the SNMP session
            service.snmp_session = net_snmp.createV3Session(service.request.hostname, service.request.user, service.request.options);
        } else {
            // SNMP v1/v2c use a community string
            if (__DEBUG === true)
                netdata.debug(service.module.name + ': ' + service.name + ': opening ' + this.name + ' session on ' + service.request.hostname + ' community ' + service.request.community + ' options ' + netdata.stringify(service.request.options));

            // create the SNMP session
            service.snmp_session = net_snmp.createSession(service.request.hostname, service.request.community, service.request.options);
        }

        if (__DEBUG === true)
            netdata.debug(service.module.name + ': ' + service.name + ': got ' + this.name + ' session: ' + netdata.stringify(service.snmp_session));

        // if we later need traps, this is how to do it:
        //service.snmp_session.trap(net_snmp.TrapType.LinkDown, function(error) {
        //    if(error) console.error('trap error: ' + netdata.stringify(error));
        //});
    }

    // do it, get the SNMP values for the sessions we need
    // (index = 0, ok = 0, failed = 0 start a fresh collection round)
    this.getdata(service, 0, 0, 0, callback);
}
};
// the netdata module object for the SNMP collector;
// implements the node.d.plugin module interface (configure / update)
// and is exported at the bottom of the file
var snmp = {
    name: __filename,
    enable_autodetect: true,
    update_every: 1,            // default data collection frequency, in seconds
    base_priority: 50000,       // minimum chart priority; configured priorities below this are bumped above it
    charts: {},                 // cache of chart objects already created via service.chart()

    // module.processResponse()
    // receives the values collected by the snmp processor and forwards
    // them to netdata: commits the service once, then emits one
    // begin/set/end cycle per configured chart
    processResponse: function (service, data) {
        if (data !== null) {
            // commit the service the first time we get valid data
            if (service.added !== true)
                service.commit();

            var chart_keys = Object.keys(service.request.charts);
            var chart_keys_len = chart_keys.length;
            for (var i = 0; i < chart_keys_len; i++) {
                var c = chart_keys[i];

                var chart = snmp.charts[c];
                if (typeof chart === 'undefined') {
                    // first time this chart is seen - create it and cache it
                    chart = service.chart(c, service.request.charts[c]);
                    snmp.charts[c] = chart;
                }

                service.begin(chart);

                var dimensions = service.request.charts[c].dimensions;
                var dim_keys = Object.keys(dimensions);
                var dim_keys_len = dim_keys.length;
                for (var j = 0; j < dim_keys_len; j++) {
                    var d = dim_keys[j];

                    // skip dimensions whose SNMP get failed (value === null)
                    if (dimensions[d].value !== null) {
                        // apply the configured offset only when both value and offset are numbers
                        if (typeof dimensions[d].offset === 'number' && typeof dimensions[d].value === 'number')
                            service.set(d, dimensions[d].value + dimensions[d].offset);
                        else
                            service.set(d, dimensions[d].value);
                    }
                }

                service.end();
            }
        }
    },

    // module.serviceExecute()
    // this function is called only from this module
    // its purpose is to prepare the request and call
    // netdata.serviceExecute()
    serviceExecute: function (conf) {
        var __DEBUG = netdata.options.DEBUG;

        if (__DEBUG === true)
            netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', update_every: ' + conf.update_every);

        var service = netdata.service({
            name: conf.hostname,
            request: conf,
            update_every: conf.update_every,
            module: this,
            processor: netdata.processors.snmp
        });

        // multiply the charts, if required
        // (a chart with 'multiply_range: [from, to]' expands into one chart
        // per index, with the index appended to chart ids, titles and OIDs)
        var chart_keys = Object.keys(service.request.charts);
        var chart_keys_len = chart_keys.length;
        for (var i = 0; i < chart_keys_len; i++) {
            var c = chart_keys[i];
            var service_request_chart = service.request.charts[c];

            if (__DEBUG === true)
                netdata.debug(this.name + ': snmp hostname: ' + conf.hostname + ', examining chart: ' + c);

            if (typeof service_request_chart.update_every === 'undefined')
                service_request_chart.update_every = service.update_every;

            if (typeof service_request_chart.multiply_range !== 'undefined') {
                var from = service_request_chart.multiply_range[0];
                var to = service_request_chart.multiply_range[1];
                // ensure the expanded charts sort above base_priority
                var prio = service_request_chart.priority || 1;
                if (prio < snmp.base_priority) prio += snmp.base_priority;

                while (from <= to) {
                    var id = c + from.toString();
                    // deep-copy the template chart before specializing it for this index
                    var chart = extend(true, {}, service_request_chart);
                    chart.title += from.toString();

                    if (typeof chart.titleoid !== 'undefined')
                        chart.titleoid += from.toString();

                    chart.priority = prio++;

                    // append the index to every dimension OID (and oidname, when present)
                    var dim_keys = Object.keys(chart.dimensions);
                    var dim_keys_len = dim_keys.length;
                    for (var j = 0; j < dim_keys_len; j++) {
                        var d = dim_keys[j];
                        chart.dimensions[d].oid += from.toString();
                        if (typeof chart.dimensions[d].oidname !== 'undefined')
                            chart.dimensions[d].oidname += from.toString();
                    }
                    service.request.charts[id] = chart;
                    from++;
                }

                // the template chart is replaced by its expansions
                delete service.request.charts[c];
            } else {
                if (service.request.charts[c].priority < snmp.base_priority)
                    service.request.charts[c].priority += snmp.base_priority;
            }
        }

        service.execute(this.processResponse);
    },

    // module.configure()
    // called once at startup with the module configuration;
    // starts one service per configured server and returns how many were added
    configure: function (config) {
        var added = 0;

        // cap the number of OIDs per SNMP request (default 50)
        if (typeof config.max_request_size === 'undefined')
            config.max_request_size = 50;

        if (typeof (config.servers) !== 'undefined') {
            var len = config.servers.length;
            while (len--) {
                // per-server settings fall back to module-level defaults
                if (typeof config.servers[len].update_every === 'undefined')
                    config.servers[len].update_every = this.update_every;

                if (typeof config.servers[len].max_request_size === 'undefined')
                    config.servers[len].max_request_size = config.max_request_size;

                this.serviceExecute(config.servers[len]);
                added++;
            }
        }

        return added;
    },

    // module.update()
    // this is called repeatedly to collect data, by calling
    // service.execute()
    update: function (service, callback) {
        service.execute(function (serv, data) {
            service.module.processResponse(serv, data);
            callback();
        });
    }
};

module.exports = snmp;

View File

@ -21,7 +21,6 @@ from external processes, thus allowing Netdata to use **external plugins**.
|[nfacct.plugin](/collectors/nfacct.plugin/README.md)|`C`|linux|collects netfilter firewall, connection tracker and accounting metrics using `libmnl` and `libnetfilter_acct`.|
|[xenstat.plugin](/collectors/xenstat.plugin/README.md)|`C`|linux|collects XenServer and XCP-ng metrics using `lxenstat`.|
|[perf.plugin](/collectors/perf.plugin/README.md)|`C`|linux|collects CPU performance metrics using performance monitoring units (PMU).|
|[node.d.plugin](/collectors/node.d.plugin/README.md)|`node.js`|all|a **plugin orchestrator** for data collection modules written in `node.js`.|
|[python.d.plugin](/collectors/python.d.plugin/README.md)|`python`|all|a **plugin orchestrator** for data collection modules written in `python` v2 or v3 (both are supported).|
|[slabinfo.plugin](/collectors/slabinfo.plugin/README.md)|`C`|linux|collects kernel internal cache objects (SLAB) metrics.|
@ -74,7 +73,6 @@ Example:
# charts.d = yes
# fping = yes
# ioping = yes
# node.d = yes
# python.d = yes
```
@ -391,17 +389,12 @@ or do not output the line at all.
python is ideal for Netdata plugins. It is a simple, yet powerful way to collect data, it has a very small memory footprint, although it is not the most CPU efficient way to do it.
2. **node.js**, use `node.d.plugin`, there are a few examples in the [node.d
directory](/collectors/node.d.plugin/README.md)
node.js is the fastest scripting language for collecting data. If your plugin needs to do a lot of work, compute values, etc, node.js is probably the best choice before moving to compiled code. Keep in mind though that node.js is not memory efficient; it will probably need more RAM compared to python.
3. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d
2. **BASH**, use `charts.d.plugin`, there are many examples in the [charts.d
directory](/collectors/charts.d.plugin/README.md)
BASH is the simplest scripting language for collecting values. It is the less efficient though in terms of CPU resources. You can use it to collect data quickly, but extensive use of it might use a lot of system resources.
4. **C**
3. **C**
Of course, C is the most efficient way of collecting data. This is why Netdata itself is written in C.

View File

@ -1580,7 +1580,6 @@ varlibdir="${localstatedir}/lib/netdata"
registrydir="${localstatedir}/lib/netdata/registry"
cachedir="${localstatedir}/cache/netdata"
chartsdir="${libexecdir}/netdata/charts.d"
nodedir="${libexecdir}/netdata/node.d"
pythondir="${libexecdir}/netdata/python.d"
configdir="${sysconfdir}/netdata"
libconfigdir="${libdir}/netdata/conf.d"
@ -1591,7 +1590,6 @@ AC_SUBST([varlibdir])
AC_SUBST([registrydir])
AC_SUBST([cachedir])
AC_SUBST([chartsdir])
AC_SUBST([nodedir])
AC_SUBST([pythondir])
AC_SUBST([configdir])
AC_SUBST([libconfigdir])
@ -1716,7 +1714,6 @@ AC_CONFIG_FILES([
collectors/idlejitter.plugin/Makefile
collectors/macos.plugin/Makefile
collectors/nfacct.plugin/Makefile
collectors/node.d.plugin/Makefile
collectors/plugins.d/Makefile
collectors/proc.plugin/Makefile
collectors/python.d.plugin/Makefile

View File

@ -41,7 +41,6 @@ Depends: adduser,
Conflicts: netdata-core,
netdata-plugins-bash,
netdata-plugins-python,
netdata-plugins-nodejs,
netdata-web
Pre-Depends: dpkg (>= 1.17.14)
Description: real-time charts for system monitoring

View File

@ -92,7 +92,7 @@ Refer to the [web server documentation](/web/server/README.md)
### [plugins] section options
In this section you will see be a boolean (`yes`/`no`) option for each plugin (e.g. tc, cgroups, apps, proc etc.). Note that the configuration options in this section for the orchestrator plugins `python.d`, `charts.d` and `node.d` control **all the modules** written for that orchestrator. For instance, setting `python.d = no` means that all Python modules under `collectors/python.d.plugin` will be disabled.
In this section you will see be a boolean (`yes`/`no`) option for each plugin (e.g. tc, cgroups, apps, proc etc.). Note that the configuration options in this section for the orchestrator plugins `python.d` and `charts.d` control **all the modules** written for that orchestrator. For instance, setting `python.d = no` means that all Python modules under `collectors/python.d.plugin` will be disabled.
Additionally, there will be the following options:

View File

@ -62,8 +62,6 @@ terms related to collecting metrics.
`python` v2/v3.
- [charts.d.plugin](/collectors/charts.d.plugin/README.md): An orchestrator for data collection modules written in
`bash` v4+.
- [node.d.plugin](/collectors/node.d.plugin/README.md): An orchestrator for data collection modules written in
`node.js`.
- **External plugins** gather metrics from external processes, such as a webserver or database, and run as independent
processes that communicate with the Netdata daemon via pipes.
- **Internal plugins** gather metrics from `/proc`, `/sys`, and other Linux kernel sources. They are written in `C`,

View File

@ -46,7 +46,7 @@ of 5 seconds.
```
Every collector and plugin has its own `update every` setting, which you can also change in the `go.d.conf`,
`python.d.conf`, `node.d.conf`, or `charts.d.conf` files, or in individual collector configuration files. If the `update
`python.d.conf` or `charts.d.conf` files, or in individual collector configuration files. If the `update
every` for an individual collector is less than the global, the Netdata Agent uses the global setting. See the [enable
or configure a collector](/docs/collect/enable-configure.md) doc for details.
@ -55,7 +55,7 @@ or configure a collector](/docs/collect/enable-configure.md) doc for details.
Turn off entire plugins in the [`[plugins]` section](/daemon/config/README.md#plugins-section-options) of
`netdata.conf`.
To disable specific collectors, open `go.d.conf`, `python.d.conf`, `node.d.conf`, or `charts.d.conf` and find the line
To disable specific collectors, open `go.d.conf`, `python.d.conf` or `charts.d.conf` and find the line
for that specific module. Uncomment the line and change its value to `no`.
## Modify alarms and notifications

View File

@ -72,7 +72,7 @@ seconds, respectively.
### Specific plugin or collector
Every collector and plugin has its own `update every` setting, which you can also change in the `go.d.conf`,
`python.d.conf`, `node.d.conf`, or `charts.d.conf` files, or in individual collector configuration files. If the `update
`python.d.conf`, or `charts.d.conf` files, or in individual collector configuration files. If the `update
every` for an individual collector is less than the global, the Netdata Agent uses the global setting. See the [enable
or configure a collector](/docs/collect/enable-configure.md) doc for details.
@ -103,15 +103,13 @@ Keep in mind that if a plugin/collector has nothing to do, it simply shuts down
You will only improve the Agent's performance by disabling plugins/collectors that are actively collecting metrics.
Open `netdata.conf` and scroll down to the `[plugins]` section. To disable any plugin, uncomment it and set the value to
`no`. For example, to explicitly keep the `proc` and `go.d` plugins enabled while disabling `python.d`, `charts.d`, and
`node.d`.
`no`. For example, to explicitly keep the `proc` and `go.d` plugins enabled while disabling `python.d` and `charts.d`.
```conf
[plugins]
proc = yes
python.d = no
charts.d = no
node.d = no
go.d = yes
```
@ -121,7 +119,6 @@ collector, and setting its value to `no`.
```bash
sudo ./edit-config go.d.conf
sudo ./edit-config python.d.conf
sudo ./edit-config node.d.conf
sudo ./edit-config charts.d.conf
```

View File

@ -39,7 +39,7 @@ they were built in.
These modules are primarily written in [Go](https://learn.netdata.cloud/docs/agent/collectors/go.d.plugin/) (`go.d`) and
[Python](/collectors/python.d.plugin/README.md), although some use [Bash](/collectors/charts.d.plugin/README.md)
(`charts.d`) or [Node.js](/collectors/node.d.plugin/README.md) (`node.d`).
(`charts.d`).
## Enable and disable plugins
@ -58,14 +58,14 @@ Enabled:
```conf
[plugins]
# node.d = yes
# python.d = yes
```
Disabled:
```conf
[plugins]
node.d = no
python.d = no
```
When you explicitly disable a plugin this way, it won't auto-collect metrics using its collectors.

View File

@ -1066,32 +1066,6 @@ fi
# -----------------------------------------------------------------------------
[ -n "${GITHUB_ACTIONS}" ] && echo "::group::Installing Netdata."
progress "Migrate configuration files for node.d.plugin and charts.d.plugin"
# migrate existing configuration files
# for node.d and charts.d
if [ -d "${NETDATA_PREFIX}/etc/netdata" ]; then
# the configuration directory exists
if [ ! -d "${NETDATA_PREFIX}/etc/netdata/charts.d" ]; then
run mkdir "${NETDATA_PREFIX}/etc/netdata/charts.d"
fi
# move the charts.d config files
for x in apache ap cpu_apps cpufreq example exim hddtemp load_average mem_apps mysql nginx nut opensips phpfpm postfix sensors squid tomcat; do
for y in "" ".old" ".orig"; do
if [ -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" ] && [ ! -f "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}" ]; then
run mv -f "${NETDATA_PREFIX}/etc/netdata/${x}.conf${y}" "${NETDATA_PREFIX}/etc/netdata/charts.d/${x}.conf${y}"
fi
done
done
if [ ! -d "${NETDATA_PREFIX}/etc/netdata/node.d" ]; then
run mkdir "${NETDATA_PREFIX}/etc/netdata/node.d"
fi
fi
# -----------------------------------------------------------------------------
# shellcheck disable=SC2230

View File

@ -489,11 +489,9 @@ rm -rf "${RPM_BUILD_ROOT}"
%dir %{_libexecdir}/%{name}/python.d
%dir %{_libexecdir}/%{name}/charts.d
%dir %{_libexecdir}/%{name}/plugins.d
%dir %{_libexecdir}/%{name}/node.d
%{_libexecdir}/%{name}/python.d
%{_libexecdir}/%{name}/plugins.d
%{_libexecdir}/%{name}/node.d
%caps(cap_dac_read_search,cap_sys_ptrace=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/apps.plugin

View File

@ -18,7 +18,6 @@ fi
# These options control which packages we are going to install
# They can be pre-set, but also can be controlled with command line options
PACKAGES_NETDATA=${PACKAGES_NETDATA-1}
PACKAGES_NETDATA_NODEJS=${PACKAGES_NETDATA_NODEJS-0}
PACKAGES_NETDATA_PYTHON=${PACKAGES_NETDATA_PYTHON-0}
PACKAGES_NETDATA_PYTHON3=${PACKAGES_NETDATA_PYTHON3-1}
PACKAGES_NETDATA_PYTHON_MYSQL=${PACKAGES_NETDATA_PYTHON_MYSQL-0}
@ -103,10 +102,7 @@ Supported packages (you can append many of them):
node.js, python, sensors, etc
- netdata minimum packages required to install netdata
(no mysql client, no nodejs, includes python)
- nodejs install nodejs
(required for monitoring named and SNMP)
(no mysql client, includes python)
- python install python
@ -930,20 +926,6 @@ declare -A pkg_nginx=(
['default']="nginx"
)
declare -A pkg_nodejs=(
['gentoo']="net-libs/nodejs"
['clearlinux']="nodejs-basic"
['freebsd']="node"
['default']="nodejs"
# exceptions
['rhel-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
['rhel-7']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
['centos-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
['debian-6']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
['debian-7']="WARNING|To install nodejs check: https://nodejs.org/en/download/package-manager/"
)
declare -A pkg_postfix=(
['gentoo']="mail-mta/postfix"
['macos']="WARNING|"
@ -1447,13 +1429,6 @@ packages() {
suitable_package libelf
fi
# -------------------------------------------------------------------------
# scripting interpreters for netdata plugins
if [ "${PACKAGES_NETDATA_NODEJS}" -ne 0 ]; then
require_cmd nodejs node js || suitable_package nodejs
fi
# -------------------------------------------------------------------------
# python2
@ -2001,7 +1976,7 @@ EOF
remote_log() {
# log success or failure on our system
# to help us solve installation issues
curl > /dev/null 2>&1 -Ss --max-time 3 "https://registry.my-netdata.io/log/installer?status=${1}&error=${2}&distribution=${distribution}&version=${version}&installer=${package_installer}&tree=${tree}&detection=${detection}&netdata=${PACKAGES_NETDATA}&nodejs=${PACKAGES_NETDATA_NODEJS}&python=${PACKAGES_NETDATA_PYTHON}&python3=${PACKAGES_NETDATA_PYTHON3}&mysql=${PACKAGES_NETDATA_PYTHON_MYSQL}&postgres=${PACKAGES_NETDATA_PYTHON_POSTGRES}&pymongo=${PACKAGES_NETDATA_PYTHON_MONGO}&sensors=${PACKAGES_NETDATA_SENSORS}&database=${PACKAGES_NETDATA_DATABASE}&ebpf=${PACKAGES_NETDATA_EBPF}&firehol=${PACKAGES_FIREHOL}&fireqos=${PACKAGES_FIREQOS}&iprange=${PACKAGES_IPRANGE}&update_ipsets=${PACKAGES_UPDATE_IPSETS}&demo=${PACKAGES_NETDATA_DEMO_SITE}"
curl > /dev/null 2>&1 -Ss --max-time 3 "https://registry.my-netdata.io/log/installer?status=${1}&error=${2}&distribution=${distribution}&version=${version}&installer=${package_installer}&tree=${tree}&detection=${detection}&netdata=${PACKAGES_NETDATA}&python=${PACKAGES_NETDATA_PYTHON}&python3=${PACKAGES_NETDATA_PYTHON3}&mysql=${PACKAGES_NETDATA_PYTHON_MYSQL}&postgres=${PACKAGES_NETDATA_PYTHON_POSTGRES}&pymongo=${PACKAGES_NETDATA_PYTHON_MONGO}&sensors=${PACKAGES_NETDATA_SENSORS}&database=${PACKAGES_NETDATA_DATABASE}&ebpf=${PACKAGES_NETDATA_EBPF}&firehol=${PACKAGES_FIREHOL}&fireqos=${PACKAGES_FIREQOS}&iprange=${PACKAGES_IPRANGE}&update_ipsets=${PACKAGES_UPDATE_IPSETS}&demo=${PACKAGES_NETDATA_DEMO_SITE}"
}
if [ -z "${1}" ]; then
@ -2062,7 +2037,6 @@ while [ -n "${1}" ]; do
netdata-all)
PACKAGES_NETDATA=1
PACKAGES_NETDATA_NODEJS=1
if [ "${pv}" -eq 2 ]; then
PACKAGES_NETDATA_PYTHON=1
PACKAGES_NETDATA_PYTHON_MYSQL=1
@ -2124,12 +2098,6 @@ while [ -n "${1}" ]; do
fi
;;
nodejs | netdata-nodejs)
PACKAGES_NETDATA=1
PACKAGES_NETDATA_NODEJS=1
PACKAGES_NETDATA_DATABASE=1
;;
sensors | netdata-sensors)
PACKAGES_NETDATA=1
PACKAGES_NETDATA_PYTHON3=1
@ -2147,7 +2115,6 @@ while [ -n "${1}" ]; do
demo | all)
PACKAGES_NETDATA=1
PACKAGES_NETDATA_NODEJS=1
if [ "${pv}" -eq 2 ]; then
PACKAGES_NETDATA_PYTHON=1
PACKAGES_NETDATA_PYTHON_MYSQL=1

View File

@ -34,7 +34,6 @@
enable running new plugins = no
check for new plugins every = 60
go.d = no
node.d = no
charts.d = no
nfacct = no
python.d = no

View File

@ -34,7 +34,6 @@
enable running new plugins = no
check for new plugins every = 60
go.d = no
node.d = no
charts.d = no
nfacct = no
python.d = no

View File

@ -13,8 +13,7 @@ ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
# list of files which need to be checked. Path cannot start from '/'
FILES="usr/libexec/netdata/plugins.d/go.d.plugin
usr/libexec/netdata/plugins.d/charts.d.plugin
usr/libexec/netdata/plugins.d/python.d.plugin
usr/libexec/netdata/plugins.d/node.d.plugin"
usr/libexec/netdata/plugins.d/python.d.plugin"
DIRS="usr/sbin/netdata
etc/netdata

View File

@ -13,8 +13,7 @@ ENV="${INSTALLATION}/netdata/etc/netdata/.environment"
# list of files which need to be checked. Path cannot start from '/'
FILES="usr/libexec/netdata/plugins.d/go.d.plugin
usr/libexec/netdata/plugins.d/charts.d.plugin
usr/libexec/netdata/plugins.d/python.d.plugin
usr/libexec/netdata/plugins.d/node.d.plugin"
usr/libexec/netdata/plugins.d/python.d.plugin"
DIRS="usr/sbin/netdata
etc/netdata