diff --git a/CMakeLists.txt b/CMakeLists.txt index 13a9f472c9..8f2ec31698 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -115,6 +115,7 @@ set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${LIBLZ4_INCLUDE_ #set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${JUDY_INCLUDE_DIRS}) set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} "-lJudy") set(CMAKE_REQUIRED_LIBRARIES "Judy") +include(CheckSymbolExists) check_symbol_exists("JudyLLast" "Judy.h" HAVE_JUDY) IF(HAVE_JUDY) message(STATUS "Judy library found") @@ -271,6 +272,15 @@ pkg_check_modules(SNAPPY snappy) # ${SNAPPY_INCLUDE_DIRS} +# ----------------------------------------------------------------------------- +# Detect libmongoc + +find_package(libmongoc-1.0) +# later we use: +# ${MONGOC_LIBRARIES} +# ${MONGOC_INCLUDE_DIRS} + + # ----------------------------------------------------------------------------- # netdata files @@ -594,6 +604,11 @@ set(PROMETHEUS_REMOTE_WRITE_BACKEND_FILES backends/prometheus/remote_write/remote_write.h ) +set(MONGODB_BACKEND_FILES + backends/mongodb/mongodb.c + backends/mongodb/mongodb.h + ) + set(DAEMON_FILES daemon/common.c daemon/common.h @@ -681,6 +696,19 @@ ELSE() message(STATUS "prometheus remote write backend: disabled (requires protobuf and snappy libraries)") ENDIF() +# ----------------------------------------------------------------------------- +# mongodb backend + +IF(libmongoc-1.0_FOUND) + message(STATUS "mongodb backend: enabled") + + list(APPEND NETDATA_FILES ${MONGODB_BACKEND_FILES}) + list(APPEND NETDATA_COMMON_LIBRARIES ${MONGOC_LIBRARIES}) + list(APPEND NETDATA_COMMON_INCLUDE_DIRS ${MONGOC_INCLUDE_DIRS}) +ELSE() + message(STATUS "mongodb backend: disabled (requires mongoc library)") +ENDIF() + # ----------------------------------------------------------------------------- # netdata diff --git a/Makefile.am b/Makefile.am index 7cff5ff559..fa12a757d2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -458,6 +458,11 @@ PROMETHEUS_REMOTE_WRITE_BACKEND_FILES = \ backends/prometheus/remote_write/remote_write.proto \ $(NULL) +MONGODB_BACKEND_FILES = \ + backends/mongodb/mongodb.c \ + backends/mongodb/mongodb.h \ + $(NULL) + DAEMON_FILES = \ daemon/common.c \ daemon/common.h \ @@ -615,3 +620,8 @@ backends/prometheus/remote_write/remote_write.pb.h: backends/prometheus/remote_w $(PROTOC) --proto_path=$(srcdir) --cpp_out=$(builddir) $^ endif + +if ENABLE_BACKEND_MONGODB + netdata_SOURCES += $(MONGODB_BACKEND_FILES) + netdata_LDADD += $(OPTIONAL_MONGOC_LIBS) +endif diff --git a/README.md b/README.md index 0780b73d8b..7462f72d79 100644 --- a/README.md +++ b/README.md @@ -324,7 +324,7 @@ This is what you should expect from Netdata: - **Notifications**: [alerta.io](health/notifications/alerta/), [amazon sns](health/notifications/awssns/), [discordapp.com](health/notifications/discord/), [email](health/notifications/email/), [flock.com](health/notifications/flock/), [irc](health/notifications/irc/), [kavenegar.com](health/notifications/kavenegar/), [messagebird.com](health/notifications/messagebird/), [pagerduty.com](health/notifications/pagerduty/), [prowl](health/notifications/prowl/), [pushbullet.com](health/notifications/pushbullet/), [pushover.net](health/notifications/pushover/), [rocket.chat](health/notifications/rocketchat/), [slack.com](health/notifications/slack/), [smstools3](health/notifications/smstools3/), [syslog](health/notifications/syslog/), [telegram.org](health/notifications/telegram/), [twilio.com](health/notifications/twilio/), 
[web](health/notifications/web/) and [custom notifications](health/notifications/custom/). ### Integrations -- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports **Prometheus remote write API** which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). +- **time-series dbs** - can archive its metrics to **Graphite**, **OpenTSDB**, **Prometheus**, **AWS Kinesis**, **MongoDB**, **JSON document DBs**, in the same or lower resolution (lower: to prevent it from congesting these servers due to the amount of data collected). Netdata also supports **Prometheus remote write API** which allows storing metrics to **Elasticsearch**, **Gnocchi**, **InfluxDB**, **Kafka**, **PostgreSQL/TimescaleDB**, **Splunk**, **VictoriaMetrics** and a lot of other [storage providers](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage). ## Visualization diff --git a/backends/Makefile.am b/backends/Makefile.am index 278f057a1d..ea0d77e09b 100644 --- a/backends/Makefile.am +++ b/backends/Makefile.am @@ -9,6 +9,7 @@ SUBDIRS = \ opentsdb \ prometheus \ aws_kinesis \ + mongodb \ $(NULL) dist_noinst_DATA = \ diff --git a/backends/README.md b/backends/README.md index 86f08325ff..10d457248f 100644 --- a/backends/README.md +++ b/backends/README.md @@ -44,6 +44,10 @@ X seconds (though, it can send them per second if you need it to). metrics are sent to the service in `JSON` format. + - **MongoDB** + + metrics are sent to the database in `JSON` format. + 2. Only one backend may be active at a time. 3. Netdata can filter metrics (at the chart level), to send only a subset of the collected metrics. @@ -78,7 +82,7 @@ of `netdata.conf` from your Netdata): ``` [backend] enabled = yes | no - type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis + type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | prometheus_remote_write | json | kinesis | mongodb host tags = list of TAG=VALUE destination = space separated list of [PROTOCOL:]HOST[:PORT] - the first working will be used, or a region for kinesis data source = average | sum | as collected @@ -94,7 +98,7 @@ of `netdata.conf` from your Netdata): - `enabled = yes | no`, enables or disables sending data to a backend -- `type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | json | kinesis`, selects the backend type +- `type = graphite | opentsdb:telnet | opentsdb:http | opentsdb:https | json | kinesis | mongodb`, selects the backend type - `destination = host1 host2 host3 ...`, accepts **a space separated list** of hostnames, IPs (IPv4 and IPv6) and ports to connect to. @@ -132,6 +136,9 @@ of `netdata.conf` from your Netdata): For kinesis backend `destination` should be set to an AWS region (for example, `us-east-1`). + The MongoDB backend doesn't use the `destination` option for its configuration. It uses the `mongodb.conf` + [configuration file](mongodb/README.md) instead. + - `data source = as collected`, or `data source = average`, or `data source = sum`, selects the kind of data that will be sent to the backend. 
diff --git a/backends/aws_kinesis/aws_kinesis.c b/backends/aws_kinesis/aws_kinesis.c index d8b79364cc..b1ea478623 100644 --- a/backends/aws_kinesis/aws_kinesis.c +++ b/backends/aws_kinesis/aws_kinesis.c @@ -63,18 +63,11 @@ int read_kinesis_conf(const char *path, char **access_key_id_p, char **secret_ac continue; } - if(!value) value = ""; + if(!value) + value = ""; + else + value = strip_quotes(value); - // strip quotes - if(*value == '"' || *value == '\'') { - value++; - - s = value; - while(*s) s++; - if(s != value) s--; - - if(*s == '"' || *s == '\'') *s = '\0'; - } if(name[0] == 'a' && name[4] == 'a' && !strcmp(name, "aws_access_key_id")) { access_key_id = strdupz(value); } diff --git a/backends/backends.c b/backends/backends.c index 39ab677c0f..24f84b63c8 100644 --- a/backends/backends.c +++ b/backends/backends.c @@ -249,12 +249,11 @@ static void backends_main_cleanup(void *ptr) { /** * Set Kinesis variables * - * Set the variables necessaries to work with this specific backend. + * Set the variables necessary to work with this specific backend. * - * @param default_port the default port of the backend + * @param default_port the default port of the backend * @param brc function called to check the result. - * @param brf function called to format the msessage to the backend - * @param type the backend string selector. + * @param brf function called to format the message to the backend */ void backend_set_kinesis_variables(int *default_port, backend_response_checker_t brc, @@ -278,12 +277,11 @@ void backend_set_kinesis_variables(int *default_port, /** * Set Prometheus variables * - * Set the variables necessaries to work with this specific backend. + * Set the variables necessary to work with this specific backend. * - * @param default_port the default port of the backend + * @param default_port the default port of the backend * @param brc function called to check the result. - * @param brf function called to format the msessage to the backend - * @param type the backend string selector. + * @param brf function called to format the message to the backend */ void backend_set_prometheus_variables(int *default_port, backend_response_checker_t brc, @@ -300,15 +298,42 @@ void backend_set_prometheus_variables(int *default_port, #endif /* ENABLE_PROMETHEUS_REMOTE_WRITE */ } +/** + * Set MongoDB variables + * + * Set the variables necessary to work with this specific backend. + * + * @param default_port the default port of the backend + * @param brc function called to check the result. + * @param brf function called to format the message to the backend + */ +void backend_set_mongodb_variables(int *default_port, + backend_response_checker_t brc, + backend_request_formatter_t brf) +{ + (void)default_port; +#ifndef HAVE_MONGOC + (void)brc; + (void)brf; +#endif + +#if HAVE_MONGOC + *brc = process_json_response; + if(BACKEND_OPTIONS_DATA_SOURCE(global_backend_options) == BACKEND_SOURCE_DATA_AS_COLLECTED) + *brf = format_dimension_collected_json_plaintext; + else + *brf = format_dimension_stored_json_plaintext; +#endif +} + /** * Set JSON variables * - * Set the variables necessaries to work with this specific backend. + * Set the variables necessary to work with this specific backend. * - * @param default_port the default port of the backend + * @param default_port the default port of the backend * @param brc function called to check the result. - * @param brf function called to format the msessage to the backend - * @param type the backend string selector. 
+ * @param brf function called to format the message to the backend */ void backend_set_json_variables(int *default_port, backend_response_checker_t brc, @@ -326,12 +351,11 @@ void backend_set_json_variables(int *default_port, /** * Set OpenTSDB HTTP variables * - * Set the variables necessaries to work with this specific backend. + * Set the variables necessary to work with this specific backend. * - * @param default_port the default port of the backend + * @param default_port the default port of the backend * @param brc function called to check the result. - * @param brf function called to format the msessage to the backend - * @param type the backend string selector. + * @param brf function called to format the message to the backend */ void backend_set_opentsdb_http_variables(int *default_port, backend_response_checker_t brc, @@ -350,12 +374,11 @@ void backend_set_opentsdb_http_variables(int *default_port, /** * Set OpenTSDB Telnet variables * - * Set the variables necessaries to work with this specific backend. + * Set the variables necessary to work with this specific backend. * - * @param default_port the default port of the backend + * @param default_port the default port of the backend * @param brc function called to check the result. - * @param brf function called to format the msessage to the backend - * @param type the backend string selector. + * @param brf function called to format the message to the backend */ void backend_set_opentsdb_telnet_variables(int *default_port, backend_response_checker_t brc, @@ -373,12 +396,11 @@ void backend_set_opentsdb_telnet_variables(int *default_port, /** * Set Graphite variables * - * Set the variables necessaries to work with this specific backend. + * Set the variables necessary to work with this specific backend. * - * @param default_port the default port of the backend + * @param default_port the default port of the backend * @param brc function called to check the result. - * @param brf function called to format the msessage to the backend - * @param type the backend string selector. 
+ * @param brf function called to format the message to the backend */ void backend_set_graphite_variables(int *default_port, backend_response_checker_t brc, @@ -421,6 +443,9 @@ BACKEND_TYPE backend_select_type(const char *type) { else if (!strcmp(type, "kinesis") || !strcmp(type, "kinesis:plaintext")) { return BACKEND_TYPE_KINESIS; } + else if (!strcmp(type, "mongodb") || !strcmp(type, "mongodb:plaintext")) { + return BACKEND_TYPE_MONGODB; + } return BACKEND_TYPE_UNKNOWN; } @@ -450,7 +475,18 @@ void *backends_main(void *ptr) { #if ENABLE_PROMETHEUS_REMOTE_WRITE int do_prometheus_remote_write = 0; - BUFFER *http_request_header = buffer_create(1); + BUFFER *http_request_header = NULL; +#endif + +#if HAVE_MONGOC + int do_mongodb = 0; + char *mongodb_uri = NULL; + char *mongodb_database = NULL; + char *mongodb_collection = NULL; + + // set the default socket timeout in ms + int32_t mongodb_default_socket_timeout = (int32_t)(global_backend_update_every >= 2)?(global_backend_update_every * MSEC_PER_SEC - 500):1000; + #endif #ifdef ENABLE_HTTPS @@ -524,6 +560,7 @@ void *backends_main(void *ptr) { #if ENABLE_PROMETHEUS_REMOTE_WRITE do_prometheus_remote_write = 1; + http_request_header = buffer_create(1); init_write_request(); #else error("BACKEND: Prometheus remote write support isn't compiled"); @@ -547,6 +584,30 @@ void *backends_main(void *ptr) { backend_set_kinesis_variables(&default_port,&backend_response_checker,&backend_request_formatter); break; } + case BACKEND_TYPE_MONGODB: { +#if HAVE_MONGOC + if(unlikely(read_mongodb_conf(netdata_configured_user_config_dir, + &mongodb_uri, + &mongodb_database, + &mongodb_collection))) { + error("BACKEND: mongodb backend type is set but cannot read its configuration from %s/mongodb.conf", + netdata_configured_user_config_dir); + goto cleanup; + } + + if(likely(!mongodb_init(mongodb_uri, mongodb_database, mongodb_collection, mongodb_default_socket_timeout))) { + backend_set_mongodb_variables(&default_port, &backend_response_checker, &backend_request_formatter); + do_mongodb = 1; + } + else { + error("BACKEND: cannot initialize MongoDB backend"); + goto cleanup; + } +#else + error("BACKEND: MongoDB support isn't compiled"); +#endif // HAVE_MONGOC + break; + } case BACKEND_TYPE_GRAPHITE: { backend_set_graphite_variables(&default_port,&backend_response_checker,&backend_request_formatter); break; @@ -834,12 +895,53 @@ void *backends_main(void *ptr) { chart_sent_metrics = chart_buffered_metrics; buffer_flush(b); - } - else { -#else - { + } else #endif /* HAVE_KINESIS */ +#if HAVE_MONGOC + if(do_mongodb) { + size_t buffer_len = buffer_strlen(b); + size_t sent = 0; + + while(sent < buffer_len) { + const char *first_char = buffer_tostring(b); + + debug(D_BACKEND, "BACKEND: mongodb_insert(): uri = %s, database = %s, collection = %s, \ + buffer = %zu", mongodb_uri, mongodb_database, mongodb_collection, buffer_len); + + if(likely(!mongodb_insert((char *)first_char, (size_t)chart_buffered_metrics))) { + sent += buffer_len; + chart_transmission_successes++; + chart_receptions++; + } + else { + // oops! we couldn't send (all or some of the) data + error("BACKEND: failed to write data to database backend '%s'. 
Willing to write %zu bytes, wrote %zu bytes.", + mongodb_uri, buffer_len, 0UL); + + chart_transmission_failures++; + chart_data_lost_events++; + chart_lost_bytes += buffer_len; + + // estimate the number of lost metrics + chart_lost_metrics += (collected_number)chart_buffered_metrics; + + break; + } + + if(unlikely(netdata_exit)) break; + } + + chart_sent_bytes += sent; + if(likely(sent == buffer_len)) + chart_sent_metrics = chart_buffered_metrics; + + buffer_flush(b); + } else +#endif /* HAVE_MONGOC */ + + { + // ------------------------------------------------------------------------ // if we are connected, receive a response, without blocking @@ -1106,6 +1208,15 @@ cleanup: protocol_buffers_shutdown(); #endif +#if HAVE_MONGOC + if(do_mongodb) { + mongodb_cleanup(); + freez(mongodb_uri); + freez(mongodb_database); + freez(mongodb_collection); + } +#endif + if(sock != -1) close(sock); diff --git a/backends/backends.h b/backends/backends.h index 8d0bda413a..9da04ccee4 100644 --- a/backends/backends.h +++ b/backends/backends.h @@ -16,13 +16,14 @@ typedef enum backend_options { } BACKEND_OPTIONS; typedef enum backend_types { - BACKEND_TYPE_UNKNOWN, //Invalid type - BACKEND_TYPE_GRAPHITE, //Send plain text to Graphite - BACKEND_TYPE_OPENTSDB_USING_TELNET, //Send data to OpenTSDB using telnet API - BACKEND_TYPE_OPENTSDB_USING_HTTP, //Send data to OpenTSDB using HTTP API - BACKEND_TYPE_JSON, //Stores the data using JSON. - BACKEND_TYPE_PROMETEUS, //The user selected to use Prometheus backend - BACKEND_TYPE_KINESIS //Send message to AWS Kinesis + BACKEND_TYPE_UNKNOWN, // Invalid type + BACKEND_TYPE_GRAPHITE, // Send plain text to Graphite + BACKEND_TYPE_OPENTSDB_USING_TELNET, // Send data to OpenTSDB using telnet API + BACKEND_TYPE_OPENTSDB_USING_HTTP, // Send data to OpenTSDB using HTTP API + BACKEND_TYPE_JSON, // Stores the data using JSON. 
+    BACKEND_TYPE_PROMETEUS,             // The user selected to use Prometheus backend
+    BACKEND_TYPE_KINESIS,               // Send message to AWS Kinesis
+    BACKEND_TYPE_MONGODB                // Send data to MongoDB collection
 } BACKEND_TYPE;


@@ -56,6 +57,22 @@ extern calculated_number backend_calculate_value_from_stored_data(
 extern size_t backend_name_copy(char *d, const char *s, size_t usable);
 extern int discard_response(BUFFER *b, const char *backend);

+static inline char *strip_quotes(char *str) {
+    if(*str == '"' || *str == '\'') {
+        char *s;
+
+        str++;
+
+        s = str;
+        while(*s) s++;
+        if(s != str) s--;
+
+        if(*s == '"' || *s == '\'') *s = '\0';
+    }
+
+    return str;
+}
+
 #endif // BACKENDS_INTERNALS

 #include "backends/prometheus/backend_prometheus.h"
@@ -71,4 +88,8 @@ extern int discard_response(BUFFER *b, const char *backend);
 #include "backends/prometheus/remote_write/remote_write.h"
 #endif

+#if HAVE_MONGOC
+#include "backends/mongodb/mongodb.h"
+#endif
+
 #endif /* NETDATA_BACKENDS_H */
diff --git a/backends/mongodb/Makefile.am b/backends/mongodb/Makefile.am
new file mode 100644
index 0000000000..89e14a8d04
--- /dev/null
+++ b/backends/mongodb/Makefile.am
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+    README.md \
+    $(NULL)
+
+dist_libconfig_DATA = \
+    mongodb.conf \
+    $(NULL)
diff --git a/backends/mongodb/README.md b/backends/mongodb/README.md
new file mode 100644
index 0000000000..fc26dfe10e
--- /dev/null
+++ b/backends/mongodb/README.md
@@ -0,0 +1,31 @@
+# MongoDB backend
+
+## Prerequisites
+
+To use MongoDB as a backend, `libmongoc` 1.7.0 or higher should be [installed](http://mongoc.org/libmongoc/current/installing.html) first. Next, Netdata should be re-installed from source. The installer will detect that the required libraries are now available.
+
+## Configuration
+
+To enable data sending to the MongoDB backend, set the following options in `netdata.conf`:
+```
+[backend]
+    enabled = yes
+    type = mongodb
+```
+
+In the Netdata configuration directory run `./edit-config mongodb.conf` and set [MongoDB URI](https://docs.mongodb.com/manual/reference/connection-string/), database name, and collection name:
+```
+# URI
+uri = mongodb://
+
+# database name
+database = your_database_name
+
+# collection name
+collection = your_collection_name
+```
+
+The default socket timeout depends on the backend update interval. The timeout is 500 ms shorter than the interval (but not less than 1000 ms). You can alter the timeout using the `sockettimeoutms` MongoDB URI option.
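The timeout rule above can be shown with a tiny sketch (an illustration only, not part of the patch; it mirrors the computation `backends.c` performs with `global_backend_update_every`, and the 10-second interval is just an assumed example):

```c
#include <stdio.h>
#include <stdint.h>

/* Default MongoDB socket timeout: 500 ms shorter than the backend update
 * interval, but never below 1000 ms (the same rule backends.c applies). */
int main(void) {
    int update_every = 10;   /* assumed example value of the backend 'update every' setting, in seconds */

    int32_t default_socket_timeout = (update_every >= 2) ? (update_every * 1000 - 500) : 1000;

    printf("update every = %d s -> default socket timeout = %d ms\n",
           update_every, (int)default_socket_timeout);
    return 0;
}
```

With `update every = 1` the 1000 ms floor applies. A `sockettimeoutms` value supplied in the URI takes precedence, because `mongodb_init()` reads it with `mongoc_uri_get_option_as_int32()` and only falls back to the computed default.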
+
+
+[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fbackends%2Fmongodb%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)]()
diff --git a/backends/mongodb/mongodb.c b/backends/mongodb/mongodb.c
new file mode 100644
index 0000000000..f40d8fa5fc
--- /dev/null
+++ b/backends/mongodb/mongodb.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#define BACKENDS_INTERNALS
+#include "mongodb.h"
+#include <mongoc.h>
+
+#define CONFIG_FILE_LINE_MAX ((CONFIG_MAX_NAME + CONFIG_MAX_VALUE + 1024) * 2)
+
+mongoc_client_t *mongodb_client;
+mongoc_collection_t *mongodb_collection;
+
+int mongodb_init(const char *uri_string,
+                 const char *database_string,
+                 const char *collection_string,
+                 int32_t default_socket_timeout) {
+    mongoc_uri_t *uri;
+    bson_error_t error;
+
+    mongoc_init();
+
+    uri = mongoc_uri_new_with_error(uri_string, &error);
+    if(unlikely(!uri)) {
+        error("BACKEND: failed to parse URI: %s. Error message: %s", uri_string, error.message);
+        return 1;
+    }
+
+    int32_t socket_timeout = mongoc_uri_get_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, default_socket_timeout);
+    if(!mongoc_uri_set_option_as_int32(uri, MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout)) {
+        error("BACKEND: failed to set %s to the value %d", MONGOC_URI_SOCKETTIMEOUTMS, socket_timeout);
+        return 1;
+    };
+
+    mongodb_client = mongoc_client_new_from_uri(uri);
+    if(unlikely(!mongodb_client)) {
+        error("BACKEND: failed to create a new client");
+        return 1;
+    }
+
+    if(!mongoc_client_set_appname(mongodb_client, "netdata")) {
+        error("BACKEND: failed to set client appname");
+    };
+
+    mongodb_collection = mongoc_client_get_collection(mongodb_client, database_string, collection_string);
+
+    mongoc_uri_destroy(uri);
+
+    return 0;
+}
+
+void free_bson(bson_t **insert, size_t n_documents) {
+    size_t i;
+
+    for(i = 0; i < n_documents; i++)
+        bson_destroy(insert[i]);
+
+    free(insert);
+}
+
+int mongodb_insert(char *data, size_t n_metrics) {
+    bson_t **insert = calloc(n_metrics, sizeof(bson_t *));
+    bson_error_t error;
+    char *start = data, *end = data;
+    size_t n_documents = 0;
+
+    while(*end && n_documents <= n_metrics) {
+        while(*end && *end != '\n') end++;
+
+        if(likely(*end)) {
+            *end = '\0';
+            end++;
+        }
+        else {
+            break;
+        }
+
+        insert[n_documents] = bson_new_from_json((const uint8_t *)start, -1, &error);
+
+        if(unlikely(!insert[n_documents])) {
+            error("BACKEND: %s", error.message);
+            free_bson(insert, n_documents);
+            return 1;
+        }
+
+        start = end;
+
+        n_documents++;
+    }
+
+    if(unlikely(!mongoc_collection_insert_many(mongodb_collection, (const bson_t **)insert, n_documents, NULL, NULL, &error))) {
+        error("BACKEND: %s", error.message);
+        free_bson(insert, n_documents);
+        return 1;
+    }
+
+    free_bson(insert, n_documents);
+
+    return 0;
+}
+
+void mongodb_cleanup() {
+    mongoc_collection_destroy(mongodb_collection);
+    mongoc_client_destroy(mongodb_client);
+    mongoc_cleanup();
+
+    return;
+}
+
+int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p) {
+    char *uri = *uri_p;
+    char *database = *database_p;
+    char *collection = *collection_p;
+
+    if(unlikely(uri)) freez(uri);
+    if(unlikely(database)) freez(database);
+    if(unlikely(collection)) freez(collection);
+    uri = NULL;
+    database = NULL;
+    collection = NULL;
+
+    int line = 0;
+
+    char filename[FILENAME_MAX + 1];
+    snprintfz(filename, FILENAME_MAX, "%s/mongodb.conf", path);
+
+    char buffer[CONFIG_FILE_LINE_MAX + 1], *s;
+
+    debug(D_BACKEND, "BACKEND: opening config file '%s'", filename);
+
+    FILE *fp = fopen(filename, "r");
+    if(!fp) {
+        return 1;
+    }
+
+    while(fgets(buffer, CONFIG_FILE_LINE_MAX, fp) != NULL) {
+        buffer[CONFIG_FILE_LINE_MAX] = '\0';
+        line++;
+
+        s = trim(buffer);
+        if(!s || *s == '#') {
+            debug(D_BACKEND, "BACKEND: ignoring line %d of file '%s', it is empty.", line, filename);
+            continue;
+        }
+
+        char *name = s;
+        char *value = strchr(s, '=');
+        if(unlikely(!value)) {
+            error("BACKEND: ignoring line %d ('%s') of file '%s', there is no = in it.", line, s, filename);
+            continue;
+        }
+        *value = '\0';
+        value++;
+
+        name = trim(name);
+        value = trim(value);
+
+        if(unlikely(!name || *name == '#')) {
+            error("BACKEND: ignoring line %d of file '%s', name is empty.", line, filename);
+            continue;
+        }
+
+        if(!value)
+            value = "";
+        else
+            value = strip_quotes(value);
+
+        if(name[0] == 'u' && !strcmp(name, "uri")) {
+            uri = strdupz(value);
+        }
+        else if(name[0] == 'd' && !strcmp(name, "database")) {
+            database = strdupz(value);
+        }
+        else if(name[0] == 'c' && !strcmp(name, "collection")) {
+            collection = strdupz(value);
+        }
+    }
+
+    fclose(fp);
+
+    if(unlikely(!collection || !*collection)) {
+        error("BACKEND: collection name is a mandatory MongoDB parameter, but it is not configured");
+        return 1;
+    }
+
+    *uri_p = uri;
+    *database_p = database;
+    *collection_p = collection;
+
+    return 0;
+}
diff --git a/backends/mongodb/mongodb.conf b/backends/mongodb/mongodb.conf
new file mode 100644
index 0000000000..11ea6efb23
--- /dev/null
+++ b/backends/mongodb/mongodb.conf
@@ -0,0 +1,12 @@
+# MongoDB backend configuration
+#
+# All options in this file are mandatory
+
+# URI
+uri =
+
+# database name
+database =
+
+# collection name
+collection =
diff --git a/backends/mongodb/mongodb.h b/backends/mongodb/mongodb.h
new file mode 100644
index 0000000000..ffb276902a
--- /dev/null
+++ b/backends/mongodb/mongodb.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_BACKEND_MONGODB_H
+#define NETDATA_BACKEND_MONGODB_H
+
+#include "backends/backends.h"
+
+extern int mongodb_init(const char *uri_string, const char *database_string, const char *collection_string, const int32_t socket_timeout);
+
+extern int mongodb_insert(char *data, size_t n_metrics);
+
+extern void mongodb_cleanup();
+
+extern int read_mongodb_conf(const char *path, char **uri_p, char **database_p, char **collection_p);
+
+#endif //NETDATA_BACKEND_MONGODB_H
diff --git a/configure.ac b/configure.ac
index 02feba22ef..56e484cc2c 100644
--- a/configure.ac
+++ b/configure.ac
@@ -82,6 +82,12 @@ AC_ARG_ENABLE(
     ,
     [enable_backend_prometheus_remote_write="detect"]
 )
+AC_ARG_ENABLE(
+    [backend-mongodb],
+    [AS_HELP_STRING([--enable-backend-mongodb], [enable mongodb backend @<:@default autodetect@:>@])],
+    ,
+    [enable_backend_mongodb="detect"]
+)
 AC_ARG_ENABLE(
     [pedantic],
     [AS_HELP_STRING([--enable-pedantic], [enable pedantic compiler warnings @<:@default disabled@:>@])],
@@ -964,6 +970,33 @@ AC_MSG_RESULT([${enable_backend_prometheus_remote_write}])
 AM_CONDITIONAL([ENABLE_BACKEND_PROMETHEUS_REMOTE_WRITE], [test "${enable_backend_prometheus_remote_write}" = "yes"])


+# -----------------------------------------------------------------------------
+# MongoDB backend - libmongoc
+
+PKG_CHECK_MODULES(
+    [LIBMONGOC],
+    [libmongoc-1.0 >= 1.7],
+    [have_libmongoc=yes],
+    [have_libmongoc=no]
+)
+
+test "${enable_backend_mongodb}" = "yes" -a "${have_libmongoc}" != "yes" && \
+    AC_MSG_ERROR([libmongoc
required but not found. Try installing `mongoc`.]) + +AC_MSG_CHECKING([if mongodb backend should be enabled]) +if test "${enable_backend_mongodb}" != "no" -a "${have_libmongoc}" = "yes"; then + enable_backend_mongodb="yes" + AC_DEFINE([HAVE_MONGOC], [1], [libmongoc usability]) + OPTIONAL_MONGOC_CFLAGS="${LIBMONGOC_CFLAGS}" + OPTIONAL_MONGOC_LIBS="${LIBMONGOC_LIBS}" +else + enable_backend_mongodb="no" +fi + +AC_MSG_RESULT([${enable_backend_mongodb}]) +AM_CONDITIONAL([ENABLE_BACKEND_MONGODB], [test "${enable_backend_mongodb}" = "yes"]) + + # ----------------------------------------------------------------------------- # check for setns() - cgroup-network @@ -988,7 +1021,7 @@ if test "${enable_lto}" != "no"; then fi if test "${have_lto}" = "yes"; then oCFLAGS="${CFLAGS}" - CFLAGS="${CFLAGS} -flto ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS}" + CFLAGS="${CFLAGS} -flto" ac_cv_c_lto_cross_compile="${enable_lto}" test "${ac_cv_c_lto_cross_compile}" != "yes" && ac_cv_c_lto_cross_compile="no" AC_C_LTO @@ -1037,6 +1070,10 @@ AC_SUBST([logdir]) AC_SUBST([pluginsdir]) AC_SUBST([webdir]) +CFLAGS="${CFLAGS} ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \ + ${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} \ + ${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS} ${OPTIONAL_MONGOC_CFLAGS}" + CXXFLAGS="${CFLAGS} ${CXX11FLAG}" CPPFLAGS="\ @@ -1076,6 +1113,8 @@ AC_SUBST([OPTIONAL_KINESIS_CFLAGS]) AC_SUBST([OPTIONAL_KINESIS_LIBS]) AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS]) AC_SUBST([OPTIONAL_PROMETHEUS_REMOTE_WRITE_LIBS]) +AC_SUBST([OPTIONAL_MONGOC_CFLAGS]) +AC_SUBST([OPTIONAL_MONGOC_LIBS]) AC_CONFIG_FILES([ @@ -1088,6 +1127,7 @@ AC_CONFIG_FILES([ backends/prometheus/Makefile backends/prometheus/remote_write/Makefile backends/aws_kinesis/Makefile + backends/mongodb/Makefile collectors/Makefile collectors/apps.plugin/Makefile collectors/cgroups.plugin/Makefile diff --git a/netdata-installer.sh b/netdata-installer.sh index 812921e221..e749a867af 100755 --- a/netdata-installer.sh +++ b/netdata-installer.sh @@ -168,6 +168,8 @@ USAGE: ${PROGRAM} [options] --enable-backend-prometheus-remote-write Enable Prometheus remote write backend. Default: enable it when libprotobuf and libsnappy are available. --disable-backend-prometheus-remote-write + --enable-backend-mongodb Enable MongoDB backend. Default: enable it when libmongoc is available. + --disable-backend-mongodb --enable-lto Enable Link-Time-Optimization. Default: enabled --disable-lto --disable-x86-sse Disable SSE instructions. By default SSE optimizations are enabled. 
@@ -218,6 +220,8 @@ while [ -n "${1}" ]; do "--disable-backend-kinesis") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-kinesis/} --disable-backend-kinesis";; "--enable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-prometheus-remote-write/} --enable-backend-prometheus-remote-write";; "--disable-backend-prometheus-remote-write") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-prometheus-remote-write/} --disable-backend-prometheus-remote-write";; + "--enable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-backend-mongodb/} --enable-backend-mongodb";; + "--disable-backend-mongodb") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-backend-mongodb/} --disable-backend-mongodb";; "--enable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--enable-lto/} --enable-lto";; "--disable-lto") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-lto/} --disable-lto";; "--disable-x86-sse") NETDATA_CONFIGURE_OPTIONS="${NETDATA_CONFIGURE_OPTIONS//--disable-x86-sse/} --disable-x86-sse";;
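As a closing usage note (not part of the patch): before enabling the backend, the prerequisites and connection settings can be sanity-checked with a minimal standalone libmongoc program. This is only a sketch that mirrors the call sequence of `backends/mongodb/mongodb.c`; the file name, URI, database, and collection names below are placeholders, and the build line assumes `pkg-config` can find `libmongoc-1.0`, the module `configure.ac` checks for.

```c
// mongodb_smoke_test.c -- hypothetical file name, not part of the patch.
// Build sketch:
//   gcc mongodb_smoke_test.c $(pkg-config --cflags --libs libmongoc-1.0) -o mongodb_smoke_test
#include <mongoc.h>
#include <stdio.h>

int main(void) {
    const char *uri_string = "mongodb://localhost:27017";   // placeholder URI
    const char *database   = "your_database_name";          // placeholders, as in mongodb.conf
    const char *collection = "your_collection_name";

    mongoc_init();

    bson_error_t error;
    mongoc_uri_t *uri = mongoc_uri_new_with_error(uri_string, &error);
    if(!uri) {
        fprintf(stderr, "failed to parse URI: %s\n", error.message);
        return 1;
    }

    mongoc_client_t *client = mongoc_client_new_from_uri(uri);
    if(!client) {
        fprintf(stderr, "failed to create a client\n");
        mongoc_uri_destroy(uri);
        mongoc_cleanup();
        return 1;
    }

    mongoc_collection_t *coll = mongoc_client_get_collection(client, database, collection);

    // One JSON document, turned into BSON the same way the backend converts its buffered JSON lines.
    const char *json = "{\"hostname\": \"example\", \"value\": 1}";
    bson_t *doc = bson_new_from_json((const uint8_t *)json, -1, &error);

    if(!doc || !mongoc_collection_insert_many(coll, (const bson_t **)&doc, 1, NULL, NULL, &error))
        fprintf(stderr, "insert failed: %s\n", error.message);
    else
        printf("insert succeeded\n");

    if(doc) bson_destroy(doc);
    mongoc_collection_destroy(coll);
    mongoc_client_destroy(client);
    mongoc_uri_destroy(uri);
    mongoc_cleanup();
    return 0;
}
```

If the document shows up in the target collection, the same `uri`, `database`, and `collection` values can be placed in `mongodb.conf` and the backend enabled in `netdata.conf` as described in `backends/mongodb/README.md`.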