author     Daniel Baumann <daniel.baumann@progress-linux.org>   2023-05-08 16:27:04 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2023-05-08 16:27:04 +0000
commit     a836a244a3d2bdd4da1ee2641e3e957850668cea (patch)
tree       cb87c75b3677fab7144f868435243f864048a1e6 /daemon
parent     Adding upstream version 1.38.1. (diff)
Adding upstream version 1.39.0. (upstream/1.39.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'daemon')
-rw-r--r--   daemon/README.md                    112
-rw-r--r--   daemon/analytics.c                  102
-rw-r--r--   daemon/analytics.h                   16
-rwxr-xr-x   daemon/anonymous-statistics.sh.in     5
-rw-r--r--   daemon/commands.c                    38
-rw-r--r--   daemon/commands.h                     1
-rw-r--r--   daemon/common.h                       3
-rw-r--r--   daemon/config/README.md              12
-rw-r--r--   daemon/event_loop.c                   1
-rw-r--r--   daemon/event_loop.h                   1
-rw-r--r--   daemon/global_statistics.c           28
-rw-r--r--   daemon/main.c                       148
-rw-r--r--   daemon/main.h                        19
-rw-r--r--   daemon/service.c                      2
-rwxr-xr-x   daemon/system-info.sh                26
-rw-r--r--   daemon/unit_test.c                   87
16 files changed, 305 insertions, 296 deletions
diff --git a/daemon/README.md b/daemon/README.md
index 7a17506bb..65ac105c6 100644
--- a/daemon/README.md
+++ b/daemon/README.md
@@ -1,102 +1,10 @@
-<!--
-title: "Netdata daemon"
-date: "2020-04-29"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/daemon/README.md"
-sidebar_label: "Netdata daemon"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "References/Configuration"
--->
-
# Netdata daemon
-## Starting netdata
-
-- You can start Netdata by executing it with `/usr/sbin/netdata` (the installer will also start it).
-
-- You can stop Netdata by killing it with `killall netdata`. You can stop and start Netdata at any point. When
- exiting, the [database engine](https://github.com/netdata/netdata/blob/master/database/engine/README.md) saves metrics to `/var/cache/netdata/dbengine/` so that
- it can continue when started again.
-
-Access to the web site, for all graphs, is by default on port `19999`, so go to:
-
-```sh
-http://127.0.0.1:19999/
-```
-
-You can get the running config file at any time, by accessing `http://127.0.0.1:19999/netdata.conf`.
-
-### Starting Netdata at boot
-
-In the `system` directory you can find scripts and configurations for the
-various distros.
-
-#### systemd
-
-The installer already installs `netdata.service` if it detects a systemd system.
-
-To install `netdata.service` by hand, run:
-
-```sh
-# stop Netdata
-killall netdata
-
-# copy netdata.service to systemd
-cp system/netdata.service /etc/systemd/system/
-
-# let systemd know there is a new service
-systemctl daemon-reload
-
-# enable Netdata at boot
-systemctl enable netdata
-
-# start Netdata
-systemctl start netdata
-```
-
-#### init.d
-
-In the system directory you can find `netdata-lsb`. Copy it to the proper place according to your distribution
-documentation. For Ubuntu, this can be done via running the following commands as root.
-
-```sh
-# copy the Netdata startup file to /etc/init.d
-cp system/netdata-lsb /etc/init.d/netdata
-
-# make sure it is executable
-chmod +x /etc/init.d/netdata
-
-# enable it
-update-rc.d netdata defaults
-```
-
-#### openrc (gentoo)
-
-In the `system` directory you can find `netdata-openrc`. Copy it to the proper
-place according to your distribution documentation.
-
-#### CentOS / Red Hat Enterprise Linux
+The Netdata daemon is practically a synonym for the Netdata Agent, as it controls its
+entire operation. We support various methods to
+[start, stop, or restart the daemon](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
-For older versions of RHEL/CentOS that don't have systemd, an init script is included in the system directory. This can
-be installed by running the following commands as root.
-
-```sh
-# copy the Netdata startup file to /etc/init.d
-cp system/netdata-init-d /etc/init.d/netdata
-
-# make sure it is executable
-chmod +x /etc/init.d/netdata
-
-# enable it
-chkconfig --add netdata
-```
-
-_There have been some recent work on the init script, see PR
-<https://github.com/netdata/netdata/pull/403>_
-
-#### other systems
-
-You can start Netdata by running it from `/etc/rc.local` or equivalent.
+This document provides some basic information on the command line options, log files, and how to debug and troubleshoot
## Command line options
@@ -303,7 +211,7 @@ You can use the following:
For more information see `man sched`.
-### scheduling priority for `rr` and `fifo`
+### Scheduling priority for `rr` and `fifo`
Once the policy is set to one of `rr` or `fifo`, the following will appear:
@@ -324,7 +232,7 @@ When the policy is set to `other`, `nice`, or `batch`, the following will appear
process nice level = 19
```
-## scheduling settings and systemd
+## Scheduling settings and systemd
Netdata will not be able to set its scheduling policy and priority to more important values when it is started as the
`netdata` user (systemd case).
@@ -472,7 +380,7 @@ will contain the messages.
> Do not forget to disable tracing (`debug flags = 0`) when you are done tracing. The file `debug.log` can grow too
> fast.
-### compiling Netdata with debugging
+### Compiling Netdata with debugging
To compile Netdata with debugging, use this:
@@ -487,7 +395,7 @@ CFLAGS="-O1 -ggdb -DNETDATA_INTERNAL_CHECKS=1" ./netdata-installer.sh
The above will compile and install Netdata with debugging info embedded. You can now use `debug flags` to set the
section(s) you need to trace.
-### debugging crashes
+### Debugging crashes
We have made the most to make Netdata crash free. If however, Netdata crashes on your system, it would be very helpful
to provide stack traces of the crash. Without them, is will be almost impossible to find the issue (the code base is
@@ -515,7 +423,7 @@ Run the following command and post the output on a github issue.
gdb $(which netdata) /path/to/core/dump
```
-### you can reproduce a Netdata crash on your system
+### You can reproduce a Netdata crash on your system
> you need to have Netdata compiled with debugging info for this to work (check above)
@@ -527,5 +435,3 @@ valgrind $(which netdata) -D
Netdata will start and it will be a lot slower. Now reproduce the crash and `valgrind` will dump on your console the
stack trace. Open a new github issue and post the output.
-
-
diff --git a/daemon/analytics.c b/daemon/analytics.c
index a2f52bc8f..b3c802b86 100644
--- a/daemon/analytics.c
+++ b/daemon/analytics.c
@@ -141,22 +141,14 @@ void analytics_set_data_str(char **name, char *value)
}
/*
- * Get data, used by web api v1
- */
-void analytics_get_data(char *name, BUFFER *wb)
-{
- buffer_strcat(wb, name);
-}
-
-/*
* Log hits on the allmetrics page, with prometheus parameter
*/
void analytics_log_prometheus(void)
{
if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.prometheus_hits < ANALYTICS_MAX_PROMETHEUS_HITS)) {
analytics_data.prometheus_hits++;
- char b[7];
- snprintfz(b, 6, "%d", analytics_data.prometheus_hits);
+ char b[21];
+ snprintfz(b, 20, "%zu", analytics_data.prometheus_hits);
analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, b);
}
}
@@ -168,8 +160,8 @@ void analytics_log_shell(void)
{
if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.shell_hits < ANALYTICS_MAX_SHELL_HITS)) {
analytics_data.shell_hits++;
- char b[7];
- snprintfz(b, 6, "%d", analytics_data.shell_hits);
+ char b[21];
+ snprintfz(b, 20, "%zu", analytics_data.shell_hits);
analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, b);
}
}
@@ -181,8 +173,8 @@ void analytics_log_json(void)
{
if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.json_hits < ANALYTICS_MAX_JSON_HITS)) {
analytics_data.json_hits++;
- char b[7];
- snprintfz(b, 6, "%d", analytics_data.json_hits);
+ char b[21];
+ snprintfz(b, 20, "%zu", analytics_data.json_hits);
analytics_set_data(&analytics_data.netdata_allmetrics_json_used, b);
}
}
@@ -194,8 +186,8 @@ void analytics_log_dashboard(void)
{
if (netdata_anonymous_statistics_enabled == 1 && likely(analytics_data.dashboard_hits < ANALYTICS_MAX_DASHBOARD_HITS)) {
analytics_data.dashboard_hits++;
- char b[7];
- snprintfz(b, 6, "%d", analytics_data.dashboard_hits);
+ char b[21];
+ snprintfz(b, 20, "%zu", analytics_data.dashboard_hits);
analytics_set_data(&analytics_data.netdata_dashboard_used, b);
}
}
@@ -204,18 +196,18 @@ void analytics_log_dashboard(void)
* Called when setting the oom score
*/
void analytics_report_oom_score(long long int score){
- char b[7];
- snprintfz(b, 6, "%d", (int)score);
+ char b[21];
+ snprintfz(b, 20, "%lld", score);
analytics_set_data(&analytics_data.netdata_config_oom_score, b);
}
void analytics_mirrored_hosts(void)
{
RRDHOST *host;
- int count = 0;
- int reachable = 0;
- int unreachable = 0;
- char b[11];
+ size_t count = 0;
+ size_t reachable = 0;
+ size_t unreachable = 0;
+ char b[21];
rrd_rdlock();
rrdhost_foreach_read(host)
@@ -229,11 +221,11 @@ void analytics_mirrored_hosts(void)
}
rrd_unlock();
- snprintfz(b, 10, "%d", count);
+ snprintfz(b, 20, "%zu", count);
analytics_set_data(&analytics_data.netdata_mirrored_host_count, b);
- snprintfz(b, 10, "%d", reachable);
+ snprintfz(b, 20, "%zu", reachable);
analytics_set_data(&analytics_data.netdata_mirrored_hosts_reachable, b);
- snprintfz(b, 10, "%d", unreachable);
+ snprintfz(b, 20, "%zu", unreachable);
analytics_set_data(&analytics_data.netdata_mirrored_hosts_unreachable, b);
}
@@ -303,8 +295,8 @@ void analytics_collectors(void)
analytics_set_data(&analytics_data.netdata_collectors, (char *)buffer_tostring(ap.both));
{
- char b[7];
- snprintfz(b, 6, "%d", ap.c);
+ char b[21];
+ snprintfz(b, 20, "%d", ap.c);
analytics_set_data(&analytics_data.netdata_collectors_count, b);
}
@@ -362,16 +354,16 @@ void analytics_alarms_notifications(void)
buffer_free(b);
}
-void analytics_get_install_type(void)
+static void analytics_get_install_type(struct rrdhost_system_info *system_info)
{
- if (localhost->system_info->install_type == NULL) {
+ if (system_info->install_type == NULL) {
analytics_set_data_str(&analytics_data.netdata_install_type, "unknown");
} else {
- analytics_set_data_str(&analytics_data.netdata_install_type, localhost->system_info->install_type);
+ analytics_set_data_str(&analytics_data.netdata_install_type, system_info->install_type);
}
- if (localhost->system_info->prebuilt_dist != NULL) {
- analytics_set_data_str(&analytics_data.netdata_prebuilt_distro, localhost->system_info->prebuilt_dist);
+ if (system_info->prebuilt_dist != NULL) {
+ analytics_set_data_str(&analytics_data.netdata_prebuilt_distro, system_info->prebuilt_dist);
}
}
@@ -396,15 +388,16 @@ void analytics_https(void)
void analytics_charts(void)
{
RRDSET *st;
- int c = 0;
+ size_t c = 0;
rrdset_foreach_read(st, localhost)
if(rrdset_is_available_for_viewers(st)) c++;
rrdset_foreach_done(st);
+ analytics_data.charts_count = c;
{
- char b[7];
- snprintfz(b, 6, "%d", c);
+ char b[21];
+ snprintfz(b, 20, "%zu", c);
analytics_set_data(&analytics_data.netdata_charts_count, b);
}
}
@@ -412,7 +405,7 @@ void analytics_charts(void)
void analytics_metrics(void)
{
RRDSET *st;
- long int dimensions = 0;
+ size_t dimensions = 0;
rrdset_foreach_read(st, localhost) {
if (rrdset_is_available_for_viewers(st)) {
RRDDIM *rd;
@@ -426,17 +419,18 @@ void analytics_metrics(void)
}
rrdset_foreach_done(st);
+ analytics_data.metrics_count = dimensions;
{
- char b[7];
- snprintfz(b, 6, "%ld", dimensions);
+ char b[21];
+ snprintfz(b, 20, "%zu", dimensions);
analytics_set_data(&analytics_data.netdata_metrics_count, b);
}
}
void analytics_alarms(void)
{
- int alarm_warn = 0, alarm_crit = 0, alarm_normal = 0;
- char b[10];
+ size_t alarm_warn = 0, alarm_crit = 0, alarm_normal = 0;
+ char b[21];
RRDCALC *rc;
foreach_rrdcalc_in_rrdhost_read(localhost, rc) {
if (unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
@@ -455,11 +449,11 @@ void analytics_alarms(void)
}
foreach_rrdcalc_in_rrdhost_done(rc);
- snprintfz(b, 9, "%d", alarm_normal);
+ snprintfz(b, 20, "%zu", alarm_normal);
analytics_set_data(&analytics_data.netdata_alarms_normal, b);
- snprintfz(b, 9, "%d", alarm_warn);
+ snprintfz(b, 20, "%zu", alarm_warn);
analytics_set_data(&analytics_data.netdata_alarms_warning, b);
- snprintfz(b, 9, "%d", alarm_crit);
+ snprintfz(b, 20, "%zu", alarm_crit);
analytics_set_data(&analytics_data.netdata_alarms_critical, b);
}
@@ -476,7 +470,8 @@ void analytics_misc(void)
analytics_set_data_str(&analytics_data.netdata_host_aclk_implementation, "");
#endif
- analytics_set_data(&analytics_data.netdata_config_exporting_enabled, appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, "enabled", CONFIG_BOOLEAN_NO) ? "true" : "false");
+ analytics_data.exporting_enabled = appconfig_get_boolean(&exporting_config, CONFIG_SECTION_EXPORTING, "enabled", CONFIG_BOOLEAN_NO);
+ analytics_set_data(&analytics_data.netdata_config_exporting_enabled, analytics_data.exporting_enabled ? "true" : "false");
analytics_set_data(&analytics_data.netdata_config_is_private_registry, "false");
analytics_set_data(&analytics_data.netdata_config_use_private_registry, "false");
@@ -539,20 +534,20 @@ void analytics_gather_mutable_meta_data(void)
freez(claim_id);
{
- char b[7];
- snprintfz(b, 6, "%d", analytics_data.prometheus_hits);
+ char b[21];
+ snprintfz(b, 20, "%zu", analytics_data.prometheus_hits);
analytics_set_data(&analytics_data.netdata_allmetrics_prometheus_used, b);
- snprintfz(b, 6, "%d", analytics_data.shell_hits);
+ snprintfz(b, 20, "%zu", analytics_data.shell_hits);
analytics_set_data(&analytics_data.netdata_allmetrics_shell_used, b);
- snprintfz(b, 6, "%d", analytics_data.json_hits);
+ snprintfz(b, 20, "%zu", analytics_data.json_hits);
analytics_set_data(&analytics_data.netdata_allmetrics_json_used, b);
- snprintfz(b, 6, "%d", analytics_data.dashboard_hits);
+ snprintfz(b, 20, "%zu", analytics_data.dashboard_hits);
analytics_set_data(&analytics_data.netdata_dashboard_used, b);
- snprintfz(b, 6, "%zu", rrdhost_hosts_available());
+ snprintfz(b, 20, "%zu", rrdhost_hosts_available());
analytics_set_data(&analytics_data.netdata_config_hosts_available, b);
}
}
@@ -637,7 +632,7 @@ static const char *verify_required_directory(const char *dir)
* This is called after the rrdinit
* These values will be sent on the START event
*/
-void set_late_global_environment()
+void set_late_global_environment(struct rrdhost_system_info *system_info)
{
analytics_set_data(&analytics_data.netdata_config_stream_enabled, default_rrdpush_enabled ? "true" : "false");
analytics_set_data_str(&analytics_data.netdata_config_memory_mode, (char *)rrd_memory_mode_name(default_rrd_memory_mode));
@@ -681,7 +676,7 @@ void set_late_global_environment()
buffer_free(bi);
}
- analytics_get_install_type();
+ analytics_get_install_type(system_info);
}
void get_system_timezone(void)
@@ -894,6 +889,9 @@ void set_global_environment()
analytics_data.shell_hits = 0;
analytics_data.json_hits = 0;
analytics_data.dashboard_hits = 0;
+ analytics_data.charts_count = 0;
+ analytics_data.metrics_count = 0;
+ analytics_data.exporting_enabled = false;
char *default_port = appconfig_get(&netdata_config, CONFIG_SECTION_WEB, "default port", NULL);
int clean = 0;
diff --git a/daemon/analytics.h b/daemon/analytics.h
index d1ffcec18..34418316f 100644
--- a/daemon/analytics.h
+++ b/daemon/analytics.h
@@ -63,14 +63,18 @@ struct analytics_data {
size_t data_length;
- uint8_t prometheus_hits;
- uint8_t shell_hits;
- uint8_t json_hits;
- uint8_t dashboard_hits;
+ size_t prometheus_hits;
+ size_t shell_hits;
+ size_t json_hits;
+ size_t dashboard_hits;
+
+ size_t charts_count;
+ size_t metrics_count;
+
+ bool exporting_enabled;
};
-void analytics_get_data(char *name, BUFFER *wb);
-void set_late_global_environment(void);
+void set_late_global_environment(struct rrdhost_system_info *system_info);
void analytics_free_data(void);
void set_global_environment(void);
void send_statistics(const char *action, const char *action_result, const char *action_data);
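The analytics hunks above widen the hit counters from `uint8_t` to `size_t` and grow the scratch buffers from `char b[7]`/`char b[11]` to `char b[21]`, printed with `%zu`. The sizing works because a 64-bit unsigned value needs at most 20 decimal digits, so 20 digits plus the terminating NUL always fit in 21 bytes. A minimal standalone sketch of that reasoning, using plain `snprintf` rather than netdata's `snprintfz` wrapper:

```c
/* Standalone illustration (not netdata code): a 21-byte buffer always fits
 * the decimal form of a 64-bit unsigned counter.
 * UINT64_MAX = 18446744073709551615 -> 20 digits + terminating NUL = 21 bytes. */
#include <stdio.h>
#include <stdint.h>

int main(void) {
    size_t counter = SIZE_MAX;   /* worst case on a 64-bit platform */
    char b[21];                  /* 20 digits + NUL */

    int written = snprintf(b, sizeof(b), "%zu", counter);
    printf("value=%s digits=%d fits=%s\n", b, written,
           (written >= 0 && (size_t)written < sizeof(b)) ? "yes" : "no");
    return 0;
}
```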
diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in
index 9f8df188d..8676ffbe7 100755
--- a/daemon/anonymous-statistics.sh.in
+++ b/daemon/anonymous-statistics.sh.in
@@ -69,6 +69,7 @@ NETDATA_USE_PRIVATE_REGISTRY="${40}"
NETDATA_CONFIG_OOM_SCORE="${41}"
NETDATA_PREBUILT_DISTRO="${42}"
+[ -z "$NETDATA_REGISTRY_UNIQUE_ID" ] && NETDATA_REGISTRY_UNIQUE_ID="00000000-0000-0000-0000-000000000000"
# define body of request to be sent
REQ_BODY="$(cat << EOF
@@ -165,7 +166,7 @@ EOF
# send the anonymous statistics to the Netdata PostHog
if [ -n "$(command -v curl 2> /dev/null)" ]; then
- curl --silent -o /dev/null --write-out '%{http_code}' -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" https://posthog.netdata.cloud/capture/
+ curl --silent -o /dev/null --write-out '%{http_code}' -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" https://app.posthog.com/capture/
else
wget -q -O - --no-check-certificate \
--server-response \
@@ -173,5 +174,5 @@ else
--timeout=1 \
--header 'Content-Type: application/json' \
--body-data "${REQ_BODY}" \
- 'https://posthog.netdata.cloud/capture/' 2>&1 | awk '/^ HTTP/{print $2}'
+ 'https://app.posthog.com/capture/' 2>&1 | awk '/^ HTTP/{print $2}'
fi
diff --git a/daemon/commands.c b/daemon/commands.c
index 377a4002f..fcb75b71c 100644
--- a/daemon/commands.c
+++ b/daemon/commands.c
@@ -47,6 +47,7 @@ static cmd_status_t cmd_write_config_execute(char *args, char **message);
static cmd_status_t cmd_ping_execute(char *args, char **message);
static cmd_status_t cmd_aclk_state(char *args, char **message);
static cmd_status_t cmd_version(char *args, char **message);
+static cmd_status_t cmd_dumpconfig(char *args, char **message);
static command_info_t command_info_array[] = {
{"help", cmd_help_execute, CMD_TYPE_HIGH_PRIORITY}, // show help menu
@@ -61,7 +62,8 @@ static command_info_t command_info_array[] = {
{"write-config", cmd_write_config_execute, CMD_TYPE_ORTHOGONAL},
{"ping", cmd_ping_execute, CMD_TYPE_ORTHOGONAL},
{"aclk-state", cmd_aclk_state, CMD_TYPE_ORTHOGONAL},
- {"version", cmd_version, CMD_TYPE_ORTHOGONAL}
+ {"version", cmd_version, CMD_TYPE_ORTHOGONAL},
+ {"dumpconfig", cmd_dumpconfig, CMD_TYPE_ORTHOGONAL}
};
/* Mutexes for commands of type CMD_TYPE_ORTHOGONAL */
@@ -127,6 +129,8 @@ static cmd_status_t cmd_help_execute(char *args, char **message)
" Return with 'pong' if agent is alive.\n"
"aclk-state [json]\n"
" Returns current state of ACLK and Cloud connection. (optionally in json).\n"
+ "dumpconfig\n"
+ " Returns the current netdata.conf on stdout.\n"
"version\n"
" Returns the netdata version.\n",
MAX_COMMAND_LENGTH - 1);
@@ -330,6 +334,17 @@ static cmd_status_t cmd_version(char *args, char **message)
return CMD_STATUS_SUCCESS;
}
+static cmd_status_t cmd_dumpconfig(char *args, char **message)
+{
+ (void)args;
+
+ BUFFER *wb = buffer_create(1024, NULL);
+ config_generate(wb, 0);
+ *message = strdupz(buffer_tostring(wb));
+ buffer_free(wb);
+ return CMD_STATUS_SUCCESS;
+}
+
static void cmd_lock_exclusive(unsigned index)
{
(void)index;
@@ -393,32 +408,30 @@ static void pipe_write_cb(uv_write_t* req, int status)
uv_close((uv_handle_t *)client, pipe_close_cb);
--clients;
- freez(client->data);
+ buffer_free(client->data);
info("Command Clients = %u\n", clients);
}
-static inline void add_char_to_command_reply(char *reply_string, unsigned *reply_string_size, char character)
+static inline void add_char_to_command_reply(BUFFER *reply_string, unsigned *reply_string_size, char character)
{
- reply_string[(*reply_string_size)++] = character;
+ buffer_fast_charcat(reply_string, character);
+ *reply_string_size +=1;
}
-static inline void add_string_to_command_reply(char *reply_string, unsigned *reply_string_size, char *str)
+static inline void add_string_to_command_reply(BUFFER *reply_string, unsigned *reply_string_size, char *str)
{
unsigned len;
len = strlen(str);
-
- if (MAX_COMMAND_LENGTH - 1 < len + *reply_string_size)
- len = MAX_COMMAND_LENGTH - *reply_string_size - 1;
-
- strncpyz(reply_string + *reply_string_size, str, len);
+ buffer_fast_strcat(reply_string, str, len);
*reply_string_size += len;
}
static void send_command_reply(struct command_context *cmd_ctx, cmd_status_t status, char *message)
{
int ret;
- char *reply_string = mallocz(MAX_COMMAND_LENGTH);
+ BUFFER *reply_string = buffer_create(128, NULL);
+
char exit_status_string[MAX_EXIT_STATUS_LENGTH + 1] = {'\0', };
unsigned reply_string_size = 0;
uv_buf_t write_buf;
@@ -436,13 +449,12 @@ static void send_command_reply(struct command_context *cmd_ctx, cmd_status_t sta
cmd_ctx->write_req.data = client;
client->data = reply_string;
- write_buf.base = reply_string;
+ write_buf.base = reply_string->buffer;
write_buf.len = reply_string_size;
ret = uv_write(&cmd_ctx->write_req, (uv_stream_t *)client, &write_buf, 1, pipe_write_cb);
if (ret) {
error("uv_write(): %s", uv_strerror(ret));
}
- info("COMMAND: Sending reply: \"%s\"", reply_string);
}
cmd_status_t execute_command(cmd_t idx, char *args, char **message)
diff --git a/daemon/commands.h b/daemon/commands.h
index 78bdcc779..43a0ef96b 100644
--- a/daemon/commands.h
+++ b/daemon/commands.h
@@ -26,6 +26,7 @@ typedef enum cmd {
CMD_PING,
CMD_ACLK_STATE,
CMD_VERSION,
+ CMD_DUMPCONFIG,
CMD_TOTAL_COMMANDS
} cmd_t;
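The commands.c and commands.h hunks above register the new `dumpconfig` command in three places: the `cmd_t` enum, the `command_info_array[]` table, and a handler that fills `*message` with the reply text. A hedged sketch of the same pattern for a hypothetical `hello` command, reusing only the calls visible in this diff (`buffer_create`/`buffer_strcat`/`buffer_tostring`/`buffer_free`, `strdupz`, `CMD_STATUS_SUCCESS`); it illustrates the registration pattern and is not part of the upstream change:

```c
/* 1. commands.h: add a value to the cmd_t enum, before CMD_TOTAL_COMMANDS:
 *        CMD_HELLO,
 * 2. commands.c: add an entry to command_info_array[]:
 *        {"hello", cmd_hello_execute, CMD_TYPE_ORTHOGONAL},
 * 3. commands.c: implement the handler; the reply handed back via *message
 *    is written to the command pipe by send_command_reply().                */
static cmd_status_t cmd_hello_execute(char *args, char **message)
{
    (void)args;

    BUFFER *wb = buffer_create(128, NULL);        /* growable reply buffer, as in cmd_dumpconfig */
    buffer_strcat(wb, "hello from the netdata daemon\n");
    *message = strdupz(buffer_tostring(wb));      /* duplicated reply text returned to the caller */
    buffer_free(wb);
    return CMD_STATUS_SUCCESS;
}
```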
diff --git a/daemon/common.h b/daemon/common.h
index ca4d5c954..66ffd4a74 100644
--- a/daemon/common.h
+++ b/daemon/common.h
@@ -58,7 +58,8 @@
#include "exporting/exporting_engine.h"
// the netdata API
-#include "web/api/web_api_v1.h"
+#include "web/server/web_client.h"
+#include "web/rtc/webrtc.h"
// all data collection plugins
#include "collectors/all.h"
diff --git a/daemon/config/README.md b/daemon/config/README.md
index 4a6d0bb80..418b12cf9 100644
--- a/daemon/config/README.md
+++ b/daemon/config/README.md
@@ -4,8 +4,7 @@ description: "The Netdata Agent's daemon is installed preconfigured to collect t
custom_edit_url: "https://github.com/netdata/netdata/edit/master/daemon/config/README.md"
sidebar_label: "Daemon"
learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "References/Configuration"
+learn_rel_path: "Configuration"
learn_doc_purpose: "Explain the daemon options, the log files, the process scheduling, virtual memory, explain how the netdata.conf is used and backlink to the netdata.conf file reference"
-->
@@ -108,12 +107,9 @@ Please note that your data history will be lost if you have modified `history` p
| delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. |
| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
-:::info
-
-The multiplication of all the **enabled** tiers `dbengine tier N update every iterations` values must be less than `65535`.
-
-:::
-
+> ### Info
+>
+>The multiplication of all the **enabled** tiers `dbengine tier N update every iterations` values must be less than `65535`.
### [directories] section options
diff --git a/daemon/event_loop.c b/daemon/event_loop.c
index 6f09cd654..5fd02377e 100644
--- a/daemon/event_loop.c
+++ b/daemon/event_loop.c
@@ -49,6 +49,7 @@ void register_libuv_worker_jobs() {
worker_register_job_name(UV_EVENT_DBENGINE_SHUTDOWN, "dbengine shutdown");
// metadata
+ worker_register_job_name(UV_EVENT_HOST_CONTEXT_LOAD, "metadata load host context");
worker_register_job_name(UV_EVENT_METADATA_STORE, "metadata store host");
worker_register_job_name(UV_EVENT_METADATA_CLEANUP, "metadata cleanup");
diff --git a/daemon/event_loop.h b/daemon/event_loop.h
index 0d3cc0d07..1ff1c2c1c 100644
--- a/daemon/event_loop.h
+++ b/daemon/event_loop.h
@@ -41,6 +41,7 @@ enum event_loop_job {
UV_EVENT_DBENGINE_SHUTDOWN,
// metadata
+ UV_EVENT_HOST_CONTEXT_LOAD,
UV_EVENT_METADATA_STORE,
UV_EVENT_METADATA_CLEANUP,
diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c
index 0dc3ee645..ee68bebd1 100644
--- a/daemon/global_statistics.c
+++ b/daemon/global_statistics.c
@@ -827,33 +827,7 @@ static void global_statistics_charts(void) {
rrdset_done(st_points_stored);
}
- {
- static RRDSET *st = NULL;
- static RRDDIM *rd = NULL;
-
- if (unlikely(!st)) {
- st = rrdset_create_localhost(
- "netdata" // type
- , "ml_models_consulted" // id
- , NULL // name
- , NETDATA_ML_CHART_FAMILY // family
- , NULL // context
- , "KMeans models used for prediction" // title
- , "models" // units
- , NETDATA_ML_PLUGIN // plugin
- , NETDATA_ML_MODULE_DETECTION // module
- , NETDATA_ML_CHART_PRIO_MACHINE_LEARNING_STATUS // priority
- , localhost->rrd_update_every // update_every
- , RRDSET_TYPE_AREA // chart_type
- );
-
- rd = rrddim_add(st, "num_models_consulted", NULL, 1, 1, RRD_ALGORITHM_INCREMENTAL);
- }
-
- rrddim_set_by_pointer(st, rd, (collected_number) gs.ml_models_consulted);
-
- rrdset_done(st);
- }
+ ml_update_global_statistics_charts(gs.ml_models_consulted);
}
// ----------------------------------------------------------------------------
diff --git a/daemon/main.c b/daemon/main.c
index 7b2076f3f..606de128b 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -13,6 +13,7 @@ int netdata_zero_metrics_enabled;
int netdata_anonymous_statistics_enabled;
int libuv_worker_threads = MIN_LIBUV_WORKER_THREADS;
+bool ieee754_doubles = false;
struct netdata_static_thread *static_threads;
@@ -147,10 +148,6 @@ static void service_to_buffer(BUFFER *wb, SERVICE_TYPE service) {
buffer_strcat(wb, "MAINTENANCE ");
if(service & SERVICE_COLLECTORS)
buffer_strcat(wb, "COLLECTORS ");
- if(service & SERVICE_ML_TRAINING)
- buffer_strcat(wb, "ML_TRAINING ");
- if(service & SERVICE_ML_PREDICTION)
- buffer_strcat(wb, "ML_PREDICTION ");
if(service & SERVICE_REPLICATION)
buffer_strcat(wb, "REPLICATION ");
if(service & ABILITY_DATA_QUERIES)
@@ -312,6 +309,8 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
timeout = false; \
}
+void web_client_cache_destroy(void);
+
void netdata_cleanup_and_exit(int ret) {
usec_t started_ut = now_monotonic_usec();
usec_t last_ut = started_ut;
@@ -339,6 +338,15 @@ void netdata_cleanup_and_exit(int ret) {
}
#endif
+ delta_shutdown_time("close webrtc connections");
+
+ webrtc_close_all_connections();
+
+ delta_shutdown_time("disable ML detection and training threads");
+
+ ml_stop_threads();
+ ml_fini();
+
delta_shutdown_time("disable maintenance, new queries, new web requests, new streaming connections and aclk");
service_signal_exit(
@@ -347,14 +355,14 @@ void netdata_cleanup_and_exit(int ret) {
| ABILITY_WEB_REQUESTS
| ABILITY_STREAMING_CONNECTIONS
| SERVICE_ACLK
+ | SERVICE_ACLKSYNC
);
- delta_shutdown_time("stop replication, exporters, ML training, health and web servers threads");
+ delta_shutdown_time("stop replication, exporters, health and web servers threads");
timeout = !service_wait_exit(
SERVICE_REPLICATION
| SERVICE_EXPORTERS
- | SERVICE_ML_TRAINING
| SERVICE_HEALTH
| SERVICE_WEB_SERVER
, 3 * USEC_PER_SEC);
@@ -366,11 +374,10 @@ void netdata_cleanup_and_exit(int ret) {
| SERVICE_STREAMING
, 3 * USEC_PER_SEC);
- delta_shutdown_time("stop ML prediction and context threads");
+ delta_shutdown_time("stop context thread");
timeout = !service_wait_exit(
- SERVICE_ML_PREDICTION
- | SERVICE_CONTEXT
+ SERVICE_CONTEXT
, 3 * USEC_PER_SEC);
delta_shutdown_time("stop maintenance thread");
@@ -379,6 +386,10 @@ void netdata_cleanup_and_exit(int ret) {
SERVICE_MAINTENANCE
, 3 * USEC_PER_SEC);
+ delta_shutdown_time("clear web client cache");
+
+ web_client_cache_destroy();
+
delta_shutdown_time("clean rrdhost database");
rrdhost_cleanup_all();
@@ -387,11 +398,6 @@ void netdata_cleanup_and_exit(int ret) {
metadata_sync_shutdown_prepare();
-#ifdef ENABLE_ACLK
- delta_shutdown_time("signal aclk sync to stop");
- aclk_sync_exit_all();
-#endif
-
delta_shutdown_time("stop aclk threads");
timeout = !service_wait_exit(
@@ -529,38 +535,41 @@ void web_server_config_options(void)
web_x_frame_options = NULL;
web_allow_connections_from =
- simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow connections from", "localhost *"),
- NULL, SIMPLE_PATTERN_EXACT);
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow connections from", "localhost *"),
+ NULL, SIMPLE_PATTERN_EXACT, true);
web_allow_connections_dns =
make_dns_decision(CONFIG_SECTION_WEB, "allow connections by dns", "heuristic", web_allow_connections_from);
web_allow_dashboard_from =
- simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow dashboard from", "localhost *"),
- NULL, SIMPLE_PATTERN_EXACT);
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow dashboard from", "localhost *"),
+ NULL, SIMPLE_PATTERN_EXACT, true);
web_allow_dashboard_dns =
make_dns_decision(CONFIG_SECTION_WEB, "allow dashboard by dns", "heuristic", web_allow_dashboard_from);
web_allow_badges_from =
- simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow badges from", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow badges from", "*"), NULL, SIMPLE_PATTERN_EXACT,
+ true);
web_allow_badges_dns =
make_dns_decision(CONFIG_SECTION_WEB, "allow badges by dns", "heuristic", web_allow_badges_from);
web_allow_registry_from =
- simple_pattern_create(config_get(CONFIG_SECTION_REGISTRY, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT);
+ simple_pattern_create(config_get(CONFIG_SECTION_REGISTRY, "allow from", "*"), NULL, SIMPLE_PATTERN_EXACT,
+ true);
web_allow_registry_dns = make_dns_decision(CONFIG_SECTION_REGISTRY, "allow by dns", "heuristic",
web_allow_registry_from);
web_allow_streaming_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow streaming from", "*"),
- NULL, SIMPLE_PATTERN_EXACT);
+ NULL, SIMPLE_PATTERN_EXACT, true);
web_allow_streaming_dns = make_dns_decision(CONFIG_SECTION_WEB, "allow streaming by dns", "heuristic",
web_allow_streaming_from);
// Note the default is not heuristic, the wildcards could match DNS but the intent is ip-addresses.
web_allow_netdataconf_from = simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow netdata.conf from",
- "localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.*"
- " 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.*"
- " 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.*"
- " 172.31.* UNKNOWN"), NULL, SIMPLE_PATTERN_EXACT);
+ "localhost fd* 10.* 192.168.* 172.16.* 172.17.* 172.18.*"
+ " 172.19.* 172.20.* 172.21.* 172.22.* 172.23.* 172.24.*"
+ " 172.25.* 172.26.* 172.27.* 172.28.* 172.29.* 172.30.*"
+ " 172.31.* UNKNOWN"), NULL, SIMPLE_PATTERN_EXACT,
+ true);
web_allow_netdataconf_dns =
make_dns_decision(CONFIG_SECTION_WEB, "allow netdata.conf by dns", "no", web_allow_netdataconf_from);
web_allow_mgmt_from =
- simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow management from", "localhost"),
- NULL, SIMPLE_PATTERN_EXACT);
+ simple_pattern_create(config_get(CONFIG_SECTION_WEB, "allow management from", "localhost"),
+ NULL, SIMPLE_PATTERN_EXACT, true);
web_allow_mgmt_dns =
make_dns_decision(CONFIG_SECTION_WEB, "allow management by dns","heuristic",web_allow_mgmt_from);
@@ -655,9 +664,14 @@ void cancel_main_threads() {
int i, found = 0;
usec_t max = 5 * USEC_PER_SEC, step = 100000;
for (i = 0; static_threads[i].name != NULL ; i++) {
- if(static_threads[i].enabled == NETDATA_MAIN_THREAD_RUNNING) {
- info("EXIT: Stopping main thread: %s", static_threads[i].name);
- netdata_thread_cancel(*static_threads[i].thread);
+ if (static_threads[i].enabled == NETDATA_MAIN_THREAD_RUNNING) {
+ if (static_threads[i].thread) {
+ info("EXIT: Stopping main thread: %s", static_threads[i].name);
+ netdata_thread_cancel(*static_threads[i].thread);
+ } else {
+ info("EXIT: No thread running (marking as EXITED): %s", static_threads[i].name);
+ static_threads[i].enabled = NETDATA_MAIN_THREAD_EXITED;
+ }
found++;
}
}
@@ -1107,8 +1121,12 @@ static void get_netdata_configured_variables() {
// get default Database Engine page cache size in MiB
default_rrdeng_page_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb);
+ default_rrdeng_extent_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine extent cache size MB", default_rrdeng_extent_cache_mb);
db_engine_journal_check = config_get_boolean(CONFIG_SECTION_DB, "dbengine enable journal integrity check", CONFIG_BOOLEAN_NO);
+ if(default_rrdeng_extent_cache_mb < 0)
+ default_rrdeng_extent_cache_mb = 0;
+
if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) {
error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB);
default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
@@ -1314,9 +1332,12 @@ void post_conf_load(char **user)
prev_msg = msg; \
}
+int buffer_unittest(void);
int pgc_unittest(void);
int mrg_unittest(void);
int julytest(void);
+int pluginsd_parser_unittest(void);
+void replication_initialize(void);
int main(int argc, char **argv) {
// initialize the system clocks
@@ -1437,12 +1458,17 @@ int main(int argc, char **argv) {
if(strcmp(optarg, "unittest") == 0) {
unittest_running = true;
+ if (pluginsd_parser_unittest())
+ return 1;
+
if (unit_test_static_threads())
return 1;
if (unit_test_buffer())
return 1;
if (unit_test_str2ld())
return 1;
+ if (buffer_unittest())
+ return 1;
if (unit_test_bitmap256())
return 1;
// No call to load the config file on this code-path
@@ -1479,15 +1505,6 @@ int main(int argc, char **argv) {
else if(strcmp(optarg, "escapetest") == 0) {
return command_argument_sanitization_tests();
}
-#ifdef ENABLE_DBENGINE
- else if(strcmp(optarg, "mctest") == 0) {
- unittest_running = true;
- return mc_unittest();
- }
- else if(strcmp(optarg, "ctxtest") == 0) {
- unittest_running = true;
- return ctx_unittest();
- }
else if(strcmp(optarg, "dicttest") == 0) {
unittest_running = true;
return dictionary_unittest(10000);
@@ -1504,6 +1521,19 @@ int main(int argc, char **argv) {
unittest_running = true;
return rrdlabels_unittest();
}
+ else if(strcmp(optarg, "buffertest") == 0) {
+ unittest_running = true;
+ return buffer_unittest();
+ }
+#ifdef ENABLE_DBENGINE
+ else if(strcmp(optarg, "mctest") == 0) {
+ unittest_running = true;
+ return mc_unittest();
+ }
+ else if(strcmp(optarg, "ctxtest") == 0) {
+ unittest_running = true;
+ return ctx_unittest();
+ }
else if(strcmp(optarg, "metatest") == 0) {
unittest_running = true;
return metadata_unittest();
@@ -1523,6 +1553,14 @@ int main(int argc, char **argv) {
else if(strncmp(optarg, createdataset_string, strlen(createdataset_string)) == 0) {
optarg += strlen(createdataset_string);
unsigned history_seconds = strtoul(optarg, NULL, 0);
+ post_conf_load(&user);
+ get_netdata_configured_variables();
+ default_rrd_update_every = 1;
+ registry_init();
+ if(rrd_init("dbengine-dataset", NULL, true)) {
+ fprintf(stderr, "rrd_init failed for unittest\n");
+ return 1;
+ }
generate_dbengine_dataset(history_seconds);
return 0;
}
@@ -1587,12 +1625,16 @@ int main(int argc, char **argv) {
size_t len = strlen(needle) + 1;
char wildcarded[len];
- SIMPLE_PATTERN *p = simple_pattern_create(haystack, NULL, SIMPLE_PATTERN_EXACT);
- int ret = simple_pattern_matches_extract(p, needle, wildcarded, len);
+ SIMPLE_PATTERN *p = simple_pattern_create(haystack, NULL, SIMPLE_PATTERN_EXACT, true);
+ SIMPLE_PATTERN_RESULT ret = simple_pattern_matches_extract(p, needle, wildcarded, len);
simple_pattern_free(p);
- if(ret) {
- fprintf(stdout, "RESULT: MATCHED - pattern '%s' matches '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
+ if(ret == SP_MATCHED_POSITIVE) {
+ fprintf(stdout, "RESULT: POSITIVE MATCHED - pattern '%s' matches '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
+ return 0;
+ }
+ else if(ret == SP_MATCHED_NEGATIVE) {
+ fprintf(stdout, "RESULT: NEGATIVE MATCHED - pattern '%s' matches '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
return 0;
}
else {
@@ -1801,7 +1843,7 @@ int main(int argc, char **argv) {
#endif
// set libuv worker threads
- libuv_worker_threads = (int)get_netdata_cpus() * 2;
+ libuv_worker_threads = (int)get_netdata_cpus() * 6;
if(libuv_worker_threads < MIN_LIBUV_WORKER_THREADS)
libuv_worker_threads = MIN_LIBUV_WORKER_THREADS;
@@ -1866,10 +1908,14 @@ int main(int argc, char **argv) {
// initialize the log files
open_all_log_files();
+ ieee754_doubles = is_system_ieee754_double();
+
aral_judy_init();
get_system_timezone();
+ replication_initialize();
+
// --------------------------------------------------------------------
// get the certificate and start security
@@ -1988,13 +2034,16 @@ int main(int argc, char **argv) {
struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info));
__atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(struct rrdhost_system_info), __ATOMIC_RELAXED);
get_system_info(system_info);
+ (void) registry_get_this_machine_guid();
system_info->hops = 0;
get_install_type(&system_info->install_type, &system_info->prebuilt_arch, &system_info->prebuilt_dist);
delta_startup_time("initialize RRD structures");
- if(rrd_init(netdata_configured_hostname, system_info, false))
+ if(rrd_init(netdata_configured_hostname, system_info, false)) {
+ set_late_global_environment(system_info);
fatal("Cannot initialize localhost instance with name '%s'.", netdata_configured_hostname);
+ }
delta_startup_time("check for incomplete shutdown");
@@ -2036,8 +2085,7 @@ int main(int argc, char **argv) {
netdata_zero_metrics_enabled = config_get_boolean_ondemand(CONFIG_SECTION_DB, "enable zero metrics", CONFIG_BOOLEAN_NO);
- set_late_global_environment();
-
+ set_late_global_environment(system_info);
for (i = 0; static_threads[i].name != NULL ; i++) {
struct netdata_static_thread *st = &static_threads[i];
@@ -2048,6 +2096,7 @@ int main(int argc, char **argv) {
}
else debug(D_SYSTEM, "Not starting thread %s.", st->name);
}
+ ml_start_threads();
// ------------------------------------------------------------------------
// Initialize netdata agent command serving from cli and signals
@@ -2098,6 +2147,11 @@ int main(int argc, char **argv) {
#endif
// ------------------------------------------------------------------------
+ // initialize WebRTC
+
+ webrtc_initialize();
+
+ // ------------------------------------------------------------------------
// unblock signals
signals_unblock();
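Two related API changes repeat throughout the main.c hunks above: `simple_pattern_create()` now takes a fourth boolean argument (passed as `true` at every call site in this diff), and the `simpletest` code path switches from an `int` result to a `SIMPLE_PATTERN_RESULT` with distinct positive and negative match outcomes. A hedged sketch of the updated calling convention, assembled only from the calls visible in this diff (the precise meaning of the fourth argument is not documented here):

```c
/* Hedged sketch of the updated simple-pattern API, mirroring the simpletest path. */
static void match_example(const char *haystack, const char *needle) {
    char wildcarded[256];

    /* new 4th parameter; every call site in this diff passes true */
    SIMPLE_PATTERN *p = simple_pattern_create(haystack, NULL, SIMPLE_PATTERN_EXACT, true);
    SIMPLE_PATTERN_RESULT ret = simple_pattern_matches_extract(p, needle, wildcarded, sizeof(wildcarded));
    simple_pattern_free(p);

    if (ret == SP_MATCHED_POSITIVE)
        fprintf(stdout, "positive match: '%s' matches '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
    else if (ret == SP_MATCHED_NEGATIVE)
        fprintf(stdout, "negative match: '%s' matches '%s', wildcarded '%s'\n", haystack, needle, wildcarded);
    else
        fprintf(stdout, "no match: '%s' does not match '%s'\n", haystack, needle);
}
```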
diff --git a/daemon/main.h b/daemon/main.h
index 8704d6097..3e32c5ad6 100644
--- a/daemon/main.h
+++ b/daemon/main.h
@@ -33,16 +33,15 @@ typedef enum {
ABILITY_STREAMING_CONNECTIONS = (1 << 2),
SERVICE_MAINTENANCE = (1 << 3),
SERVICE_COLLECTORS = (1 << 4),
- SERVICE_ML_TRAINING = (1 << 5),
- SERVICE_ML_PREDICTION = (1 << 6),
- SERVICE_REPLICATION = (1 << 7),
- SERVICE_WEB_SERVER = (1 << 8),
- SERVICE_ACLK = (1 << 9),
- SERVICE_HEALTH = (1 << 10),
- SERVICE_STREAMING = (1 << 11),
- SERVICE_CONTEXT = (1 << 12),
- SERVICE_ANALYTICS = (1 << 13),
- SERVICE_EXPORTERS = (1 << 14),
+ SERVICE_REPLICATION = (1 << 5),
+ SERVICE_WEB_SERVER = (1 << 6),
+ SERVICE_ACLK = (1 << 7),
+ SERVICE_HEALTH = (1 << 8),
+ SERVICE_STREAMING = (1 << 9),
+ SERVICE_CONTEXT = (1 << 10),
+ SERVICE_ANALYTICS = (1 << 11),
+ SERVICE_EXPORTERS = (1 << 12),
+ SERVICE_ACLKSYNC = (1 << 13)
} SERVICE_TYPE;
typedef enum {
diff --git a/daemon/service.c b/daemon/service.c
index 9761abd02..57c7c7f39 100644
--- a/daemon/service.c
+++ b/daemon/service.c
@@ -55,7 +55,7 @@ static void svc_rrddim_obsolete_to_archive(RRDDIM *rd) {
if(rd->tiers[tier].db_collection_handle) {
tiers_available++;
- if(rd->tiers[tier].collect_ops->finalize(rd->tiers[tier].db_collection_handle))
+ if(storage_engine_store_finalize(rd->tiers[tier].db_collection_handle))
tiers_said_no_retention++;
rd->tiers[tier].db_collection_handle = NULL;
diff --git a/daemon/system-info.sh b/daemon/system-info.sh
index 1e334a3d1..43f761c2d 100755
--- a/daemon/system-info.sh
+++ b/daemon/system-info.sh
@@ -96,6 +96,11 @@ if [ "${CONTAINER}" = "unknown" ]; then
CONT_DETECTION="dockerenv"
fi
+ if [ -n "${KUBERNETES_SERVICE_HOST}" ]; then
+ CONTAINER="container"
+ CONT_DETECTION="kubernetes"
+ fi
+
fi
# -------------------------------------------------------------------------------------------------
@@ -391,7 +396,7 @@ else
# These translate to the prefixs of files in `/dev` indicating the device type.
# They are sorted by lowest used device major number, with dynamically assigned ones at the end.
# We use this to look up device major numbers in `/proc/devices`
- device_names='hd sd mfm ad ftl pd nftl dasd intfl mmcblk ub xvd rfd vbd nvme virtblk blkext'
+ device_names='hd sd mfm ad ftl pd nftl dasd intfl mmcblk mmc ub xvd rfd vbd nvme virtblk blkext'
for name in ${device_names}; do
if grep -qE " ${name}\$" /proc/devices; then
@@ -457,7 +462,7 @@ if [ "${VIRTUALIZATION}" != "none" ] && command -v curl > /dev/null 2>&1; then
# Try GCE computeMetadata v1
if [ "${CLOUD_TYPE}" = "unknown" ]; then
- if [ -n "$(curl --fail -s --connect-timeout 1 -m 3 --noproxy "*" -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1")" ]; then
+ if curl --fail -s --connect-timeout 1 -m 3 --noproxy "*" -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1" | grep -sq computeMetadata; then
CLOUD_TYPE="GCP"
CLOUD_INSTANCE_TYPE="$(curl --fail -s --connect-timeout 1 -m 3 --noproxy "*" -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/machine-type")"
[ -n "$CLOUD_INSTANCE_TYPE" ] && CLOUD_INSTANCE_TYPE=$(basename "$CLOUD_INSTANCE_TYPE")
@@ -466,16 +471,15 @@ if [ "${VIRTUALIZATION}" != "none" ] && command -v curl > /dev/null 2>&1; then
fi
fi
- # TODO: needs to be tested in Microsoft Azure
# Try Azure IMDS
- # if [ "${CLOUD_TYPE}" = "unknown" ]; then
- # AZURE_IMDS_DATA="$(curl --fail -s -m 5 -H "Metadata: true" --noproxy "*" "http://169.254.169.254/metadata/instance?version=2021-10-01")"
- # if [ -n "${AZURE_IMDS_DATA}" ]; then
- # CLOUD_TYPE="Azure"
- # CLOUD_INSTANCE_TYPE="$(curl --fail -s -m 5 -H "Metadata: true" --noproxy "*" "http://169.254.169.254/metadata/instance/compute/vmSize?version=2021-10-01&format=text")"
- # CLOUD_INSTANCE_REGION="$(curl --fail -s -m 5 -H "Metadata: true" --noproxy "*" "http://169.254.169.254/metadata/instance/compute/location?version=2021-10-01&format=text")"
- # fi
- # fi
+ if [ "${CLOUD_TYPE}" = "unknown" ]; then
+ AZURE_IMDS_DATA="$(curl --fail -s --connect-timeout 1 -m 3 -H "Metadata: true" --noproxy "*" "http://169.254.169.254/metadata/instance?api-version=2021-10-01")"
+ if [ -n "${AZURE_IMDS_DATA}" ] && echo "${AZURE_IMDS_DATA}" | grep -sq azEnvironment; then
+ CLOUD_TYPE="Azure"
+ CLOUD_INSTANCE_TYPE="$(curl --fail -s --connect-timeout 1 -m 3 -H "Metadata: true" --noproxy "*" "http://169.254.169.254/metadata/instance/compute/vmSize?api-version=2021-10-01&format=text")"
+ CLOUD_INSTANCE_REGION="$(curl --fail -s --connect-timeout 1 -m 3 -H "Metadata: true" --noproxy "*" "http://169.254.169.254/metadata/instance/compute/location?api-version=2021-10-01&format=text")"
+ fi
+ fi
fi
fi
diff --git a/daemon/unit_test.c b/daemon/unit_test.c
index 52b55c4e5..fa3fa847f 100644
--- a/daemon/unit_test.c
+++ b/daemon/unit_test.c
@@ -68,17 +68,36 @@ static int check_number_printing(void) {
{ .n = 0.000000001, .correct = "0" },
{ .n = 99.99999999999999999, .correct = "100" },
{ .n = -99.99999999999999999, .correct = "-100" },
+ { .n = 123.4567899123456789, .correct = "123.4567899" },
{ .n = 123.4567890123456789, .correct = "123.456789" },
+ { .n = 123.4567800123456789, .correct = "123.45678" },
+ { .n = 123.4567000123456789, .correct = "123.4567" },
+ { .n = 123.4560000123456789, .correct = "123.456" },
+ { .n = 123.4500000123456789, .correct = "123.45" },
+ { .n = 123.4000000123456789, .correct = "123.4" },
+ { .n = 123.0000000123456789, .correct = "123" },
+ { .n = 123.0000000923456789, .correct = "123.0000001" },
+ { .n = 4294967295.123456789, .correct = "4294967295.123457" },
+ { .n = 8294967295.123456789, .correct = "8294967295.123457" },
+ { .n = 1.000000000000002e+19, .correct = "1.000000000000001998e+19" },
+ { .n = 9.2233720368547676e+18, .correct = "9.223372036854767584e+18" },
+ { .n = 18446744073709541376.0, .correct = "1.84467440737095424e+19" },
+ { .n = 18446744073709551616.0, .correct = "1.844674407370955136e+19" },
+ { .n = 12318446744073710600192.0, .correct = "1.231844674407371008e+22" },
+ { .n = 1677721499999999885312.0, .correct = "1.677721499999999872e+21" },
+ { .n = -1677721499999999885312.0, .correct = "-1.677721499999999872e+21" },
+ { .n = -1.677721499999999885312e40, .correct = "-1.677721499999999872e+40" },
+ { .n = -16777214999999997337621690403742592008192.0, .correct = "-1.677721499999999616e+40" },
{ .n = 9999.9999999, .correct = "9999.9999999" },
{ .n = -9999.9999999, .correct = "-9999.9999999" },
{ .n = 0, .correct = NULL },
};
- char netdata[50], system[50];
+ char netdata[512 + 2], system[512 + 2];
int i, failed = 0;
for(i = 0; values[i].correct ; i++) {
print_netdata_double(netdata, values[i].n);
- snprintfz(system, 49, "%0.12" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE)values[i].n);
+ snprintfz(system, 512, "%0.12" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE)values[i].n);
int ok = 1;
if(strcmp(netdata, values[i].correct) != 0) {
@@ -86,7 +105,18 @@ static int check_number_printing(void) {
failed++;
}
- fprintf(stderr, "'%s' (system) printed as '%s' (netdata): %s\n", system, netdata, ok?"OK":"FAILED");
+ NETDATA_DOUBLE parsed_netdata = str2ndd(netdata, NULL);
+ NETDATA_DOUBLE parsed_system = strtondd(netdata, NULL);
+
+ if(parsed_system != parsed_netdata)
+ failed++;
+
+ fprintf(stderr, "[%d]. '%s' (system) printed as '%s' (netdata): PRINT %s, "
+ "PARSED %0.12" NETDATA_DOUBLE_MODIFIER " (system), %0.12" NETDATA_DOUBLE_MODIFIER " (netdata): %s\n",
+ i,
+ system, netdata, ok?"OK":"FAILED",
+ parsed_system, parsed_netdata,
+ parsed_netdata == parsed_system ? "OK" : "FAILED");
}
if(failed) return 1;
@@ -395,9 +425,35 @@ int unit_test_storage() {
}
int unit_test_str2ld() {
+ is_system_ieee754_double();
+
char *values[] = {
- "1.2345678", "-35.6", "0.00123", "23842384234234.2", ".1", "1.2e-10",
- "hello", "1wrong", "nan", "inf", NULL
+ "1.2345678",
+ "-35.6",
+ "0.00123",
+ "23842384234234.2",
+ ".1",
+ "1.2e-10",
+ "18446744073709551616.0",
+ "18446744073709551616123456789123456789123456789123456789123456789123456789123456789.0",
+ "1.8446744073709551616123456789123456789123456789123456789123456789123456789123456789e+300",
+ "9.",
+ "9.e2",
+ "1.2e",
+ "1.2e+",
+ "1.2e-",
+ "1.2e0",
+ "1.2e-0",
+ "1.2e+0",
+ "-1.2e+1",
+ "-1.2e-1",
+ "1.2e1",
+ "1.2e400",
+ "hello",
+ "1wrong",
+ "nan",
+ "inf",
+ NULL
};
int i;
@@ -427,7 +483,8 @@ int unit_test_str2ld() {
}
if(e_mine != e_sys) {
- fprintf(stderr, "Value '%s' is parsed correctly, but endptr is not right\n", values[i]);
+ fprintf(stderr, "Value '%s' is parsed correctly, but endptr is not right (netdata returned %d, but system returned %d)\n",
+ values[i], (int)(e_mine - values[i]), (int)(e_sys - values[i]));
return -1;
}
@@ -1880,7 +1937,7 @@ static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS
// feed it with the test data
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0 ; j < DIMS ; ++j) {
- rd[i][j]->tiers[0].collect_ops->change_collection_frequency(rd[i][j]->tiers[0].db_collection_handle, update_every);
+ storage_engine_store_change_collection_frequency(rd[i][j]->tiers[0].db_collection_handle, update_every);
rd[i][j]->last_collected_time.tv_sec =
st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = time_now;
@@ -1931,13 +1988,13 @@ static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DI
time_now = time_start + (c + 1) * update_every;
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0; j < DIMS; ++j) {
- rd[i][j]->tiers[0].query_ops->init(rd[i][j]->tiers[0].db_metric_handle, &handle, time_now, time_now + QUERY_BATCH * update_every, STORAGE_PRIORITY_NORMAL);
+ storage_engine_query_init(rd[i][j]->tiers[0].backend, rd[i][j]->tiers[0].db_metric_handle, &handle, time_now, time_now + QUERY_BATCH * update_every, STORAGE_PRIORITY_NORMAL);
for (k = 0; k < QUERY_BATCH; ++k) {
last = ((collected_number)i * DIMS) * REGION_POINTS[current_region] +
j * REGION_POINTS[current_region] + c + k;
expected = unpack_storage_number(pack_storage_number((NETDATA_DOUBLE)last, SN_DEFAULT_FLAGS));
- STORAGE_POINT sp = rd[i][j]->tiers[0].query_ops->next_metric(&handle);
+ STORAGE_POINT sp = storage_engine_query_next_metric(&handle);
value = sp.sum;
time_retrieved = sp.start_time_s;
end_time = sp.end_time_s;
@@ -1959,7 +2016,7 @@ static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DI
errors++;
}
}
- rd[i][j]->tiers[0].query_ops->finalize(&handle);
+ storage_engine_query_finalize(&handle);
}
}
}
@@ -2327,7 +2384,7 @@ void generate_dbengine_dataset(unsigned history_seconds)
}
freez(thread_info);
rrd_wrlock();
- rrdhost_free___while_having_rrd_wrlock(host, true);
+ rrdhost_free___while_having_rrd_wrlock(localhost, true);
rrd_unlock();
}
@@ -2387,13 +2444,13 @@ static void query_dbengine_chart(void *arg)
time_before = MIN(time_after + duration, time_max); /* up to 1 hour queries */
}
- rd->tiers[0].query_ops->init(rd->tiers[0].db_metric_handle, &handle, time_after, time_before, STORAGE_PRIORITY_NORMAL);
+ storage_engine_query_init(rd->tiers[0].backend, rd->tiers[0].db_metric_handle, &handle, time_after, time_before, STORAGE_PRIORITY_NORMAL);
++thread_info->queries_nr;
for (time_now = time_after ; time_now <= time_before ; time_now += update_every) {
generatedv = generate_dbengine_chart_value(i, j, time_now);
expected = unpack_storage_number(pack_storage_number((NETDATA_DOUBLE) generatedv, SN_DEFAULT_FLAGS));
- if (unlikely(rd->tiers[0].query_ops->is_finished(&handle))) {
+ if (unlikely(storage_engine_query_is_finished(&handle))) {
if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value " NETDATA_DOUBLE_FORMAT
", found data gap, ### E R R O R ###\n",
@@ -2403,7 +2460,7 @@ static void query_dbengine_chart(void *arg)
break;
}
- STORAGE_POINT sp = rd->tiers[0].query_ops->next_metric(&handle);
+ STORAGE_POINT sp = storage_engine_query_next_metric(&handle);
value = sp.sum;
time_retrieved = sp.start_time_s;
end_time = sp.end_time_s;
@@ -2441,7 +2498,7 @@ static void query_dbengine_chart(void *arg)
}
}
}
- rd->tiers[0].query_ops->finalize(&handle);
+ storage_engine_query_finalize(&handle);
} while(!thread_info->done);
if(value_errors)
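The service.c and unit_test.c hunks replace the per-tier `query_ops`/`collect_ops` function pointers with `storage_engine_*` wrapper calls. A hedged sketch of the resulting read loop, put together only from the calls and `STORAGE_POINT` fields visible in these hunks (the handle type name is an assumption; the tests keep using whatever `handle` they already declared):

```c
/* Hedged sketch of the wrapper-based query loop used by the updated tests. */
static void read_dimension(RRDDIM *rd, time_t after, time_t before, time_t update_every) {
    struct storage_engine_query_handle handle;    /* assumed handle type */

    storage_engine_query_init(rd->tiers[0].backend, rd->tiers[0].db_metric_handle,
                              &handle, after, before, STORAGE_PRIORITY_NORMAL);

    for (time_t t = after; t <= before; t += update_every) {
        if (unlikely(storage_engine_query_is_finished(&handle)))
            break;                                /* no more stored points in this window */

        STORAGE_POINT sp = storage_engine_query_next_metric(&handle);
        fprintf(stderr, "[%ld .. %ld] sum=" NETDATA_DOUBLE_FORMAT "\n",
                (long)sp.start_time_s, (long)sp.end_time_s, sp.sum);
    }

    storage_engine_query_finalize(&handle);
}
```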