author    Daniel Baumann <daniel.baumann@progress-linux.org>  2023-07-20 04:50:01 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2023-07-20 04:50:01 +0000
commit    cd4377fab21e0f500bef7f06543fa848a039c1e0 (patch)
tree      ba00a55e430c052d6bed0b61c0f8bbe8ebedd313 /daemon
parent    Releasing debian version 1.40.1-1. (diff)
Merging upstream version 1.41.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'daemon')
-rw-r--r--  daemon/analytics.c          |  131
-rw-r--r--  daemon/buildinfo.c          | 1727
-rw-r--r--  daemon/buildinfo.h          |    2
-rw-r--r--  daemon/commands.c           |   92
-rw-r--r--  daemon/commands.h           |    6
-rw-r--r--  daemon/common.c             |  149
-rw-r--r--  daemon/common.h             |   29
-rw-r--r--  daemon/config/README.md     |    2
-rw-r--r--  daemon/daemon.c             |  162
-rw-r--r--  daemon/event_loop.c         |    7
-rw-r--r--  daemon/global_statistics.c  |   17
-rw-r--r--  daemon/main.c               |  275
-rw-r--r--  daemon/metrics.csv          |  254
-rw-r--r--  daemon/pipename.c           |   17
-rw-r--r--  daemon/pipename.h           |    8
-rw-r--r--  daemon/service.c            |   14
-rw-r--r--  daemon/signals.c            |   44
-rw-r--r--  daemon/static_threads.c     |   16
-rw-r--r--  daemon/unit_test.c          |  249
-rw-r--r--  daemon/unit_test.h          |    2
20 files changed, 2355 insertions(+), 848 deletions(-)
diff --git a/daemon/analytics.c b/daemon/analytics.c
index 2689886b..9323c8e8 100644
--- a/daemon/analytics.c
+++ b/daemon/analytics.c
@@ -24,45 +24,45 @@ struct array_printer {
*/
void analytics_log_data(void)
{
- debug(D_ANALYTICS, "NETDATA_CONFIG_STREAM_ENABLED : [%s]", analytics_data.netdata_config_stream_enabled);
- debug(D_ANALYTICS, "NETDATA_CONFIG_MEMORY_MODE : [%s]", analytics_data.netdata_config_memory_mode);
- debug(D_ANALYTICS, "NETDATA_CONFIG_EXPORTING_ENABLED : [%s]", analytics_data.netdata_config_exporting_enabled);
- debug(D_ANALYTICS, "NETDATA_EXPORTING_CONNECTORS : [%s]", analytics_data.netdata_exporting_connectors);
- debug(D_ANALYTICS, "NETDATA_ALLMETRICS_PROMETHEUS_USED : [%s]", analytics_data.netdata_allmetrics_prometheus_used);
- debug(D_ANALYTICS, "NETDATA_ALLMETRICS_SHELL_USED : [%s]", analytics_data.netdata_allmetrics_shell_used);
- debug(D_ANALYTICS, "NETDATA_ALLMETRICS_JSON_USED : [%s]", analytics_data.netdata_allmetrics_json_used);
- debug(D_ANALYTICS, "NETDATA_DASHBOARD_USED : [%s]", analytics_data.netdata_dashboard_used);
- debug(D_ANALYTICS, "NETDATA_COLLECTORS : [%s]", analytics_data.netdata_collectors);
- debug(D_ANALYTICS, "NETDATA_COLLECTORS_COUNT : [%s]", analytics_data.netdata_collectors_count);
- debug(D_ANALYTICS, "NETDATA_BUILDINFO : [%s]", analytics_data.netdata_buildinfo);
- debug(D_ANALYTICS, "NETDATA_CONFIG_PAGE_CACHE_SIZE : [%s]", analytics_data.netdata_config_page_cache_size);
- debug(D_ANALYTICS, "NETDATA_CONFIG_MULTIDB_DISK_QUOTA : [%s]", analytics_data.netdata_config_multidb_disk_quota);
- debug(D_ANALYTICS, "NETDATA_CONFIG_HTTPS_ENABLED : [%s]", analytics_data.netdata_config_https_enabled);
- debug(D_ANALYTICS, "NETDATA_CONFIG_WEB_ENABLED : [%s]", analytics_data.netdata_config_web_enabled);
- debug(D_ANALYTICS, "NETDATA_CONFIG_RELEASE_CHANNEL : [%s]", analytics_data.netdata_config_release_channel);
- debug(D_ANALYTICS, "NETDATA_MIRRORED_HOST_COUNT : [%s]", analytics_data.netdata_mirrored_host_count);
- debug(D_ANALYTICS, "NETDATA_MIRRORED_HOSTS_REACHABLE : [%s]", analytics_data.netdata_mirrored_hosts_reachable);
- debug(D_ANALYTICS, "NETDATA_MIRRORED_HOSTS_UNREACHABLE : [%s]", analytics_data.netdata_mirrored_hosts_unreachable);
- debug(D_ANALYTICS, "NETDATA_NOTIFICATION_METHODS : [%s]", analytics_data.netdata_notification_methods);
- debug(D_ANALYTICS, "NETDATA_ALARMS_NORMAL : [%s]", analytics_data.netdata_alarms_normal);
- debug(D_ANALYTICS, "NETDATA_ALARMS_WARNING : [%s]", analytics_data.netdata_alarms_warning);
- debug(D_ANALYTICS, "NETDATA_ALARMS_CRITICAL : [%s]", analytics_data.netdata_alarms_critical);
- debug(D_ANALYTICS, "NETDATA_CHARTS_COUNT : [%s]", analytics_data.netdata_charts_count);
- debug(D_ANALYTICS, "NETDATA_METRICS_COUNT : [%s]", analytics_data.netdata_metrics_count);
- debug(D_ANALYTICS, "NETDATA_CONFIG_IS_PARENT : [%s]", analytics_data.netdata_config_is_parent);
- debug(D_ANALYTICS, "NETDATA_CONFIG_HOSTS_AVAILABLE : [%s]", analytics_data.netdata_config_hosts_available);
- debug(D_ANALYTICS, "NETDATA_HOST_CLOUD_AVAILABLE : [%s]", analytics_data.netdata_host_cloud_available);
- debug(D_ANALYTICS, "NETDATA_HOST_ACLK_AVAILABLE : [%s]", analytics_data.netdata_host_aclk_available);
- debug(D_ANALYTICS, "NETDATA_HOST_ACLK_PROTOCOL : [%s]", analytics_data.netdata_host_aclk_protocol);
- debug(D_ANALYTICS, "NETDATA_HOST_ACLK_IMPLEMENTATION : [%s]", analytics_data.netdata_host_aclk_implementation);
- debug(D_ANALYTICS, "NETDATA_HOST_AGENT_CLAIMED : [%s]", analytics_data.netdata_host_agent_claimed);
- debug(D_ANALYTICS, "NETDATA_HOST_CLOUD_ENABLED : [%s]", analytics_data.netdata_host_cloud_enabled);
- debug(D_ANALYTICS, "NETDATA_CONFIG_HTTPS_AVAILABLE : [%s]", analytics_data.netdata_config_https_available);
- debug(D_ANALYTICS, "NETDATA_INSTALL_TYPE : [%s]", analytics_data.netdata_install_type);
- debug(D_ANALYTICS, "NETDATA_PREBUILT_DISTRO : [%s]", analytics_data.netdata_prebuilt_distro);
- debug(D_ANALYTICS, "NETDATA_CONFIG_IS_PRIVATE_REGISTRY : [%s]", analytics_data.netdata_config_is_private_registry);
- debug(D_ANALYTICS, "NETDATA_CONFIG_USE_PRIVATE_REGISTRY: [%s]", analytics_data.netdata_config_use_private_registry);
- debug(D_ANALYTICS, "NETDATA_CONFIG_OOM_SCORE : [%s]", analytics_data.netdata_config_oom_score);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_STREAM_ENABLED : [%s]", analytics_data.netdata_config_stream_enabled);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_MEMORY_MODE : [%s]", analytics_data.netdata_config_memory_mode);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_EXPORTING_ENABLED : [%s]", analytics_data.netdata_config_exporting_enabled);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_EXPORTING_CONNECTORS : [%s]", analytics_data.netdata_exporting_connectors);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_ALLMETRICS_PROMETHEUS_USED : [%s]", analytics_data.netdata_allmetrics_prometheus_used);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_ALLMETRICS_SHELL_USED : [%s]", analytics_data.netdata_allmetrics_shell_used);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_ALLMETRICS_JSON_USED : [%s]", analytics_data.netdata_allmetrics_json_used);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_DASHBOARD_USED : [%s]", analytics_data.netdata_dashboard_used);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_COLLECTORS : [%s]", analytics_data.netdata_collectors);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_COLLECTORS_COUNT : [%s]", analytics_data.netdata_collectors_count);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_BUILDINFO : [%s]", analytics_data.netdata_buildinfo);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_PAGE_CACHE_SIZE : [%s]", analytics_data.netdata_config_page_cache_size);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_MULTIDB_DISK_QUOTA : [%s]", analytics_data.netdata_config_multidb_disk_quota);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_HTTPS_ENABLED : [%s]", analytics_data.netdata_config_https_enabled);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_WEB_ENABLED : [%s]", analytics_data.netdata_config_web_enabled);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_RELEASE_CHANNEL : [%s]", analytics_data.netdata_config_release_channel);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_MIRRORED_HOST_COUNT : [%s]", analytics_data.netdata_mirrored_host_count);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_MIRRORED_HOSTS_REACHABLE : [%s]", analytics_data.netdata_mirrored_hosts_reachable);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_MIRRORED_HOSTS_UNREACHABLE : [%s]", analytics_data.netdata_mirrored_hosts_unreachable);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_NOTIFICATION_METHODS : [%s]", analytics_data.netdata_notification_methods);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_ALARMS_NORMAL : [%s]", analytics_data.netdata_alarms_normal);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_ALARMS_WARNING : [%s]", analytics_data.netdata_alarms_warning);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_ALARMS_CRITICAL : [%s]", analytics_data.netdata_alarms_critical);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CHARTS_COUNT : [%s]", analytics_data.netdata_charts_count);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_METRICS_COUNT : [%s]", analytics_data.netdata_metrics_count);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_IS_PARENT : [%s]", analytics_data.netdata_config_is_parent);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_HOSTS_AVAILABLE : [%s]", analytics_data.netdata_config_hosts_available);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_HOST_CLOUD_AVAILABLE : [%s]", analytics_data.netdata_host_cloud_available);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_HOST_ACLK_AVAILABLE : [%s]", analytics_data.netdata_host_aclk_available);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_HOST_ACLK_PROTOCOL : [%s]", analytics_data.netdata_host_aclk_protocol);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_HOST_ACLK_IMPLEMENTATION : [%s]", analytics_data.netdata_host_aclk_implementation);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_HOST_AGENT_CLAIMED : [%s]", analytics_data.netdata_host_agent_claimed);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_HOST_CLOUD_ENABLED : [%s]", analytics_data.netdata_host_cloud_enabled);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_HTTPS_AVAILABLE : [%s]", analytics_data.netdata_config_https_available);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_INSTALL_TYPE : [%s]", analytics_data.netdata_install_type);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_PREBUILT_DISTRO : [%s]", analytics_data.netdata_prebuilt_distro);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_IS_PRIVATE_REGISTRY : [%s]", analytics_data.netdata_config_is_private_registry);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_USE_PRIVATE_REGISTRY: [%s]", analytics_data.netdata_config_use_private_registry);
+ netdata_log_debug(D_ANALYTICS, "NETDATA_CONFIG_OOM_SCORE : [%s]", analytics_data.netdata_config_oom_score);
}
/*
@@ -314,7 +314,7 @@ void analytics_alarms_notifications(void)
sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("alarm-notify.sh dump_methods") + 2));
sprintf(script, "%s/%s", netdata_configured_primary_plugins_dir, "alarm-notify.sh");
if (unlikely(access(script, R_OK) != 0)) {
- info("Alarm notify script %s not found.", script);
+ netdata_log_info("Alarm notify script %s not found.", script);
freez(script);
return;
}
@@ -323,7 +323,7 @@ void analytics_alarms_notifications(void)
pid_t command_pid;
- debug(D_ANALYTICS, "Executing %s", script);
+ netdata_log_debug(D_ANALYTICS, "Executing %s", script);
BUFFER *b = buffer_create(1000, NULL);
int cnt = 0;
@@ -561,7 +561,7 @@ void analytics_main_cleanup(void *ptr)
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- debug(D_ANALYTICS, "Cleaning up...");
+ netdata_log_debug(D_ANALYTICS, "Cleaning up...");
analytics_free_data();
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
@@ -581,7 +581,7 @@ void *analytics_main(void *ptr)
heartbeat_init(&hb);
usec_t step_ut = USEC_PER_SEC;
- debug(D_ANALYTICS, "Analytics thread starts");
+ netdata_log_debug(D_ANALYTICS, "Analytics thread starts");
//first delay after agent start
while (service_running(SERVICE_ANALYTICS) && likely(sec <= ANALYTICS_INIT_SLEEP_SEC)) {
@@ -632,6 +632,17 @@ static const char *verify_required_directory(const char *dir)
return dir;
}
+static const char *verify_or_create_required_directory(const char *dir) {
+ int result;
+
+ result = mkdir(dir, 0755);
+
+ if (result != 0 && errno != EEXIST)
+ fatal("Cannot create required directory '%s'", dir);
+
+ return verify_required_directory(dir);
+}
+
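The new helper tolerates an already-existing directory (EEXIST) but fails hard on any other mkdir() error, then re-runs the usual permission checks. A minimal standalone sketch of the same create-if-missing pattern, assuming POSIX mkdir()/errno (names here are illustrative, not Netdata's):

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/stat.h>

    /* Create dir with mode 0755; tolerate a pre-existing entry (EEXIST),
       abort on any other failure. */
    static const char *ensure_directory(const char *dir) {
        if (mkdir(dir, 0755) != 0 && errno != EEXIST) {
            fprintf(stderr, "cannot create required directory '%s'\n", dir);
            exit(EXIT_FAILURE);
        }
        return dir;
    }

Note that EEXIST is also returned when a non-directory occupies the path; the follow-up verify_required_directory() call is what catches that case.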
/*
* This is called after the rrdinit
* These values will be sent on the START event
@@ -646,7 +657,7 @@ void set_late_global_environment(struct rrdhost_system_info *system_info)
#else
analytics_set_data(
&analytics_data.netdata_host_cloud_enabled,
- appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", CONFIG_BOOLEAN_YES) ? "true" : "false");
+ appconfig_get_boolean_ondemand(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", netdata_cloud_enabled) ? "true" : "false");
#endif
#ifdef ENABLE_DBENGINE
@@ -698,13 +709,13 @@ void get_system_timezone(void)
// use the TZ variable
if (tz && *tz && *tz != ':') {
timezone = tz;
- info("TIMEZONE: using TZ variable '%s'", timezone);
+ netdata_log_info("TIMEZONE: using TZ variable '%s'", timezone);
}
// use the contents of /etc/timezone
if (!timezone && !read_file("/etc/timezone", buffer, FILENAME_MAX)) {
timezone = buffer;
- info("TIMEZONE: using the contents of /etc/timezone");
+ netdata_log_info("TIMEZONE: using the contents of /etc/timezone");
}
// read the link /etc/localtime
@@ -720,7 +731,7 @@ void get_system_timezone(void)
char *s = strstr(buffer, cmp);
if (s && s[cmp_len]) {
timezone = &s[cmp_len];
- info("TIMEZONE: using the link of /etc/localtime: '%s'", timezone);
+ netdata_log_info("TIMEZONE: using the link of /etc/localtime: '%s'", timezone);
}
} else
buffer[0] = '\0';
@@ -740,14 +751,14 @@ void get_system_timezone(void)
else {
buffer[FILENAME_MAX] = '\0';
timezone = buffer;
- info("TIMEZONE: using strftime(): '%s'", timezone);
+ netdata_log_info("TIMEZONE: using strftime(): '%s'", timezone);
}
}
}
if (timezone && *timezone) {
// make sure it does not have illegal characters
- // info("TIMEZONE: fixing '%s'", timezone);
+ // netdata_log_info("TIMEZONE: fixing '%s'", timezone);
size_t len = strlen(timezone);
char tmp[len + 1];
@@ -763,7 +774,7 @@ void get_system_timezone(void)
*d = '\0';
strncpyz(buffer, tmp, len);
timezone = buffer;
- info("TIMEZONE: fixed as '%s'", timezone);
+ netdata_log_info("TIMEZONE: fixed as '%s'", timezone);
}
if (!timezone || !*timezone)
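The loop above strips characters that could break downstream consumers of the timezone string. A minimal sketch of the same in-place sanitisation idea (the allowed character set here is an assumption for illustration; the exact set Netdata accepts may differ):

    #include <ctype.h>

    /* Replace anything outside [A-Za-z0-9 _ / + - :] with '_', in place. */
    static void sanitize_timezone(char *tz) {
        for (char *p = tz; *p; p++) {
            if (!isalnum((unsigned char)*p) &&
                *p != '_' && *p != '/' && *p != '+' && *p != '-' && *p != ':')
                *p = '_';
        }
    }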
@@ -827,11 +838,11 @@ void set_global_environment()
setenv("NETDATA_STOCK_CONFIG_DIR", verify_required_directory(netdata_configured_stock_config_dir), 1);
setenv("NETDATA_PLUGINS_DIR", verify_required_directory(netdata_configured_primary_plugins_dir), 1);
setenv("NETDATA_WEB_DIR", verify_required_directory(netdata_configured_web_dir), 1);
- setenv("NETDATA_CACHE_DIR", verify_required_directory(netdata_configured_cache_dir), 1);
- setenv("NETDATA_LIB_DIR", verify_required_directory(netdata_configured_varlib_dir), 1);
- setenv("NETDATA_LOCK_DIR", netdata_configured_lock_dir, 1);
- setenv("NETDATA_LOG_DIR", verify_required_directory(netdata_configured_log_dir), 1);
- setenv("HOME", verify_required_directory(netdata_configured_home_dir), 1);
+ setenv("NETDATA_CACHE_DIR", verify_or_create_required_directory(netdata_configured_cache_dir), 1);
+ setenv("NETDATA_LIB_DIR", verify_or_create_required_directory(netdata_configured_varlib_dir), 1);
+ setenv("NETDATA_LOCK_DIR", verify_or_create_required_directory(netdata_configured_lock_dir), 1);
+ setenv("NETDATA_LOG_DIR", verify_or_create_required_directory(netdata_configured_log_dir), 1);
+ setenv("HOME", verify_or_create_required_directory(netdata_configured_home_dir), 1);
setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1);
{
@@ -944,7 +955,7 @@ void send_statistics(const char *action, const char *action_result, const char *
sprintf(as_script, "%s/%s", netdata_configured_primary_plugins_dir, "anonymous-statistics.sh");
if (unlikely(access(as_script, R_OK) != 0)) {
netdata_anonymous_statistics_enabled = 0;
- info("Anonymous statistics script %s not found.", as_script);
+ netdata_log_info("Anonymous statistics script %s not found.", as_script);
freez(as_script);
} else {
netdata_anonymous_statistics_enabled = 1;
@@ -1015,7 +1026,7 @@ void send_statistics(const char *action, const char *action_result, const char *
analytics_data.netdata_config_oom_score,
analytics_data.netdata_prebuilt_distro);
- info("%s '%s' '%s' '%s'", as_script, action, action_result, action_data);
+ netdata_log_info("%s '%s' '%s' '%s'", as_script, action, action_result, action_data);
FILE *fp_child_input;
FILE *fp_child_output = netdata_popen(command_to_run, &command_pid, &fp_child_input);
@@ -1024,11 +1035,11 @@ void send_statistics(const char *action, const char *action_result, const char *
char *s = fgets(buffer, 4, fp_child_output);
int exit_code = netdata_pclose(fp_child_input, fp_child_output, command_pid);
if (exit_code)
- error("Execution of anonymous statistics script returned %d.", exit_code);
+ netdata_log_error("Execution of anonymous statistics script returned %d.", exit_code);
if (s && strncmp(buffer, "200", 3))
- error("Execution of anonymous statistics script returned http code %s.", buffer);
+ netdata_log_error("Execution of anonymous statistics script returned http code %s.", buffer);
} else {
- error("Failed to run anonymous statistics script %s.", as_script);
+ netdata_log_error("Failed to run anonymous statistics script %s.", as_script);
}
freez(command_to_run);
}
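send_statistics() shells out to anonymous-statistics.sh and validates both the child's exit code and the HTTP status the script prints on stdout. With only the standard library the same flow looks roughly like this (a sketch; netdata_popen()/netdata_pclose() are Netdata's own wrappers with extra child-process bookkeeping):

    #include <stdio.h>
    #include <string.h>

    static int run_and_check(const char *cmd) {
        char buf[4] = "";
        FILE *fp = popen(cmd, "r");
        if (!fp) return -1;                          /* failed to start */
        char *s = fgets(buf, sizeof(buf), fp);       /* first 3 bytes: HTTP code */
        int status = pclose(fp);                     /* wait status of the child */
        if (status) return status;                   /* script exited non-zero */
        if (s && strncmp(buf, "200", 3)) return 1;   /* script reported non-200 */
        return 0;
    }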
diff --git a/daemon/buildinfo.c b/daemon/buildinfo.c
index d277d2b3..56cde84f 100644
--- a/daemon/buildinfo.c
+++ b/daemon/buildinfo.c
@@ -5,209 +5,1301 @@
#include "common.h"
#include "buildinfo.h"
-// Optional features
+typedef enum __attribute__((packed)) {
+ BIB_PACKAGING_NETDATA_VERSION,
+ BIB_PACKAGING_INSTALL_TYPE,
+ BIB_PACKAGING_ARCHITECTURE,
+ BIB_PACKAGING_DISTRO,
+ BIB_PACKAGING_CONFIGURE_OPTIONS,
+ BIB_DIR_USER_CONFIG,
+ BIB_DIR_STOCK_CONFIG,
+ BIB_DIR_CACHE,
+ BIB_DIR_LIB,
+ BIB_DIR_PLUGINS,
+ BIB_DIR_WEB,
+ BIB_DIR_LOG,
+ BIB_DIR_LOCK,
+ BIB_DIR_HOME,
+ BIB_OS_KERNEL_NAME,
+ BIB_OS_KERNEL_VERSION,
+ BIB_OS_NAME,
+ BIB_OS_ID,
+ BIB_OS_ID_LIKE,
+ BIB_OS_VERSION,
+ BIB_OS_VERSION_ID,
+ BIB_OS_DETECTION,
+ BIB_HW_CPU_CORES,
+ BIB_HW_CPU_FREQUENCY,
+ BIB_HW_RAM_SIZE,
+ BIB_HW_DISK_SPACE,
+ BIB_HW_ARCHITECTURE,
+ BIB_HW_VIRTUALIZATION,
+ BIB_HW_VIRTUALIZATION_DETECTION,
+ BIB_CONTAINER_NAME,
+ BIB_CONTAINER_DETECTION,
+ BIB_CONTAINER_ORCHESTRATOR,
+ BIB_CONTAINER_OS_NAME,
+ BIB_CONTAINER_OS_ID,
+ BIB_CONTAINER_OS_ID_LIKE,
+ BIB_CONTAINER_OS_VERSION,
+ BIB_CONTAINER_OS_VERSION_ID,
+ BIB_CONTAINER_OS_DETECTION,
+ BIB_FEATURE_BUILT_FOR,
+ BIB_FEATURE_CLOUD,
+ BIB_FEATURE_HEALTH,
+ BIB_FEATURE_STREAMING,
+ BIB_FEATURE_REPLICATION,
+ BIB_FEATURE_STREAMING_COMPRESSION,
+ BIB_FEATURE_CONTEXTS,
+ BIB_FEATURE_TIERING,
+ BIB_FEATURE_ML,
+ BIB_DB_DBENGINE,
+ BIB_DB_ALLOC,
+ BIB_DB_RAM,
+ BIB_DB_MAP,
+ BIB_DB_SAVE,
+ BIB_DB_NONE,
+ BIB_CONNECTIVITY_ACLK,
+ BIB_CONNECTIVITY_HTTPD_STATIC,
+ BIB_CONNECTIVITY_HTTPD_H2O,
+ BIB_CONNECTIVITY_WEBRTC,
+ BIB_CONNECTIVITY_NATIVE_HTTPS,
+ BIB_CONNECTIVITY_TLS_HOST_VERIFY,
+ BIB_LIB_LZ4,
+ BIB_LIB_ZLIB,
+ BIB_LIB_JUDY,
+ BIB_LIB_DLIB,
+ BIB_LIB_PROTOBUF,
+ BIB_LIB_OPENSSL,
+ BIB_LIB_LIBDATACHANNEL,
+ BIB_LIB_JSONC,
+ BIB_LIB_LIBCAP,
+ BIB_LIB_LIBCRYPTO,
+ BIB_LIB_LIBM,
+ BIB_LIB_JEMALLOC,
+ BIB_LIB_TCMALLOC,
+ BIB_PLUGIN_APPS,
+ BIB_PLUGIN_LINUX_CGROUPS,
+ BIB_PLUGIN_LINUX_CGROUP_NETWORK,
+ BIB_PLUGIN_LINUX_PROC,
+ BIB_PLUGIN_LINUX_TC,
+ BIB_PLUGIN_LINUX_DISKSPACE,
+ BIB_PLUGIN_FREEBSD,
+ BIB_PLUGIN_MACOS,
+ BIB_PLUGIN_STATSD,
+ BIB_PLUGIN_TIMEX,
+ BIB_PLUGIN_IDLEJITTER,
+ BIB_PLUGIN_BASH,
+ BIB_PLUGIN_DEBUGFS,
+ BIB_PLUGIN_CUPS,
+ BIB_PLUGIN_EBPF,
+ BIB_PLUGIN_FREEIPMI,
+ BIB_PLUGIN_NFACCT,
+ BIB_PLUGIN_PERF,
+ BIB_PLUGIN_SLABINFO,
+ BIB_PLUGIN_XEN,
+ BIB_PLUGIN_XEN_VBD_ERROR,
+ BIB_EXPORT_AWS_KINESIS,
+ BIB_EXPORT_GCP_PUBSUB,
+ BIB_EXPORT_MONGOC,
+ BIB_EXPORT_PROMETHEUS_EXPORTER,
+ BIB_EXPORT_PROMETHEUS_REMOTE_WRITE,
+ BIB_EXPORT_GRAPHITE,
+ BIB_EXPORT_GRAPHITE_HTTP,
+ BIB_EXPORT_JSON,
+ BIB_EXPORT_JSON_HTTP,
+ BIB_EXPORT_OPENTSDB,
+ BIB_EXPORT_OPENTSDB_HTTP,
+ BIB_EXPORT_ALLMETRICS,
+ BIB_EXPORT_SHELL,
+ BIB_DEVEL_TRACE_ALLOCATIONS,
+ BIB_DEVELOPER_MODE,
+
+ // leave this last
+ BIB_TERMINATOR,
+} BUILD_INFO_SLOT;
+
+typedef enum __attribute__((packed)) {
+ BIC_PACKAGING,
+ BIC_DIRECTORIES,
+ BIC_OPERATING_SYSTEM,
+ BIC_HARDWARE,
+ BIC_CONTAINER,
+ BIC_FEATURE,
+ BIC_DATABASE,
+ BIC_CONNECTIVITY,
+ BIC_LIBS,
+ BIC_PLUGINS,
+ BIC_EXPORTERS,
+ BIC_DEBUG_DEVEL
+} BUILD_INFO_CATEGORY;
+
+typedef enum __attribute__((packed)) {
+ BIT_BOOLEAN,
+ BIT_STRING,
+} BUILD_INFO_TYPE;
+
+static struct {
+ BUILD_INFO_CATEGORY category;
+ BUILD_INFO_TYPE type;
+ const char *analytics;
+ const char *print;
+ const char *json;
+ bool status;
+ const char *value;
+} BUILD_INFO[] = {
+ [BIB_PACKAGING_NETDATA_VERSION] = {
+ .category = BIC_PACKAGING,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Netdata Version",
+ .json = "version",
+ .value = "unknown",
+ },
+ [BIB_PACKAGING_INSTALL_TYPE] = {
+ .category = BIC_PACKAGING,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Installation Type",
+ .json = "type",
+ .value = "unknown",
+ },
+ [BIB_PACKAGING_ARCHITECTURE] = {
+ .category = BIC_PACKAGING,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Package Architecture",
+ .json = "arch",
+ .value = "unknown",
+ },
+ [BIB_PACKAGING_DISTRO] = {
+ .category = BIC_PACKAGING,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Package Distro",
+ .json = "distro",
+ .value = "unknown",
+ },
+ [BIB_PACKAGING_CONFIGURE_OPTIONS] = {
+ .category = BIC_PACKAGING,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Configure Options",
+ .json = "configure",
+ .value = "unknown",
+ },
+ [BIB_DIR_USER_CONFIG] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "User Configurations",
+ .json = "user_config",
+ .value = CONFIG_DIR,
+ },
+ [BIB_DIR_STOCK_CONFIG] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Stock Configurations",
+ .json = "stock_config",
+ .value = LIBCONFIG_DIR,
+ },
+ [BIB_DIR_CACHE] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Ephemeral Databases (metrics data, metadata)",
+ .json = "ephemeral_db",
+ .value = CACHE_DIR,
+ },
+ [BIB_DIR_LIB] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Permanent Databases",
+ .json = "permanent_db",
+ .value = VARLIB_DIR,
+ },
+ [BIB_DIR_PLUGINS] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Plugins",
+ .json = "plugins",
+ .value = PLUGINS_DIR,
+ },
+ [BIB_DIR_WEB] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Static Web Files",
+ .json = "web",
+ .value = WEB_DIR,
+ },
+ [BIB_DIR_LOG] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Log Files",
+ .json = "logs",
+ .value = LOG_DIR,
+ },
+ [BIB_DIR_LOCK] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Lock Files",
+ .json = "locks",
+ .value = VARLIB_DIR "/lock",
+ },
+ [BIB_DIR_HOME] = {
+ .category = BIC_DIRECTORIES,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Home",
+ .json = "home",
+ .value = VARLIB_DIR,
+ },
+ [BIB_OS_KERNEL_NAME] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Kernel",
+ .json = "kernel",
+ .value = "unknown",
+ },
+ [BIB_OS_KERNEL_VERSION] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Kernel Version",
+ .json = "kernel_version",
+ .value = "unknown",
+ },
+ [BIB_OS_NAME] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Operating System",
+ .json = "os",
+ .value = "unknown",
+ },
+ [BIB_OS_ID] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Operating System ID",
+ .json = "id",
+ .value = "unknown",
+ },
+ [BIB_OS_ID_LIKE] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Operating System ID Like",
+ .json = "id_like",
+ .value = "unknown",
+ },
+ [BIB_OS_VERSION] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Operating System Version",
+ .json = "version",
+ .value = "unknown",
+ },
+ [BIB_OS_VERSION_ID] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Operating System Version ID",
+ .json = "version_id",
+ .value = "unknown",
+ },
+ [BIB_OS_DETECTION] = {
+ .category = BIC_OPERATING_SYSTEM,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Detection",
+ .json = "detection",
+ .value = "unknown",
+ },
+ [BIB_HW_CPU_CORES] = {
+ .category = BIC_HARDWARE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "CPU Cores",
+ .json = "cpu_cores",
+ .value = "unknown",
+ },
+ [BIB_HW_CPU_FREQUENCY] = {
+ .category = BIC_HARDWARE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "CPU Frequency",
+ .json = "cpu_frequency",
+ .value = "unknown",
+ },
+ [BIB_HW_RAM_SIZE] = {
+ .category = BIC_HARDWARE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "RAM Bytes",
+ .json = "ram",
+ .value = "unknown",
+ },
+ [BIB_HW_DISK_SPACE] = {
+ .category = BIC_HARDWARE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Disk Capacity",
+ .json = "disk",
+ .value = "unknown",
+ },
+ [BIB_HW_ARCHITECTURE] = {
+ .category = BIC_HARDWARE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "CPU Architecture",
+ .json = "cpu_architecture",
+ .value = "unknown",
+ },
+ [BIB_HW_VIRTUALIZATION] = {
+ .category = BIC_HARDWARE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Virtualization Technology",
+ .json = "virtualization",
+ .value = "unknown",
+ },
+ [BIB_HW_VIRTUALIZATION_DETECTION] = {
+ .category = BIC_HARDWARE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Virtualization Detection",
+ .json = "virtualization_detection",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_NAME] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container",
+ .json = "container",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_DETECTION] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Detection",
+ .json = "container_detection",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_ORCHESTRATOR] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Orchestrator",
+ .json = "orchestrator",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_OS_NAME] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Operating System",
+ .json = "os",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_OS_ID] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Operating System ID",
+ .json = "os_id",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_OS_ID_LIKE] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Operating System ID Like",
+ .json = "os_id_like",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_OS_VERSION] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Operating System Version",
+ .json = "version",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_OS_VERSION_ID] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Operating System Version ID",
+ .json = "version_id",
+ .value = "unknown",
+ },
+ [BIB_CONTAINER_OS_DETECTION] = {
+ .category = BIC_CONTAINER,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Container Operating System Detection",
+ .json = "detection",
+ .value = "unknown",
+ },
+ [BIB_FEATURE_BUILT_FOR] = {
+ .category = BIC_FEATURE,
+ .type = BIT_STRING,
+ .analytics = NULL,
+ .print = "Built For",
+ .json = "built-for",
+ .value = "unknown",
+ },
+ [BIB_FEATURE_CLOUD] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = "Netdata Cloud",
+ .print = "Netdata Cloud",
+ .json = "cloud",
+ .value = NULL,
+ },
+ [BIB_FEATURE_HEALTH] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Health (trigger alerts and send notifications)",
+ .json = "health",
+ .value = NULL,
+ },
+ [BIB_FEATURE_STREAMING] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Streaming (stream metrics to parent Netdata servers)",
+ .json = "streaming",
+ .value = NULL,
+ },
+ [BIB_FEATURE_REPLICATION] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Replication (fill the gaps of parent Netdata servers)",
+ .json = "replication",
+ .value = NULL,
+ },
+ [BIB_FEATURE_STREAMING_COMPRESSION] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = "Stream Compression",
+ .print = "Streaming and Replication Compression",
+ .json = "stream-compression",
+ .value = "none",
+ },
+ [BIB_FEATURE_CONTEXTS] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Contexts (index all active and archived metrics)",
+ .json = "contexts",
+ .value = NULL,
+ },
+ [BIB_FEATURE_TIERING] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Tiering (multiple dbs with different metrics resolution)",
+ .json = "tiering",
+ .value = TOSTRING(RRD_STORAGE_TIERS),
+ },
+ [BIB_FEATURE_ML] = {
+ .category = BIC_FEATURE,
+ .type = BIT_BOOLEAN,
+ .analytics = "Machine Learning",
+ .print = "Machine Learning",
+ .json = "ml",
+ .value = NULL,
+ },
+ [BIB_DB_DBENGINE] = {
+ .category = BIC_DATABASE,
+ .type = BIT_BOOLEAN,
+ .analytics = "dbengine",
+ .print = "dbengine",
+ .json = "dbengine",
+ .value = NULL,
+ },
+ [BIB_DB_ALLOC] = {
+ .category = BIC_DATABASE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "alloc",
+ .json = "alloc",
+ .value = NULL,
+ },
+ [BIB_DB_RAM] = {
+ .category = BIC_DATABASE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "ram",
+ .json = "ram",
+ .value = NULL,
+ },
+ [BIB_DB_MAP] = {
+ .category = BIC_DATABASE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "map",
+ .json = "map",
+ .value = NULL,
+ },
+ [BIB_DB_SAVE] = {
+ .category = BIC_DATABASE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "save",
+ .json = "save",
+ .value = NULL,
+ },
+ [BIB_DB_NONE] = {
+ .category = BIC_DATABASE,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "none",
+ .json = "none",
+ .value = NULL,
+ },
+ [BIB_CONNECTIVITY_ACLK] = {
+ .category = BIC_CONNECTIVITY,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "ACLK (Agent-Cloud Link: MQTT over WebSockets over TLS)",
+ .json = "aclk",
+ .value = NULL,
+ },
+ [BIB_CONNECTIVITY_HTTPD_STATIC] = {
+ .category = BIC_CONNECTIVITY,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "static (Netdata internal web server)",
+ .json = "static",
+ .value = NULL,
+ },
+ [BIB_CONNECTIVITY_HTTPD_H2O] = {
+ .category = BIC_CONNECTIVITY,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "h2o (web server)",
+ .json = "h2o",
+ .value = NULL,
+ },
+ [BIB_CONNECTIVITY_WEBRTC] = {
+ .category = BIC_CONNECTIVITY,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "WebRTC (experimental)",
+ .json = "webrtc",
+ .value = NULL,
+ },
+ [BIB_CONNECTIVITY_NATIVE_HTTPS] = {
+ .category = BIC_CONNECTIVITY,
+ .type = BIT_BOOLEAN,
+ .analytics = "Native HTTPS",
+ .print = "Native HTTPS (TLS Support)",
+ .json = "native-https",
+ .value = NULL,
+ },
+ [BIB_CONNECTIVITY_TLS_HOST_VERIFY] = {
+ .category = BIC_CONNECTIVITY,
+ .type = BIT_BOOLEAN,
+ .analytics = "TLS Host Verification",
+ .print = "TLS Host Verification",
+ .json = "tls-host-verify",
+ .value = NULL,
+ },
+ [BIB_LIB_LZ4] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "LZ4 (extremely fast lossless compression algorithm)",
+ .json = "lz4",
+ .value = NULL,
+ },
+ [BIB_LIB_ZLIB] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "zlib",
+ .print = "zlib (lossless data-compression library)",
+ .json = "zlib",
+ .value = NULL,
+ },
+ [BIB_LIB_JUDY] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Judy (high-performance dynamic arrays and hashtables)",
+ .json = "judy",
+ .status = true,
+ .value = "bundled",
+ },
+ [BIB_LIB_DLIB] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "dlib (robust machine learning toolkit)",
+ .json = "dlib",
+ .value = NULL,
+ },
+ [BIB_LIB_PROTOBUF] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "protobuf",
+ .print = "protobuf (platform-neutral data serialization protocol)",
+ .json = "protobuf",
+ .value = NULL,
+ },
+ [BIB_LIB_OPENSSL] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "OpenSSL (cryptography)",
+ .json = "openssl",
+ .value = NULL,
+ },
+ [BIB_LIB_LIBDATACHANNEL] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "libdatachannel (stand-alone WebRTC data channels)",
+ .json = "libdatachannel",
+ .value = NULL,
+ },
+ [BIB_LIB_JSONC] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "JSON-C",
+ .print = "JSON-C (lightweight JSON manipulation)",
+ .json = "jsonc",
+ .value = NULL,
+ },
+ [BIB_LIB_LIBCAP] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "libcap",
+ .print = "libcap (Linux capabilities system operations)",
+ .json = "libcap",
+ .value = NULL,
+ },
+ [BIB_LIB_LIBCRYPTO] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "libcrypto",
+ .print = "libcrypto (cryptographic functions)",
+ .json = "libcrypto",
+ .value = NULL,
+ },
+ [BIB_LIB_LIBM] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "libm",
+ .print = "libm (mathematical functions)",
+ .json = "libm",
+ .value = NULL,
+ },
+ [BIB_LIB_JEMALLOC] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "jemalloc",
+ .print = "jemalloc",
+ .json = "jemalloc",
+ .value = NULL,
+ },
+ [BIB_LIB_TCMALLOC] = {
+ .category = BIC_LIBS,
+ .type = BIT_BOOLEAN,
+ .analytics = "tcmalloc",
+ .print = "TCMalloc",
+ .json = "tcmalloc",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_APPS] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "apps",
+ .print = "apps (monitor processes)",
+ .json = "apps",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_LINUX_CGROUPS] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "cgroups (monitor containers and VMs)",
+ .json = "cgroups",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_LINUX_CGROUP_NETWORK] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "cgroup Network Tracking",
+ .print = "cgroup-network (associate interfaces to CGROUPS)",
+ .json = "cgroup-network",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_LINUX_PROC] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "proc (monitor Linux systems)",
+ .json = "proc",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_LINUX_TC] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "tc (monitor Linux network QoS)",
+ .json = "tc",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_LINUX_DISKSPACE] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "diskspace (monitor Linux mount points)",
+ .json = "diskspace",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_FREEBSD] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "freebsd (monitor FreeBSD systems)",
+ .json = "freebsd",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_MACOS] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "macos (monitor MacOS systems)",
+ .json = "macos",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_STATSD] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "statsd (collect custom application metrics)",
+ .json = "statsd",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_TIMEX] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "timex (check system clock synchronization)",
+ .json = "timex",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_IDLEJITTER] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "idlejitter (check system latency and jitter)",
+ .json = "idlejitter",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_BASH] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "bash (support shell data collection jobs - charts.d)",
+ .json = "charts.d",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_DEBUGFS] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "debugfs",
+ .print = "debugfs (kernel debugging metrics)",
+ .json = "debugfs",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_CUPS] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "CUPS",
+ .print = "cups (monitor printers and print jobs)",
+ .json = "cups",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_EBPF] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "EBPF",
+ .print = "ebpf (monitor system calls)",
+ .json = "ebpf",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_FREEIPMI] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "IPMI",
+ .print = "freeipmi (monitor enterprise server H/W)",
+ .json = "freeipmi",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_NFACCT] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "NFACCT",
+ .print = "nfacct (gather netfilter accounting)",
+ .json = "nfacct",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_PERF] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "perf",
+ .print = "perf (collect kernel performance events)",
+ .json = "perf",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_SLABINFO] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "slabinfo",
+ .print = "slabinfo (monitor kernel object caching)",
+ .json = "slabinfo",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_XEN] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "Xen",
+ .print = "Xen",
+ .json = "xen",
+ .value = NULL,
+ },
+ [BIB_PLUGIN_XEN_VBD_ERROR] = {
+ .category = BIC_PLUGINS,
+ .type = BIT_BOOLEAN,
+ .analytics = "Xen VBD Error Tracking",
+ .print = "Xen VBD Error Tracking",
+ .json = "xen-vbd-error",
+ .value = NULL,
+ },
+ [BIB_EXPORT_MONGOC] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = "MongoDB",
+ .print = "MongoDB",
+ .json = "mongodb",
+ .value = NULL,
+ },
+ [BIB_EXPORT_GRAPHITE] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Graphite",
+ .json = "graphite",
+ .value = NULL,
+ },
+ [BIB_EXPORT_GRAPHITE_HTTP] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Graphite HTTP / HTTPS",
+ .json = "graphite:http",
+ .value = NULL,
+ },
+ [BIB_EXPORT_JSON] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "JSON",
+ .json = "json",
+ .value = NULL,
+ },
+ [BIB_EXPORT_JSON_HTTP] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "JSON HTTP / HTTPS",
+ .json = "json:http",
+ .value = NULL,
+ },
+ [BIB_EXPORT_OPENTSDB] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "OpenTSDB",
+ .json = "opentsdb",
+ .value = NULL,
+ },
+ [BIB_EXPORT_OPENTSDB_HTTP] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "OpenTSDB HTTP / HTTPS",
+ .json = "opentsdb:http",
+ .value = NULL,
+ },
+ [BIB_EXPORT_ALLMETRICS] = {
+ .category = BIC_EXPORTERS,
+ .analytics = NULL,
+ .type = BIT_BOOLEAN,
+ .print = "All Metrics API",
+ .json = "allmetrics",
+ .value = NULL,
+ },
+ [BIB_EXPORT_SHELL] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Shell (use metrics in shell scripts)",
+ .json = "shell",
+ .value = NULL,
+ },
+ [BIB_EXPORT_PROMETHEUS_EXPORTER] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Prometheus (OpenMetrics) Exporter",
+ .json = "openmetrics",
+ .value = NULL,
+ },
+ [BIB_EXPORT_PROMETHEUS_REMOTE_WRITE] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = "Prometheus Remote Write",
+ .print = "Prometheus Remote Write",
+ .json = "prom-remote-write",
+ .value = NULL,
+ },
+ [BIB_EXPORT_AWS_KINESIS] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = "AWS Kinesis",
+ .print = "AWS Kinesis",
+ .json = "kinesis",
+ .value = NULL,
+ },
+ [BIB_EXPORT_GCP_PUBSUB] = {
+ .category = BIC_EXPORTERS,
+ .type = BIT_BOOLEAN,
+ .analytics = "GCP PubSub",
+ .print = "GCP PubSub",
+ .json = "pubsub",
+ .value = NULL,
+ },
+ [BIB_DEVEL_TRACE_ALLOCATIONS] = {
+ .category = BIC_DEBUG_DEVEL,
+ .type = BIT_BOOLEAN,
+ .analytics = "DebugTraceAlloc",
+ .print = "Trace All Netdata Allocations (with charts)",
+ .json = "trace-allocations",
+ .value = NULL,
+ },
+ [BIB_DEVELOPER_MODE] = {
+ .category = BIC_DEBUG_DEVEL,
+ .type = BIT_BOOLEAN,
+ .analytics = NULL,
+ .print = "Developer Mode (more runtime checks, slower)",
+ .json = "dev-mode",
+ .value = NULL,
+ },
+
+ // leave this last
+ [BIB_TERMINATOR] = {
+ .category = 0,
+ .type = 0,
+ .analytics = NULL,
+ .print = NULL,
+ .json = NULL,
+ .value = NULL,
+ },
+};
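This table is the heart of the rewrite: one packed enum names every slot, designated initializers fill a single array, and the console, JSON and analytics outputs are all produced by walking the same data, which is what lets the dozens of FEAT_* macros below be deleted. The pattern in miniature (illustrative names, not Netdata's):

    #include <stdbool.h>
    #include <stdio.h>

    typedef enum __attribute__((packed)) { SLOT_A, SLOT_B, SLOT_TERMINATOR } SLOT;

    static struct { const char *name; bool status; } table[] = {
        [SLOT_A] = { .name = "a", .status = true },
        [SLOT_B] = { .name = "b" },
        [SLOT_TERMINATOR] = { 0 },   /* sentinel bounds every loop */
    };

    int main(void) {
        for (size_t i = 0; i < SLOT_TERMINATOR; i++)
            printf("%s: %s\n", table[i].name, table[i].status ? "YES" : "NO");
        return 0;
    }

__attribute__((packed)) on an enum is a GCC/Clang extension that shrinks its storage to the smallest fitting integer type; it is cosmetic here, not required for the pattern.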
+
+static void build_info_set_value(BUILD_INFO_SLOT slot, const char *value) {
+ BUILD_INFO[slot].value = value;
+}
+
+static void build_info_set_value_strdupz(BUILD_INFO_SLOT slot, const char *value) {
+ if(!value) value = "";
+ build_info_set_value(slot, strdupz(value));
+}
+
+static void build_info_set_status(BUILD_INFO_SLOT slot, bool status) {
+ BUILD_INFO[slot].status = status;
+}
+
+__attribute__((constructor)) void initialize_build_info(void) {
+ build_info_set_value(BIB_PACKAGING_NETDATA_VERSION, program_version);
+ build_info_set_value(BIB_PACKAGING_CONFIGURE_OPTIONS, CONFIGURE_COMMAND);
+
+#ifdef COMPILED_FOR_LINUX
+ build_info_set_status(BIB_FEATURE_BUILT_FOR, true);
+ build_info_set_value(BIB_FEATURE_BUILT_FOR, "Linux");
+ build_info_set_status(BIB_PLUGIN_LINUX_CGROUPS, true);
+ build_info_set_status(BIB_PLUGIN_LINUX_PROC, true);
+ build_info_set_status(BIB_PLUGIN_LINUX_DISKSPACE, true);
+ build_info_set_status(BIB_PLUGIN_LINUX_TC, true);
+#endif
+#ifdef COMPILED_FOR_FREEBSD
+ build_info_set_status(BIB_FEATURE_BUILT_FOR, true);
+ build_info_set_value(BIB_FEATURE_BUILT_FOR, "FreeBSD");
+ build_info_set_status(BIB_PLUGIN_FREEBSD, true);
+#endif
+#ifdef COMPILED_FOR_MACOS
+ build_info_set_status(BIB_FEATURE_BUILT_FOR, true);
+ build_info_set_value(BIB_FEATURE_BUILT_FOR, "MacOS");
+ build_info_set_status(BIB_PLUGIN_MACOS, true);
+#endif
#ifdef ENABLE_ACLK
-#define FEAT_CLOUD 1
-#define FEAT_CLOUD_MSG ""
+ build_info_set_status(BIB_FEATURE_CLOUD, true);
+ build_info_set_status(BIB_CONNECTIVITY_ACLK, true);
#else
+ build_info_set_status(BIB_FEATURE_CLOUD, false);
#ifdef DISABLE_CLOUD
-#define FEAT_CLOUD 0
-#define FEAT_CLOUD_MSG "(by user request)"
+ build_info_set_value(BIB_FEATURE_CLOUD, "disabled");
#else
-#define FEAT_CLOUD 0
-#define FEAT_CLOUD_MSG ""
+ build_info_set_value(BIB_FEATURE_CLOUD, "unavailable");
#endif
#endif
-#ifdef ENABLE_HTTPD
-#define FEAT_HTTPD 1
-#else
-#define FEAT_HTTPD 0
-#endif
+ build_info_set_status(BIB_FEATURE_HEALTH, true);
+ build_info_set_status(BIB_FEATURE_STREAMING, true);
+ build_info_set_status(BIB_FEATURE_REPLICATION, true);
-#ifdef ENABLE_DBENGINE
-#define FEAT_DBENGINE 1
-#else
-#define FEAT_DBENGINE 0
+#ifdef ENABLE_RRDPUSH_COMPRESSION
+ build_info_set_status(BIB_FEATURE_STREAMING_COMPRESSION, true);
+#ifdef ENABLE_LZ4
+ build_info_set_value(BIB_FEATURE_STREAMING_COMPRESSION, "lz4");
+#endif
#endif
-#if defined(HAVE_X509_VERIFY_PARAM_set1_host) && HAVE_X509_VERIFY_PARAM_set1_host == 1
-#define FEAT_TLS_HOST_VERIFY 1
-#else
-#define FEAT_TLS_HOST_VERIFY 0
+ build_info_set_status(BIB_FEATURE_CONTEXTS, true);
+ build_info_set_status(BIB_FEATURE_TIERING, true);
+
+#ifdef ENABLE_ML
+ build_info_set_status(BIB_FEATURE_ML, true);
#endif
+#ifdef ENABLE_DBENGINE
+ build_info_set_status(BIB_DB_DBENGINE, true);
+#endif
+ build_info_set_status(BIB_DB_ALLOC, true);
+ build_info_set_status(BIB_DB_RAM, true);
+ build_info_set_status(BIB_DB_MAP, true);
+ build_info_set_status(BIB_DB_SAVE, true);
+ build_info_set_status(BIB_DB_NONE, true);
+
+ build_info_set_status(BIB_CONNECTIVITY_HTTPD_STATIC, true);
+#ifdef ENABLE_H2O
+ build_info_set_status(BIB_CONNECTIVITY_HTTPD_H2O, true);
+#endif
+#ifdef ENABLE_WEBRTC
+ build_info_set_status(BIB_CONNECTIVITY_WEBRTC, true);
+#endif
#ifdef ENABLE_HTTPS
-#define FEAT_NATIVE_HTTPS 1
-#else
-#define FEAT_NATIVE_HTTPS 0
+ build_info_set_status(BIB_CONNECTIVITY_NATIVE_HTTPS, true);
#endif
-
-#ifdef ENABLE_ML
-#define FEAT_ML 1
-#else
-#define FEAT_ML 0
+#if defined(HAVE_X509_VERIFY_PARAM_set1_host) && HAVE_X509_VERIFY_PARAM_set1_host == 1
+ build_info_set_status(BIB_CONNECTIVITY_TLS_HOST_VERIFY, true);
#endif
-#ifdef ENABLE_COMPRESSION
-#define FEAT_STREAM_COMPRESSION 1
-#else
-#define FEAT_STREAM_COMPRESSION 0
-#endif //ENABLE_COMPRESSION
+#ifdef ENABLE_LZ4
+ build_info_set_status(BIB_LIB_LZ4, true);
+#endif
+ build_info_set_status(BIB_LIB_ZLIB, true);
-// Optional libraries
+#ifdef HAVE_DLIB
+ build_info_set_status(BIB_LIB_DLIB, true);
+ build_info_set_value(BIB_LIB_DLIB, "bundled");
+#endif
#ifdef HAVE_PROTOBUF
-#define FEAT_PROTOBUF 1
+ build_info_set_status(BIB_LIB_PROTOBUF, true);
#ifdef BUNDLED_PROTOBUF
-#define FEAT_PROTOBUF_BUNDLED " (bundled)"
+ build_info_set_value(BIB_LIB_PROTOBUF, "bundled");
#else
-#define FEAT_PROTOBUF_BUNDLED " (system)"
+ build_info_set_value(BIB_LIB_PROTOBUF, "system");
#endif
-#else
-#define FEAT_PROTOBUF 0
-#define FEAT_PROTOBUF_BUNDLED ""
#endif
-#ifdef ENABLE_JSONC
-#define FEAT_JSONC 1
-#else
-#define FEAT_JSONC 0
+#ifdef HAVE_LIBDATACHANNEL
+ build_info_set_status(BIB_LIB_LIBDATACHANNEL, true);
#endif
-
-#ifdef ENABLE_JEMALLOC
-#define FEAT_JEMALLOC 1
-#else
-#define FEAT_JEMALLOC 0
+#ifdef ENABLE_OPENSSL
+ build_info_set_status(BIB_LIB_OPENSSL, true);
#endif
-
-#ifdef ENABLE_TCMALLOC
-#define FEAT_TCMALLOC 1
-#else
-#define FEAT_TCMALLOC 0
+#ifdef ENABLE_JSONC
+ build_info_set_status(BIB_LIB_JSONC, true);
#endif
-
#ifdef HAVE_CAPABILITY
-#define FEAT_LIBCAP 1
-#else
-#define FEAT_LIBCAP 0
-#endif
-
-#ifdef STORAGE_WITH_MATH
-#define FEAT_LIBM 1
-#else
-#define FEAT_LIBM 0
+ build_info_set_status(BIB_LIB_LIBCAP, true);
#endif
-
#ifdef HAVE_CRYPTO
-#define FEAT_CRYPTO 1
-#else
-#define FEAT_CRYPTO 0
+ build_info_set_status(BIB_LIB_LIBCRYPTO, true);
#endif
-
-// Optional plugins
-
-#ifdef ENABLE_APPS_PLUGIN
-#define FEAT_APPS_PLUGIN 1
-#else
-#define FEAT_APPS_PLUGIN 0
+#ifdef STORAGE_WITH_MATH
+ build_info_set_status(BIB_LIB_LIBM, true);
#endif
-
-#ifdef ENABLE_DEBUGFS_PLUGIN
-#define FEAT_DEBUGFS_PLUGIN 1
-#else
-#define FEAT_DEBUGFS_PLUGIN 0
+#ifdef ENABLE_JEMALLOC
+ build_info_set_status(BIB_LIB_JEMALLOC, true);
#endif
-
-#ifdef HAVE_FREEIPMI
-#define FEAT_IPMI 1
-#else
-#define FEAT_IPMI 0
+#ifdef ENABLE_TCMALLOC
+ build_info_set_status(BIB_LIB_TCMALLOC, true);
#endif
-#ifdef HAVE_CUPS
-#define FEAT_CUPS 1
-#else
-#define FEAT_CUPS 0
+#ifdef ENABLE_APPS_PLUGIN
+ build_info_set_status(BIB_PLUGIN_APPS, true);
#endif
-
-#ifdef HAVE_NFACCT
-#define FEAT_NFACCT 1
-#else
-#define FEAT_NFACCT 0
+#ifdef HAVE_SETNS
+ build_info_set_status(BIB_PLUGIN_LINUX_CGROUP_NETWORK, true);
#endif
-#ifdef HAVE_LIBXENSTAT
-#define FEAT_XEN 1
-#else
-#define FEAT_XEN 0
-#endif
+ build_info_set_status(BIB_PLUGIN_STATSD, true);
+ build_info_set_status(BIB_PLUGIN_TIMEX, true);
+ build_info_set_status(BIB_PLUGIN_IDLEJITTER, true);
+ build_info_set_status(BIB_PLUGIN_BASH, true);
-#ifdef HAVE_XENSTAT_VBD_ERROR
-#define FEAT_XEN_VBD_ERROR 1
-#else
-#define FEAT_XEN_VBD_ERROR 0
+#ifdef ENABLE_DEBUGFS_PLUGIN
+ build_info_set_status(BIB_PLUGIN_DEBUGFS, true);
+#endif
+#ifdef HAVE_CUPS
+ build_info_set_status(BIB_PLUGIN_CUPS, true);
#endif
-
#ifdef HAVE_LIBBPF
-#define FEAT_EBPF 1
-#else
-#define FEAT_EBPF 0
+ build_info_set_status(BIB_PLUGIN_EBPF, true);
#endif
-
-#ifdef HAVE_SETNS
-#define FEAT_CGROUP_NET 1
-#else
-#define FEAT_CGROUP_NET 0
+#ifdef HAVE_FREEIPMI
+ build_info_set_status(BIB_PLUGIN_FREEIPMI, true);
+#endif
+#ifdef HAVE_NFACCT
+ build_info_set_status(BIB_PLUGIN_NFACCT, true);
#endif
-
#ifdef ENABLE_PERF_PLUGIN
-#define FEAT_PERF 1
-#else
-#define FEAT_PERF 0
+ build_info_set_status(BIB_PLUGIN_PERF, true);
#endif
-
#ifdef ENABLE_SLABINFO
-#define FEAT_SLABINFO 1
-#else
-#define FEAT_SLABINFO 0
+ build_info_set_status(BIB_PLUGIN_SLABINFO, true);
+#endif
+#ifdef HAVE_LIBXENSTAT
+ build_info_set_status(BIB_PLUGIN_XEN, true);
+#endif
+#ifdef HAVE_XENSTAT_VBD_ERROR
+ build_info_set_status(BIB_PLUGIN_XEN_VBD_ERROR, true);
#endif
-// Optional Exporters
+ build_info_set_status(BIB_EXPORT_PROMETHEUS_EXPORTER, true);
+ build_info_set_status(BIB_EXPORT_GRAPHITE, true);
+ build_info_set_status(BIB_EXPORT_GRAPHITE_HTTP, true);
+ build_info_set_status(BIB_EXPORT_JSON, true);
+ build_info_set_status(BIB_EXPORT_JSON_HTTP, true);
+ build_info_set_status(BIB_EXPORT_OPENTSDB, true);
+ build_info_set_status(BIB_EXPORT_OPENTSDB_HTTP, true);
+ build_info_set_status(BIB_EXPORT_ALLMETRICS, true);
+ build_info_set_status(BIB_EXPORT_SHELL, true);
#ifdef HAVE_KINESIS
-#define FEAT_KINESIS 1
-#else
-#define FEAT_KINESIS 0
+ build_info_set_status(BIB_EXPORT_AWS_KINESIS, true);
#endif
-
#ifdef ENABLE_EXPORTING_PUBSUB
-#define FEAT_PUBSUB 1
-#else
-#define FEAT_PUBSUB 0
+ build_info_set_status(BIB_EXPORT_GCP_PUBSUB, true);
#endif
-
#ifdef HAVE_MONGOC
-#define FEAT_MONGO 1
-#else
-#define FEAT_MONGO 0
+ build_info_set_status(BIB_EXPORT_MONGOC, true);
#endif
-
#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
-#define FEAT_REMOTE_WRITE 1
-#else
-#define FEAT_REMOTE_WRITE 0
+ build_info_set_status(BIB_EXPORT_PROMETHEUS_REMOTE_WRITE, true);
#endif
-#define FEAT_YES_NO(x) ((x) ? "YES" : "NO")
-
#ifdef NETDATA_TRACE_ALLOCATIONS
-#define FEAT_TRACE_ALLOC 1
-#else
-#define FEAT_TRACE_ALLOC 0
+ build_info_set_status(BIB_DEVEL_TRACE_ALLOCATIONS, true);
+#endif
+
+#if defined(NETDATA_DEV_MODE) || defined(NETDATA_INTERNAL_CHECKS)
+ build_info_set_status(BIB_DEVELOPER_MODE, true);
#endif
+}
+
+// ----------------------------------------------------------------------------
+// system info
+
+int get_system_info(struct rrdhost_system_info *system_info, bool log);
+static void populate_system_info(void) {
+ static bool populated = false;
+ static SPINLOCK spinlock = NETDATA_SPINLOCK_INITIALIZER;
+
+ if(populated)
+ return;
+
+ spinlock_lock(&spinlock);
+
+ if(populated) {
+ spinlock_unlock(&spinlock);
+ return;
+ }
+
+ struct rrdhost_system_info *system_info;
+ bool free_system_info = false;
+
+ if(localhost && localhost->system_info) {
+ system_info = localhost->system_info;
+ }
+ else {
+ system_info = callocz(1, sizeof(struct rrdhost_system_info));
+ get_system_info(system_info, false);
+ free_system_info = true;
+ }
+
+ build_info_set_value_strdupz(BIB_OS_KERNEL_NAME, system_info->kernel_name);
+ build_info_set_value_strdupz(BIB_OS_KERNEL_VERSION, system_info->kernel_version);
+ build_info_set_value_strdupz(BIB_OS_NAME, system_info->host_os_name);
+ build_info_set_value_strdupz(BIB_OS_ID, system_info->host_os_id);
+ build_info_set_value_strdupz(BIB_OS_ID_LIKE, system_info->host_os_id_like);
+ build_info_set_value_strdupz(BIB_OS_VERSION, system_info->host_os_version);
+ build_info_set_value_strdupz(BIB_OS_VERSION_ID, system_info->host_os_version_id);
+ build_info_set_value_strdupz(BIB_OS_DETECTION, system_info->host_os_detection);
+ build_info_set_value_strdupz(BIB_HW_CPU_CORES, system_info->host_cores);
+ build_info_set_value_strdupz(BIB_HW_CPU_FREQUENCY, system_info->host_cpu_freq);
+ build_info_set_value_strdupz(BIB_HW_RAM_SIZE, system_info->host_ram_total);
+ build_info_set_value_strdupz(BIB_HW_DISK_SPACE, system_info->host_disk_space);
+ build_info_set_value_strdupz(BIB_HW_ARCHITECTURE, system_info->architecture);
+ build_info_set_value_strdupz(BIB_HW_VIRTUALIZATION, system_info->virtualization);
+ build_info_set_value_strdupz(BIB_HW_VIRTUALIZATION_DETECTION, system_info->virt_detection);
+ build_info_set_value_strdupz(BIB_CONTAINER_NAME, system_info->container);
+ build_info_set_value_strdupz(BIB_CONTAINER_DETECTION, system_info->container_detection);
+
+ if(system_info->is_k8s_node && !strcmp(system_info->is_k8s_node, "true"))
+ build_info_set_value_strdupz(BIB_CONTAINER_ORCHESTRATOR, "kubernetes");
+ else
+ build_info_set_value_strdupz(BIB_CONTAINER_ORCHESTRATOR, "none");
+
+ build_info_set_value_strdupz(BIB_CONTAINER_OS_NAME, system_info->container_os_name);
+ build_info_set_value_strdupz(BIB_CONTAINER_OS_ID, system_info->container_os_id);
+ build_info_set_value_strdupz(BIB_CONTAINER_OS_ID_LIKE, system_info->container_os_id_like);
+ build_info_set_value_strdupz(BIB_CONTAINER_OS_VERSION, system_info->container_os_version);
+ build_info_set_value_strdupz(BIB_CONTAINER_OS_VERSION_ID, system_info->container_os_version_id);
+ build_info_set_value_strdupz(BIB_CONTAINER_OS_DETECTION, system_info->container_os_detection);
+
+ if(free_system_info)
+ rrdhost_system_info_free(system_info);
+
+ populated = true;
+ spinlock_unlock(&spinlock);
+}
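populate_system_info() is double-checked: an unlocked read of `populated` short-circuits the common case, and the flag is re-tested under the spinlock before the one-time work runs. The same shape with C11 atomics and a pthread mutex (a sketch; Netdata uses its own SPINLOCK primitives, and the atomic load makes the fast-path read well-defined):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool populated = false;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void populate_once(void) {
        if (atomic_load(&populated)) return;       /* fast path, no lock */
        pthread_mutex_lock(&lock);
        if (!atomic_load(&populated)) {
            /* ... expensive one-time initialization ... */
            atomic_store(&populated, true);        /* publish after init */
        }
        pthread_mutex_unlock(&lock);
    }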
+
+// ----------------------------------------------------------------------------
+// packaging info
char *get_value_from_key(char *buffer, char *key) {
char *s = NULL, *t = NULL;
@@ -247,235 +1339,160 @@ void get_install_type(char **install_type, char **prebuilt_arch, char **prebuilt
freez(install_type_filename);
}
-void print_build_info(void) {
- char *install_type = NULL;
- char *prebuilt_arch = NULL;
- char *prebuilt_distro = NULL;
- get_install_type(&install_type, &prebuilt_arch, &prebuilt_distro);
+static struct {
+ SPINLOCK spinlock;
+ bool populated;
+ char *install_type;
+ char *prebuilt_arch;
+ char *prebuilt_distro;
+} BUILD_PACKAGING_INFO = { 0 };
+
+static void populate_packaging_info() {
+ if(!BUILD_PACKAGING_INFO.populated) {
+ spinlock_lock(&BUILD_PACKAGING_INFO.spinlock);
+ if(!BUILD_PACKAGING_INFO.populated) {
+ BUILD_PACKAGING_INFO.populated = true;
- printf("Configure options: %s\n", CONFIGURE_COMMAND);
+ get_install_type(&BUILD_PACKAGING_INFO.install_type, &BUILD_PACKAGING_INFO.prebuilt_arch, &BUILD_PACKAGING_INFO.prebuilt_distro);
- if (install_type == NULL) {
- printf("Install type: unknown\n");
- } else {
- printf("Install type: %s\n", install_type);
+ if(!BUILD_PACKAGING_INFO.install_type)
+ BUILD_PACKAGING_INFO.install_type = "unknown";
+
+ if(!BUILD_PACKAGING_INFO.prebuilt_arch)
+ BUILD_PACKAGING_INFO.prebuilt_arch = "unknown";
+
+ if(!BUILD_PACKAGING_INFO.prebuilt_distro)
+ BUILD_PACKAGING_INFO.prebuilt_distro = "unknown";
+
+ build_info_set_value(BIB_PACKAGING_INSTALL_TYPE, strdupz(BUILD_PACKAGING_INFO.install_type));
+ build_info_set_value(BIB_PACKAGING_ARCHITECTURE, strdupz(BUILD_PACKAGING_INFO.prebuilt_arch));
+ build_info_set_value(BIB_PACKAGING_DISTRO, strdupz(BUILD_PACKAGING_INFO.prebuilt_distro));
+ }
+ spinlock_unlock(&BUILD_PACKAGING_INFO.spinlock);
}
+}
- if (prebuilt_arch != NULL) {
- printf(" Binary architecture: %s\n", prebuilt_arch);
+// ----------------------------------------------------------------------------
+
+static void populate_directories(void) {
+ build_info_set_value(BIB_DIR_USER_CONFIG, netdata_configured_user_config_dir);
+ build_info_set_value(BIB_DIR_STOCK_CONFIG, netdata_configured_stock_config_dir);
+ build_info_set_value(BIB_DIR_CACHE, netdata_configured_cache_dir);
+ build_info_set_value(BIB_DIR_LIB, netdata_configured_varlib_dir);
+ build_info_set_value(BIB_DIR_PLUGINS, netdata_configured_primary_plugins_dir);
+ build_info_set_value(BIB_DIR_WEB, netdata_configured_web_dir);
+ build_info_set_value(BIB_DIR_LOG, netdata_configured_log_dir);
+ build_info_set_value(BIB_DIR_LOCK, netdata_configured_lock_dir);
+ build_info_set_value(BIB_DIR_HOME, netdata_configured_home_dir);
+}
+
+// ----------------------------------------------------------------------------
+
+static void print_build_info_category_to_json(BUFFER *b, BUILD_INFO_CATEGORY category, const char *key) {
+ buffer_json_member_add_object(b, key);
+ for(size_t i = 0; i < BIB_TERMINATOR ;i++) {
+ if(BUILD_INFO[i].category == category && BUILD_INFO[i].json) {
+ if(BUILD_INFO[i].value)
+ buffer_json_member_add_string(b, BUILD_INFO[i].json, BUILD_INFO[i].value);
+ else
+ buffer_json_member_add_boolean(b, BUILD_INFO[i].json, BUILD_INFO[i].status);
+ }
}
+ buffer_json_object_close(b); // key
+}
- if (prebuilt_distro != NULL) {
- printf(" Packaging distro: %s\n", prebuilt_distro);
+static void print_build_info_category_to_console(BUILD_INFO_CATEGORY category, const char *title) {
+ printf("%s:\n", title);
+ for(size_t i = 0; i < BIB_TERMINATOR ;i++) {
+ if(BUILD_INFO[i].category == category && BUILD_INFO[i].print) {
+ const char *v = BUILD_INFO[i].status ? "YES" : "NO";
+ const char *k = BUILD_INFO[i].print;
+ const char *d = BUILD_INFO[i].value;
+
+ int padding_length = 60 - strlen(k) - 1;
+ if (padding_length < 0) padding_length = 0;
+
+ char padding[padding_length + 1];
+ memset(padding, '_', padding_length);
+ padding[padding_length] = '\0';
+
+ if(BUILD_INFO[i].type == BIT_STRING)
+ printf(" %s %s : %s\n", k, padding, d?d:"unknown");
+ else
+ printf(" %s %s : %s%s%s%s\n", k, padding, v,
+ d?" (":"", d?d:"", d?")":"");
+ }
}
+}
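
To make the padding arithmetic concrete (each key is padded with '_' to a 60-column label, clamped at zero for long keys), hypothetical console output looks like this — label names, values, and exact widths are illustrative; the real ones come from BUILD_INFO[].print and BUILD_INFO[].value:

    Packaging:
        Install type _______________________________________ : kickstart
        Binary architecture ________________________________ : x86_64
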
- freez(install_type);
- freez(prebuilt_arch);
- freez(prebuilt_distro);
-
- printf("Features:\n");
- printf(" dbengine: %s\n", FEAT_YES_NO(FEAT_DBENGINE));
- printf(" Native HTTPS: %s\n", FEAT_YES_NO(FEAT_NATIVE_HTTPS));
- printf(" Netdata Cloud: %s %s\n", FEAT_YES_NO(FEAT_CLOUD), FEAT_CLOUD_MSG);
- printf(" ACLK: %s\n", FEAT_YES_NO(FEAT_CLOUD));
- printf(" TLS Host Verification: %s\n", FEAT_YES_NO(FEAT_TLS_HOST_VERIFY));
- printf(" Machine Learning: %s\n", FEAT_YES_NO(FEAT_ML));
- printf(" Stream Compression: %s\n", FEAT_YES_NO(FEAT_STREAM_COMPRESSION));
- printf(" HTTPD (h2o): %s\n", FEAT_YES_NO(FEAT_HTTPD));
-
- printf("Libraries:\n");
- printf(" protobuf: %s%s\n", FEAT_YES_NO(FEAT_PROTOBUF), FEAT_PROTOBUF_BUNDLED);
- printf(" jemalloc: %s\n", FEAT_YES_NO(FEAT_JEMALLOC));
- printf(" JSON-C: %s\n", FEAT_YES_NO(FEAT_JSONC));
- printf(" libcap: %s\n", FEAT_YES_NO(FEAT_LIBCAP));
- printf(" libcrypto: %s\n", FEAT_YES_NO(FEAT_CRYPTO));
- printf(" libm: %s\n", FEAT_YES_NO(FEAT_LIBM));
- printf(" tcalloc: %s\n", FEAT_YES_NO(FEAT_TCMALLOC));
- printf(" zlib: %s\n", FEAT_YES_NO(1));
-
- printf("Plugins:\n");
- printf(" apps: %s\n", FEAT_YES_NO(FEAT_APPS_PLUGIN));
- printf(" cgroup Network Tracking: %s\n", FEAT_YES_NO(FEAT_CGROUP_NET));
- printf(" CUPS: %s\n", FEAT_YES_NO(FEAT_CUPS));
- printf(" debugfs: %s\n", FEAT_YES_NO(FEAT_DEBUGFS_PLUGIN));
- printf(" EBPF: %s\n", FEAT_YES_NO(FEAT_EBPF));
- printf(" IPMI: %s\n", FEAT_YES_NO(FEAT_IPMI));
- printf(" NFACCT: %s\n", FEAT_YES_NO(FEAT_NFACCT));
- printf(" perf: %s\n", FEAT_YES_NO(FEAT_PERF));
- printf(" slabinfo: %s\n", FEAT_YES_NO(FEAT_SLABINFO));
- printf(" Xen: %s\n", FEAT_YES_NO(FEAT_XEN));
- printf(" Xen VBD Error Tracking: %s\n", FEAT_YES_NO(FEAT_XEN_VBD_ERROR));
-
- printf("Exporters:\n");
- printf(" AWS Kinesis: %s\n", FEAT_YES_NO(FEAT_KINESIS));
- printf(" GCP PubSub: %s\n", FEAT_YES_NO(FEAT_PUBSUB));
- printf(" MongoDB: %s\n", FEAT_YES_NO(FEAT_MONGO));
- printf(" Prometheus Remote Write: %s\n", FEAT_YES_NO(FEAT_REMOTE_WRITE));
-
- printf("Debug/Developer Features:\n");
- printf(" Trace Allocations: %s\n", FEAT_YES_NO(FEAT_TRACE_ALLOC));
+void print_build_info(void) {
+ populate_packaging_info();
+ populate_system_info();
+ populate_directories();
+
+ print_build_info_category_to_console(BIC_PACKAGING, "Packaging");
+ print_build_info_category_to_console(BIC_DIRECTORIES, "Default Directories");
+ print_build_info_category_to_console(BIC_OPERATING_SYSTEM, "Operating System");
+ print_build_info_category_to_console(BIC_HARDWARE, "Hardware");
+ print_build_info_category_to_console(BIC_CONTAINER, "Container");
+ print_build_info_category_to_console(BIC_FEATURE, "Features");
+ print_build_info_category_to_console(BIC_DATABASE, "Database Engines");
+ print_build_info_category_to_console(BIC_CONNECTIVITY, "Connectivity Capabilities");
+ print_build_info_category_to_console(BIC_LIBS, "Libraries");
+ print_build_info_category_to_console(BIC_PLUGINS, "Plugins");
+ print_build_info_category_to_console(BIC_EXPORTERS, "Exporters");
+ print_build_info_category_to_console(BIC_DEBUG_DEVEL, "Debug/Developer Features");
};
-#define FEAT_JSON_BOOL(x) ((x) ? "true" : "false")
-// This intentionally does not use JSON-C so it works even if JSON-C is not present
-// This is used for anonymous statistics reporting, so it intentionally
-// does not include the configure options, which would be very easy to use
-// for tracking custom builds (and complicate outputting valid JSON).
+void build_info_to_json_object(BUFFER *b) {
+ populate_packaging_info();
+ populate_system_info();
+ populate_directories();
+
+ print_build_info_category_to_json(b, BIC_PACKAGING, "package");
+ print_build_info_category_to_json(b, BIC_DIRECTORIES, "directories");
+ print_build_info_category_to_json(b, BIC_OPERATING_SYSTEM, "os");
+ print_build_info_category_to_json(b, BIC_HARDWARE, "hw");
+ print_build_info_category_to_json(b, BIC_CONTAINER, "container");
+ print_build_info_category_to_json(b, BIC_FEATURE, "features");
+ print_build_info_category_to_json(b, BIC_DATABASE, "databases");
+ print_build_info_category_to_json(b, BIC_CONNECTIVITY, "connectivity");
+ print_build_info_category_to_json(b, BIC_LIBS, "libs");
+ print_build_info_category_to_json(b, BIC_PLUGINS, "plugins");
+ print_build_info_category_to_json(b, BIC_EXPORTERS, "exporters");
+ print_build_info_category_to_json(b, BIC_DEBUG_DEVEL, "debug-n-devel");
+}
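
Assuming a few representative BUILD_INFO entries, the JSON assembled here has this general shape (member names are illustrative; the real ones are the BUILD_INFO[].json strings). String-valued entries become JSON strings, everything else a boolean, exactly as print_build_info_category_to_json() decides per entry:

    "package": { "install-type": "kickstart", "arch": "x86_64" },
    "features": { "dbengine": true, "machine-learning": true },
    "exporters": { "mongodb": false, "prom-remote-write": true }
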
+
void print_build_info_json(void) {
- printf("{\n");
- printf(" \"features\": {\n");
- printf(" \"dbengine\": %s,\n", FEAT_JSON_BOOL(FEAT_DBENGINE));
- printf(" \"native-https\": %s,\n", FEAT_JSON_BOOL(FEAT_NATIVE_HTTPS));
- printf(" \"cloud\": %s,\n", FEAT_JSON_BOOL(FEAT_CLOUD));
-#ifdef DISABLE_CLOUD
- printf(" \"cloud-disabled\": true,\n");
-#else
- printf(" \"cloud-disabled\": false,\n");
-#endif
- printf(" \"aclk\": %s,\n", FEAT_JSON_BOOL(FEAT_CLOUD));
-
- printf(" \"tls-host-verify\": %s,\n", FEAT_JSON_BOOL(FEAT_TLS_HOST_VERIFY));
- printf(" \"machine-learning\": %s\n", FEAT_JSON_BOOL(FEAT_ML));
- printf(" \"stream-compression\": %s\n", FEAT_JSON_BOOL(FEAT_STREAM_COMPRESSION));
- printf(" \"httpd-h2o\": %s\n", FEAT_JSON_BOOL(FEAT_HTTPD));
- printf(" },\n");
-
- printf(" \"libs\": {\n");
- printf(" \"protobuf\": %s,\n", FEAT_JSON_BOOL(FEAT_PROTOBUF));
- printf(" \"protobuf-source\": \"%s\",\n", FEAT_PROTOBUF_BUNDLED);
- printf(" \"jemalloc\": %s,\n", FEAT_JSON_BOOL(FEAT_JEMALLOC));
- printf(" \"jsonc\": %s,\n", FEAT_JSON_BOOL(FEAT_JSONC));
- printf(" \"libcap\": %s,\n", FEAT_JSON_BOOL(FEAT_LIBCAP));
- printf(" \"libcrypto\": %s,\n", FEAT_JSON_BOOL(FEAT_CRYPTO));
- printf(" \"libm\": %s,\n", FEAT_JSON_BOOL(FEAT_LIBM));
- printf(" \"tcmalloc\": %s,\n", FEAT_JSON_BOOL(FEAT_TCMALLOC));
- printf(" \"zlib\": %s\n", FEAT_JSON_BOOL(1));
- printf(" },\n");
-
- printf(" \"plugins\": {\n");
- printf(" \"apps\": %s,\n", FEAT_JSON_BOOL(FEAT_APPS_PLUGIN));
- printf(" \"cgroup-net\": %s,\n", FEAT_JSON_BOOL(FEAT_CGROUP_NET));
- printf(" \"cups\": %s,\n", FEAT_JSON_BOOL(FEAT_CUPS));
- printf(" \"debugfs\": %s,\n", FEAT_JSON_BOOL(FEAT_DEBUGFS_PLUGIN));
- printf(" \"ebpf\": %s,\n", FEAT_JSON_BOOL(FEAT_EBPF));
- printf(" \"ipmi\": %s,\n", FEAT_JSON_BOOL(FEAT_IPMI));
- printf(" \"nfacct\": %s,\n", FEAT_JSON_BOOL(FEAT_NFACCT));
- printf(" \"perf\": %s,\n", FEAT_JSON_BOOL(FEAT_PERF));
- printf(" \"slabinfo\": %s,\n", FEAT_JSON_BOOL(FEAT_SLABINFO));
- printf(" \"xen\": %s,\n", FEAT_JSON_BOOL(FEAT_XEN));
- printf(" \"xen-vbd-error\": %s\n", FEAT_JSON_BOOL(FEAT_XEN_VBD_ERROR));
- printf(" },\n");
-
- printf(" \"exporters\": {\n");
- printf(" \"kinesis\": %s,\n", FEAT_JSON_BOOL(FEAT_KINESIS));
- printf(" \"pubsub\": %s,\n", FEAT_JSON_BOOL(FEAT_PUBSUB));
- printf(" \"mongodb\": %s,\n", FEAT_JSON_BOOL(FEAT_MONGO));
- printf(" \"prom-remote-write\": %s\n", FEAT_JSON_BOOL(FEAT_REMOTE_WRITE));
- printf(" }\n");
- printf(" \"debug-n-devel\": {\n");
- printf(" \"trace-allocations\": %s\n }\n",FEAT_JSON_BOOL(FEAT_TRACE_ALLOC));
- printf("}\n");
-};
+ populate_packaging_info();
+ populate_system_info();
+ populate_directories();
-#define add_to_bi(buffer, str) \
- { if(first) { \
- buffer_strcat (b, str); \
- first = 0; \
- } else \
- buffer_strcat (b, "|" str); }
+ BUFFER *b = buffer_create(0, NULL);
+ buffer_json_initialize(b, "\"", "\"", 0, true, false);
-void analytics_build_info(BUFFER *b) {
- int first = 1;
-#ifdef ENABLE_DBENGINE
- add_to_bi(b, "dbengine");
-#endif
-#ifdef ENABLE_HTTPS
- add_to_bi(b, "Native HTTPS");
-#endif
-#ifdef ENABLE_ACLK
- add_to_bi(b, "Netdata Cloud");
-#endif
-#if (FEAT_TLS_HOST_VERIFY!=0)
- add_to_bi(b, "TLS Host Verification");
-#endif
-#ifdef ENABLE_ML
- add_to_bi(b, "Machine Learning");
-#endif
-#ifdef ENABLE_COMPRESSION
- add_to_bi(b, "Stream Compression");
-#endif
+ build_info_to_json_object(b);
-#ifdef HAVE_PROTOBUF
- add_to_bi(b, "protobuf");
-#endif
-#ifdef ENABLE_JEMALLOC
- add_to_bi(b, "jemalloc");
-#endif
-#ifdef ENABLE_JSONC
- add_to_bi(b, "JSON-C");
-#endif
-#ifdef HAVE_CAPABILITY
- add_to_bi(b, "libcap");
-#endif
-#ifdef HAVE_CRYPTO
- add_to_bi(b, "libcrypto");
-#endif
-#ifdef STORAGE_WITH_MATH
- add_to_bi(b, "libm");
-#endif
+ buffer_json_finalize(b);
+ printf("%s\n", buffer_tostring(b));
+ buffer_free(b);
+};
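
This function also shows the BUFFER JSON lifecycle the patch standardizes on; a minimal standalone sketch using the same calls that appear in the hunk above:

    BUFFER *b = buffer_create(0, NULL);
    buffer_json_initialize(b, "\"", "\"", 0, true, false); // same options as above
    buffer_json_member_add_string(b, "status", "ok");      // add members as needed
    buffer_json_finalize(b);                                // closes any open objects
    printf("%s\n", buffer_tostring(b));
    buffer_free(b);
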
-#ifdef ENABLE_TCMALLOC
- add_to_bi(b, "tcalloc");
-#endif
- add_to_bi(b, "zlib");
+void analytics_build_info(BUFFER *b) {
+ populate_packaging_info();
+ populate_system_info();
+ populate_directories();
-#ifdef ENABLE_APPS_PLUGIN
- add_to_bi(b, "apps");
-#endif
-#ifdef ENABLE_DEBUGFS_PLUGIN
- add_to_bi(b, "debugfs");
-#endif
-#ifdef HAVE_SETNS
- add_to_bi(b, "cgroup Network Tracking");
-#endif
-#ifdef HAVE_CUPS
- add_to_bi(b, "CUPS");
-#endif
-#ifdef HAVE_LIBBPF
- add_to_bi(b, "EBPF");
-#endif
-#ifdef HAVE_FREEIPMI
- add_to_bi(b, "IPMI");
-#endif
-#ifdef HAVE_NFACCT
- add_to_bi(b, "NFACCT");
-#endif
-#ifdef ENABLE_PERF_PLUGIN
- add_to_bi(b, "perf");
-#endif
-#ifdef ENABLE_SLABINFO
- add_to_bi(b, "slabinfo");
-#endif
-#ifdef HAVE_LIBXENSTAT
- add_to_bi(b, "Xen");
-#endif
-#ifdef HAVE_XENSTAT_VBD_ERROR
- add_to_bi(b, "Xen VBD Error Tracking");
-#endif
+ size_t added = 0;
+ for(size_t i = 0; i < BIB_TERMINATOR ;i++) {
+ if(BUILD_INFO[i].analytics && BUILD_INFO[i].status) {
-#ifdef HAVE_KINESIS
- add_to_bi(b, "AWS Kinesis");
-#endif
-#ifdef ENABLE_EXPORTING_PUBSUB
- add_to_bi(b, "GCP PubSub");
-#endif
-#ifdef HAVE_MONGOC
- add_to_bi(b, "MongoDB");
-#endif
-#ifdef ENABLE_PROMETHEUS_REMOTE_WRITE
- add_to_bi(b, "Prometheus Remote Write");
-#endif
-#ifdef NETDATA_TRACE_ALLOCATIONS
- add_to_bi(b, "DebugTraceAlloc");
-#endif
+ if(added)
+ buffer_strcat(b, "|");
+
+ buffer_strcat (b, BUILD_INFO[i].analytics);
+ added++;
+ }
+ }
}
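
The rewritten analytics_build_info() emits the same pipe-separated list the removed add_to_bi() macro built by hand; for a hypothetical build it would append something like:

    dbengine|Native HTTPS|Netdata Cloud|Machine Learning|protobuf|zlib

Only entries with a non-NULL .analytics label and a true .status are included, so the exact list depends on compile-time features.
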
+
diff --git a/daemon/buildinfo.h b/daemon/buildinfo.h
index d3b439fc..1bb1c976 100644
--- a/daemon/buildinfo.h
+++ b/daemon/buildinfo.h
@@ -11,4 +11,6 @@ char *get_value_from_key(char *buffer, char *key);
void get_install_type(char **install_type, char **prebuilt_arch, char **prebuilt_dist);
+void build_info_to_json_object(BUFFER *b);
+
#endif // NETDATA_BUILDINFO_H
diff --git a/daemon/commands.c b/daemon/commands.c
index fcb75b71..84298416 100644
--- a/daemon/commands.c
+++ b/daemon/commands.c
@@ -143,7 +143,7 @@ static cmd_status_t cmd_reload_health_execute(char *args, char **message)
(void)message;
error_log_limit_unlimited();
- info("COMMAND: Reloading HEALTH configuration.");
+ netdata_log_info("COMMAND: Reloading HEALTH configuration.");
health_reload();
error_log_limit_reset();
@@ -156,9 +156,9 @@ static cmd_status_t cmd_save_database_execute(char *args, char **message)
(void)message;
error_log_limit_unlimited();
- info("COMMAND: Saving databases.");
+ netdata_log_info("COMMAND: Saving databases.");
rrdhost_save_all();
- info("COMMAND: Databases saved.");
+ netdata_log_info("COMMAND: Databases saved.");
error_log_limit_reset();
return CMD_STATUS_SUCCESS;
@@ -170,7 +170,7 @@ static cmd_status_t cmd_reopen_logs_execute(char *args, char **message)
(void)message;
error_log_limit_unlimited();
- info("COMMAND: Reopening all log files.");
+ netdata_log_info("COMMAND: Reopening all log files.");
reopen_all_log_files();
error_log_limit_reset();
@@ -183,7 +183,7 @@ static cmd_status_t cmd_exit_execute(char *args, char **message)
(void)message;
error_log_limit_unlimited();
- info("COMMAND: Cleaning up to exit.");
+ netdata_log_info("COMMAND: Cleaning up to exit.");
netdata_cleanup_and_exit(0);
exit(0);
@@ -205,23 +205,19 @@ static cmd_status_t cmd_reload_claiming_state_execute(char *args, char **message
(void)args;
(void)message;
#if defined(DISABLE_CLOUD) || !defined(ENABLE_ACLK)
- info("The claiming feature has been explicitly disabled");
+ netdata_log_info("The claiming feature has been explicitly disabled");
*message = strdupz("This agent cannot be claimed, it was built without support for Cloud");
return CMD_STATUS_FAILURE;
#endif
- error_log_limit_unlimited();
- info("COMMAND: Reloading Agent Claiming configuration.");
- load_claiming_state();
- registry_update_cloud_base_url();
- rrdpush_claimed_id(localhost);
- error_log_limit_reset();
+ netdata_log_info("COMMAND: Reloading Agent Claiming configuration.");
+ claim_reload_all();
return CMD_STATUS_SUCCESS;
}
static cmd_status_t cmd_reload_labels_execute(char *args, char **message)
{
(void)args;
- info("COMMAND: reloading host labels.");
+ netdata_log_info("COMMAND: reloading host labels.");
reload_host_labels();
BUFFER *wb = buffer_create(10, NULL);
@@ -255,8 +251,10 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message)
char *value = appconfig_get(tmp_config, temp + offset + 1, temp + offset2 + 1, NULL);
if (value == NULL)
{
- error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set", conf_file,
- temp + offset + 1, temp + offset2 + 1);
+ netdata_log_error("Cannot execute read-config conf_file=%s section=%s / key=%s because no value set",
+ conf_file,
+ temp + offset + 1,
+ temp + offset2 + 1);
freez(temp);
return CMD_STATUS_FAILURE;
}
@@ -272,7 +270,7 @@ static cmd_status_t cmd_read_config_execute(char *args, char **message)
static cmd_status_t cmd_write_config_execute(char *args, char **message)
{
UNUSED(message);
- info("write-config %s", args);
+ netdata_log_info("write-config %s", args);
size_t n = strlen(args);
char *separator = strchr(args,'|');
if (separator == NULL)
@@ -296,7 +294,7 @@ static cmd_status_t cmd_write_config_execute(char *args, char **message)
struct config *tmp_config = strcmp(conf_file, "cloud") ? &netdata_config : &cloud_config;
appconfig_set(tmp_config, temp + offset + 1, temp + offset2 + 1, temp + offset3 + 1);
- info("write-config conf_file=%s section=%s key=%s value=%s",conf_file, temp + offset + 1, temp + offset2 + 1,
+ netdata_log_info("write-config conf_file=%s section=%s key=%s value=%s",conf_file, temp + offset + 1, temp + offset2 + 1,
temp + offset3 + 1);
freez(temp);
return CMD_STATUS_SUCCESS;
@@ -313,7 +311,7 @@ static cmd_status_t cmd_ping_execute(char *args, char **message)
static cmd_status_t cmd_aclk_state(char *args, char **message)
{
- info("COMMAND: Reopening aclk/cloud state.");
+ netdata_log_info("COMMAND: Reopening aclk/cloud state.");
if (strstr(args, "json"))
*message = aclk_state_json();
else
@@ -409,7 +407,7 @@ static void pipe_write_cb(uv_write_t* req, int status)
uv_close((uv_handle_t *)client, pipe_close_cb);
--clients;
buffer_free(client->data);
- info("Command Clients = %u\n", clients);
+ netdata_log_info("Command Clients = %u\n", clients);
}
static inline void add_char_to_command_reply(BUFFER *reply_string, unsigned *reply_string_size, char character)
@@ -453,7 +451,7 @@ static void send_command_reply(struct command_context *cmd_ctx, cmd_status_t sta
write_buf.len = reply_string_size;
ret = uv_write(&cmd_ctx->write_req, (uv_stream_t *)client, &write_buf, 1, pipe_write_cb);
if (ret) {
- error("uv_write(): %s", uv_strerror(ret));
+ netdata_log_error("uv_write(): %s", uv_strerror(ret));
}
}
@@ -534,12 +532,12 @@ static void pipe_read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf
struct command_context *cmd_ctx = (struct command_context *)client;
if (0 == nread) {
- info("%s: Zero bytes read by command pipe.", __func__);
+ netdata_log_info("%s: Zero bytes read by command pipe.", __func__);
} else if (UV_EOF == nread) {
- info("EOF found in command pipe.");
+ netdata_log_info("EOF found in command pipe.");
parse_commands(cmd_ctx);
} else if (nread < 0) {
- error("%s: %s", __func__, uv_strerror(nread));
+ netdata_log_error("%s: %s", __func__, uv_strerror(nread));
}
if (nread < 0) { /* stop stream due to EOF or error */
@@ -559,7 +557,7 @@ static void pipe_read_cb(uv_stream_t *client, ssize_t nread, const uv_buf_t *buf
if (nread < 0 && UV_EOF != nread) {
uv_close((uv_handle_t *)client, pipe_close_cb);
--clients;
- info("Command Clients = %u\n", clients);
+ netdata_log_info("Command Clients = %u\n", clients);
}
}
@@ -583,29 +581,29 @@ static void connection_cb(uv_stream_t *server, int status)
client = (uv_pipe_t *)cmd_ctx;
ret = uv_pipe_init(server->loop, client, 1);
if (ret) {
- error("uv_pipe_init(): %s", uv_strerror(ret));
+ netdata_log_error("uv_pipe_init(): %s", uv_strerror(ret));
freez(cmd_ctx);
return;
}
ret = uv_accept(server, (uv_stream_t *)client);
if (ret) {
- error("uv_accept(): %s", uv_strerror(ret));
+ netdata_log_error("uv_accept(): %s", uv_strerror(ret));
uv_close((uv_handle_t *)client, pipe_close_cb);
return;
}
++clients;
- info("Command Clients = %u\n", clients);
+ netdata_log_info("Command Clients = %u\n", clients);
/* Start parsing a new command */
cmd_ctx->command_string_size = 0;
cmd_ctx->command_string[0] = '\0';
ret = uv_read_start((uv_stream_t*)client, alloc_cb, pipe_read_cb);
if (ret) {
- error("uv_read_start(): %s", uv_strerror(ret));
+ netdata_log_error("uv_read_start(): %s", uv_strerror(ret));
uv_close((uv_handle_t *)client, pipe_close_cb);
--clients;
- info("Command Clients = %u\n", clients);
+ netdata_log_info("Command Clients = %u\n", clients);
return;
}
}
@@ -624,7 +622,7 @@ static void command_thread(void *arg)
loop = mallocz(sizeof(uv_loop_t));
ret = uv_loop_init(loop);
if (ret) {
- error("uv_loop_init(): %s", uv_strerror(ret));
+ netdata_log_error("uv_loop_init(): %s", uv_strerror(ret));
command_thread_error = ret;
goto error_after_loop_init;
}
@@ -632,7 +630,7 @@ static void command_thread(void *arg)
ret = uv_async_init(loop, &async, async_cb);
if (ret) {
- error("uv_async_init(): %s", uv_strerror(ret));
+ netdata_log_error("uv_async_init(): %s", uv_strerror(ret));
command_thread_error = ret;
goto error_after_async_init;
}
@@ -640,26 +638,30 @@ static void command_thread(void *arg)
ret = uv_pipe_init(loop, &server_pipe, 0);
if (ret) {
- error("uv_pipe_init(): %s", uv_strerror(ret));
+ netdata_log_error("uv_pipe_init(): %s", uv_strerror(ret));
command_thread_error = ret;
goto error_after_pipe_init;
}
- (void)uv_fs_unlink(loop, &req, PIPENAME, NULL);
+
+ const char *pipename = daemon_pipename();
+
+ (void)uv_fs_unlink(loop, &req, pipename, NULL);
uv_fs_req_cleanup(&req);
- ret = uv_pipe_bind(&server_pipe, PIPENAME);
+ ret = uv_pipe_bind(&server_pipe, pipename);
if (ret) {
- error("uv_pipe_bind(): %s", uv_strerror(ret));
+ netdata_log_error("uv_pipe_bind(): %s", uv_strerror(ret));
command_thread_error = ret;
goto error_after_pipe_bind;
}
+
ret = uv_listen((uv_stream_t *)&server_pipe, SOMAXCONN, connection_cb);
if (ret) {
/* Fallback to backlog of 1 */
- info("uv_listen() failed with backlog = %d, falling back to backlog = 1.", SOMAXCONN);
+ netdata_log_info("uv_listen() failed with backlog = %d, falling back to backlog = 1.", SOMAXCONN);
ret = uv_listen((uv_stream_t *)&server_pipe, 1, connection_cb);
}
if (ret) {
- error("uv_listen(): %s", uv_strerror(ret));
+ netdata_log_error("uv_listen(): %s", uv_strerror(ret));
command_thread_error = ret;
goto error_after_uv_listen;
}
@@ -673,12 +675,12 @@ static void command_thread(void *arg)
uv_run(loop, UV_RUN_DEFAULT);
}
/* cleanup operations of the event loop */
- info("Shutting down command event loop.");
+ netdata_log_info("Shutting down command event loop.");
uv_close((uv_handle_t *)&async, NULL);
uv_close((uv_handle_t*)&server_pipe, NULL);
uv_run(loop, UV_RUN_DEFAULT); /* flush all libuv handles */
- info("Shutting down command loop complete.");
+ netdata_log_info("Shutting down command loop complete.");
fatal_assert(0 == uv_loop_close(loop));
freez(loop);
@@ -714,7 +716,7 @@ void commands_init(void)
if (command_server_initialized)
return;
- info("Initializing command server.");
+ netdata_log_info("Initializing command server.");
for (i = 0 ; i < CMD_TOTAL_COMMANDS ; ++i) {
fatal_assert(0 == uv_mutex_init(&command_lock_array[i]));
}
@@ -723,7 +725,7 @@ void commands_init(void)
completion_init(&completion);
error = uv_thread_create(&thread, command_thread, NULL);
if (error) {
- error("uv_thread_create(): %s", uv_strerror(error));
+ netdata_log_error("uv_thread_create(): %s", uv_strerror(error));
goto after_error;
}
/* wait for worker thread to initialize */
@@ -734,7 +736,7 @@ void commands_init(void)
if (command_thread_error) {
error = uv_thread_join(&thread);
if (error) {
- error("uv_thread_create(): %s", uv_strerror(error));
+ netdata_log_error("uv_thread_create(): %s", uv_strerror(error));
}
goto after_error;
}
@@ -743,7 +745,7 @@ void commands_init(void)
return;
after_error:
- error("Failed to initialize command server. The netdata cli tool will be unable to send commands.");
+ netdata_log_error("Failed to initialize command server. The netdata cli tool will be unable to send commands.");
}
void commands_exit(void)
@@ -754,7 +756,7 @@ void commands_exit(void)
return;
command_thread_shutdown = 1;
- info("Shutting down command server.");
+ netdata_log_info("Shutting down command server.");
/* wake up event loop */
fatal_assert(0 == uv_async_send(&async));
fatal_assert(0 == uv_thread_join(&thread));
@@ -763,6 +765,6 @@ void commands_exit(void)
uv_mutex_destroy(&command_lock_array[i]);
}
uv_rwlock_destroy(&exclusive_rwlock);
- info("Command server has stopped.");
+ netdata_log_info("Command server has stopped.");
command_server_initialized = 0;
}
diff --git a/daemon/commands.h b/daemon/commands.h
index 43a0ef96..368a70a0 100644
--- a/daemon/commands.h
+++ b/daemon/commands.h
@@ -3,12 +3,6 @@
#ifndef NETDATA_COMMANDS_H
#define NETDATA_COMMANDS_H 1
-#ifdef _WIN32
-# define PIPENAME "\\\\?\\pipe\\netdata-cli"
-#else
-# define PIPENAME "/tmp/netdata-ipc"
-#endif
-
#define MAX_COMMAND_LENGTH 4096
#define MAX_EXIT_STATUS_LENGTH 23 /* Can't ever be bigger than "X-18446744073709551616" */
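
The removed PIPENAME macro moves behind daemon_pipename() from the new daemon/pipename.c (17 lines per the diffstat; its body is not part of this hunk). A plausible sketch of such a helper, keeping the old per-platform defaults — the NETDATA_PIPENAME environment override is an assumption, not confirmed by this diff:

    // daemon/pipename.c (sketch; the actual upstream file may differ)
    #include "pipename.h"
    #include <stdlib.h>

    const char *daemon_pipename(void) {
        const char *pipename = getenv("NETDATA_PIPENAME"); // assumed override
        if (pipename)
            return pipename;
    #ifdef _WIN32
        return "\\\\?\\pipe\\netdata-cli";
    #else
        return "/tmp/netdata-ipc";
    #endif
    }
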
diff --git a/daemon/common.c b/daemon/common.c
index 6eae07cf..d441c73b 100644
--- a/daemon/common.c
+++ b/daemon/common.c
@@ -6,18 +6,24 @@ char *netdata_configured_hostname = NULL;
char *netdata_configured_user_config_dir = CONFIG_DIR;
char *netdata_configured_stock_config_dir = LIBCONFIG_DIR;
char *netdata_configured_log_dir = LOG_DIR;
-char *netdata_configured_primary_plugins_dir = NULL;
+char *netdata_configured_primary_plugins_dir = PLUGINS_DIR;
char *netdata_configured_web_dir = WEB_DIR;
char *netdata_configured_cache_dir = CACHE_DIR;
char *netdata_configured_varlib_dir = VARLIB_DIR;
-char *netdata_configured_lock_dir = NULL;
+char *netdata_configured_lock_dir = VARLIB_DIR "/lock";
char *netdata_configured_home_dir = VARLIB_DIR;
char *netdata_configured_host_prefix = NULL;
char *netdata_configured_timezone = NULL;
char *netdata_configured_abbrev_timezone = NULL;
int32_t netdata_configured_utc_offset = 0;
-int netdata_ready;
-int netdata_cloud_setting;
+
+bool netdata_ready = false;
+
+#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK )
+int netdata_cloud_enabled = CONFIG_BOOLEAN_NO;
+#else
+int netdata_cloud_enabled = CONFIG_BOOLEAN_AUTO;
+#endif
long get_netdata_cpus(void) {
static long processors = 0;
@@ -52,5 +58,140 @@ long get_netdata_cpus(void) {
processors = cores_user_configured;
+ if(processors < 1)
+ processors = 1;
+
return processors;
}
+
+const char *cloud_status_to_string(CLOUD_STATUS status) {
+ switch(status) {
+ default:
+ case CLOUD_STATUS_UNAVAILABLE:
+ return "unavailable";
+
+ case CLOUD_STATUS_AVAILABLE:
+ return "available";
+
+ case CLOUD_STATUS_DISABLED:
+ return "disabled";
+
+ case CLOUD_STATUS_BANNED:
+ return "banned";
+
+ case CLOUD_STATUS_OFFLINE:
+ return "offline";
+
+ case CLOUD_STATUS_ONLINE:
+ return "online";
+ }
+}
+
+CLOUD_STATUS cloud_status(void) {
+#ifdef ENABLE_ACLK
+ if(aclk_disable_runtime)
+ return CLOUD_STATUS_BANNED;
+
+ if(aclk_connected)
+ return CLOUD_STATUS_ONLINE;
+
+ if(netdata_cloud_enabled == CONFIG_BOOLEAN_YES) {
+ char *agent_id = get_agent_claimid();
+ bool claimed = agent_id != NULL;
+ freez(agent_id);
+
+ if(claimed)
+ return CLOUD_STATUS_OFFLINE;
+ }
+
+ if(netdata_cloud_enabled != CONFIG_BOOLEAN_NO)
+ return CLOUD_STATUS_AVAILABLE;
+
+ return CLOUD_STATUS_DISABLED;
+#else
+ return CLOUD_STATUS_UNAVAILABLE;
+#endif
+}
+
+time_t cloud_last_change(void) {
+#ifdef ENABLE_ACLK
+ time_t ret = MAX(last_conn_time_mqtt, last_disconnect_time);
+ if(!ret) ret = netdata_start_time;
+ return ret;
+#else
+ return netdata_start_time;
+#endif
+}
+
+time_t cloud_next_connection_attempt(void) {
+#ifdef ENABLE_ACLK
+ return next_connection_attempt;
+#else
+ return 0;
+#endif
+}
+
+size_t cloud_connection_id(void) {
+#ifdef ENABLE_ACLK
+ return aclk_connection_counter;
+#else
+ return 0;
+#endif
+}
+
+const char *cloud_offline_reason() {
+#ifdef ENABLE_ACLK
+ if(!netdata_cloud_enabled)
+ return "disabled";
+
+ if(aclk_disable_runtime)
+ return "banned";
+
+ return aclk_status_to_string();
+#else
+ return "disabled";
+#endif
+}
+
+const char *cloud_base_url() {
+#ifdef ENABLE_ACLK
+ return aclk_cloud_base_url;
+#else
+ return NULL;
+#endif
+}
+
+CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s) {
+ CLOUD_STATUS status = cloud_status();
+
+ buffer_json_member_add_object(wb, "cloud");
+ {
+ size_t id = cloud_connection_id();
+ time_t last_change = cloud_last_change();
+ time_t next_connect = cloud_next_connection_attempt();
+ buffer_json_member_add_uint64(wb, "id", id);
+ buffer_json_member_add_string(wb, "status", cloud_status_to_string(status));
+ buffer_json_member_add_time_t(wb, "since", last_change);
+ buffer_json_member_add_time_t(wb, "age", now_s - last_change);
+
+ if (status != CLOUD_STATUS_ONLINE)
+ buffer_json_member_add_string(wb, "reason", cloud_offline_reason());
+
+ if (status == CLOUD_STATUS_OFFLINE && next_connect > now_s) {
+ buffer_json_member_add_time_t(wb, "next_check", next_connect);
+ buffer_json_member_add_time_t(wb, "next_in", next_connect - now_s);
+ }
+
+ if (cloud_base_url())
+ buffer_json_member_add_string(wb, "url", cloud_base_url());
+
+ char *claim_id = get_agent_claimid();
+ if(claim_id) {
+ buffer_json_member_add_string(wb, "claim_id", claim_id);
+ freez(claim_id);
+ }
+ }
+ buffer_json_object_close(wb); // cloud
+
+ return status;
+}
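
For a claimed agent that is currently disconnected, the object emitted above would look roughly like this (all values illustrative):

    "cloud": {
        "id": 7,
        "status": "offline",
        "since": 1689820000,
        "age": 120,
        "reason": "connection refused",
        "next_check": 1689820180,
        "next_in": 60,
        "url": "https://app.netdata.cloud",
        "claim_id": "11111111-2222-3333-4444-555555555555"
    }
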
diff --git a/daemon/common.h b/daemon/common.h
index aeaf0163..4a390592 100644
--- a/daemon/common.h
+++ b/daemon/common.h
@@ -42,8 +42,8 @@
#include "web/server/web_server.h"
// the new h2o based netdata webserver
-#ifdef ENABLE_HTTPD
-#include "httpd/http_server.h"
+#ifdef ENABLE_H2O
+#include "web/server/h2o/http_server.h"
#endif
// streaming metrics between netdata servers
@@ -89,6 +89,7 @@
#include "static_threads.h"
#include "signals.h"
#include "commands.h"
+#include "pipename.h"
#include "analytics.h"
// global netdata daemon variables
@@ -109,9 +110,29 @@ extern int32_t netdata_configured_utc_offset;
extern int netdata_zero_metrics_enabled;
extern int netdata_anonymous_statistics_enabled;
-extern int netdata_ready;
-extern int netdata_cloud_setting;
+extern bool netdata_ready;
+extern int netdata_cloud_enabled;
+
+extern time_t netdata_start_time;
long get_netdata_cpus(void);
+typedef enum __attribute__((packed)) {
+ CLOUD_STATUS_UNAVAILABLE = 0, // cloud and aclk functionality is not available on this agent
+ CLOUD_STATUS_AVAILABLE, // cloud and aclk functionality is available, but the agent is not claimed
+ CLOUD_STATUS_DISABLED, // cloud and aclk functionality is available, but it is disabled
+ CLOUD_STATUS_BANNED, // the agent has been banned from cloud
+    CLOUD_STATUS_OFFLINE,        // the agent is trying to connect to cloud, but cannot
+ CLOUD_STATUS_ONLINE, // the agent is connected to cloud
+} CLOUD_STATUS;
+
+const char *cloud_status_to_string(CLOUD_STATUS status);
+CLOUD_STATUS cloud_status(void);
+time_t cloud_last_change(void);
+time_t cloud_next_connection_attempt(void);
+size_t cloud_connection_id(void);
+const char *cloud_offline_reason(void);
+const char *cloud_base_url(void);
+CLOUD_STATUS buffer_json_cloud_status(BUFFER *wb, time_t now_s);
+
#endif /* NETDATA_COMMON_H */
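
A caller-side sketch of the status API declared above (logging only; error handling elided):

    void log_cloud_state(void) {
        CLOUD_STATUS s = cloud_status();
        netdata_log_info("cloud is %s", cloud_status_to_string(s));
        if (s == CLOUD_STATUS_OFFLINE)
            netdata_log_info("offline reason: %s", cloud_offline_reason());
    }
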
diff --git a/daemon/config/README.md b/daemon/config/README.md
index 418b12cf..bc5a5885 100644
--- a/daemon/config/README.md
+++ b/daemon/config/README.md
@@ -175,7 +175,7 @@ monitoring](https://github.com/netdata/netdata/blob/master/health/README.md).
| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alarm notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). |
| run at least every seconds | `10` | Controls how often all alarm conditions should be evaluated. |
| postpone alarms during hibernation for seconds | `60` | Prevents false alarms. May need to be increased if you get alarms during hibernation. |
-| rotate log every lines | 2000 | Controls the number of alarm log entries stored in `<lib directory>/health-log.db`, where `<lib directory>` is the one configured in the [\[global\] section](#global-section-options) |
+| health log history | `432000` | The duration, in seconds, for which alarm events are kept in the agent's SQLite database. |
| enabled alarms | * | Defines which alarms to load from both user and stock directories. This is a [simple pattern](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) list of alarm or template names. Can be used to disable specific alarms. For example, `enabled alarms = !oom_kill *` will load all alarms except `oom_kill`. |
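
For example, to keep thirty days of alarm events instead of the default five (432000 s = 5 days), one would set in `netdata.conf` (a sketch; the option is assumed to live in the `[health]` section, like the other options in this table):

    [health]
        health log history = 2592000
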
### [web] section options
diff --git a/daemon/daemon.c b/daemon/daemon.c
index 2b8a6552..0d1d1732 100644
--- a/daemon/daemon.c
+++ b/daemon/daemon.c
@@ -20,7 +20,7 @@ void get_netdata_execution_path(void)
exepath_size = sizeof(exepath) - 1;
ret = uv_exepath(exepath, &exepath_size);
if (0 != ret) {
- error("uv_exepath(\"%s\", %u) (user: %s) failed (%s).", exepath, (unsigned)exepath_size, user,
+ netdata_log_error("uv_exepath(\"%s\", %u) (user: %s) failed (%s).", exepath, (unsigned)exepath_size, user,
uv_strerror(ret));
fatal("Cannot start netdata without getting execution path.");
}
@@ -33,29 +33,49 @@ static void chown_open_file(int fd, uid_t uid, gid_t gid) {
struct stat buf;
if(fstat(fd, &buf) == -1) {
- error("Cannot fstat() fd %d", fd);
+ netdata_log_error("Cannot fstat() fd %d", fd);
return;
}
if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) {
if(fchown(fd, uid, gid) == -1)
- error("Cannot fchown() fd %d.", fd);
+ netdata_log_error("Cannot fchown() fd %d.", fd);
}
}
-void create_needed_dir(const char *dir, uid_t uid, gid_t gid)
+static void fix_directory_file_permissions(const char *dirname, uid_t uid, gid_t gid, bool recursive)
{
- // attempt to create the directory
- if(mkdir(dir, 0755) == 0) {
- // we created it
+ char filename[FILENAME_MAX + 1];
- // chown it to match the required user
- if(chown(dir, uid, gid) == -1)
- error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid);
+ DIR *dir = opendir(dirname);
+ if (!dir)
+ return;
+
+ struct dirent *de = NULL;
+
+ while ((de = readdir(dir))) {
+ if (de->d_type == DT_DIR && (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")))
+ continue;
+
+ (void) snprintfz(filename, FILENAME_MAX, "%s/%s", dirname, de->d_name);
+ if (de->d_type == DT_REG || recursive) {
+ if (chown(filename, uid, gid) == -1)
+ netdata_log_error("Cannot chown %s '%s' to %u:%u", de->d_type == DT_DIR ? "directory" : "file", filename, (unsigned int)uid, (unsigned int)gid);
+ }
+
+ if (de->d_type == DT_DIR && recursive)
+ fix_directory_file_permissions(filename, uid, gid, recursive);
}
- else if(errno != EEXIST)
- // log an error only if the directory does not exist
- error("Cannot create directory '%s'", dir);
+
+ closedir(dir);
+}
+
+void change_dir_ownership(const char *dir, uid_t uid, gid_t gid, bool recursive)
+{
+ if (chown(dir, uid, gid) == -1)
+ netdata_log_error("Cannot chown directory '%s' to %u:%u", dir, (unsigned int)uid, (unsigned int)gid);
+
+ fix_directory_file_permissions(dir, uid, gid, recursive);
}
void clean_directory(char *dirname)
@@ -69,33 +89,45 @@ void clean_directory(char *dirname)
while((de = readdir(dir)))
if(de->d_type == DT_REG)
if (unlinkat(dir_fd, de->d_name, 0))
- error("Cannot delete %s/%s", dirname, de->d_name);
+ netdata_log_error("Cannot delete %s/%s", dirname, de->d_name);
closedir(dir);
}
+void prepare_required_directories(uid_t uid, gid_t gid) {
+ change_dir_ownership(netdata_configured_cache_dir, uid, gid, true);
+ change_dir_ownership(netdata_configured_varlib_dir, uid, gid, false);
+ change_dir_ownership(netdata_configured_lock_dir, uid, gid, false);
+ change_dir_ownership(netdata_configured_log_dir, uid, gid, false);
+ change_dir_ownership(claimingdirectory, uid, gid, false);
+
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/registry", netdata_configured_varlib_dir);
+ change_dir_ownership(filename, uid, gid, false);
+
+ clean_directory(netdata_configured_lock_dir);
+}
+
int become_user(const char *username, int pid_fd) {
int am_i_root = (getuid() == 0)?1:0;
struct passwd *pw = getpwnam(username);
if(!pw) {
- error("User %s is not present.", username);
+ netdata_log_error("User %s is not present.", username);
return -1;
}
uid_t uid = pw->pw_uid;
gid_t gid = pw->pw_gid;
- create_needed_dir(netdata_configured_cache_dir, uid, gid);
- create_needed_dir(netdata_configured_varlib_dir, uid, gid);
- create_needed_dir(netdata_configured_lock_dir, uid, gid);
- create_needed_dir(claimingdirectory, uid, gid);
+ if (am_i_root)
+ netdata_log_info("I am root, so checking permissions");
- clean_directory(netdata_configured_lock_dir);
+ prepare_required_directories(uid, gid);
if(pidfile[0]) {
if(chown(pidfile, uid, gid) == -1)
- error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid);
+ netdata_log_error("Cannot chown '%s' to %u:%u", pidfile, (unsigned int)uid, (unsigned int)gid);
}
int ngroups = (int)sysconf(_SC_NGROUPS_MAX);
@@ -108,7 +140,7 @@ int become_user(const char *username, int pid_fd) {
if(getgrouplist(username, gid, supplementary_groups, &ngroups) == -1) {
#endif /* __APPLE__ */
if(am_i_root)
- error("Cannot get supplementary groups of user '%s'.", username);
+ netdata_log_error("Cannot get supplementary groups of user '%s'.", username);
ngroups = 0;
}
@@ -122,7 +154,7 @@ int become_user(const char *username, int pid_fd) {
if(supplementary_groups && ngroups > 0) {
if(setgroups((size_t)ngroups, supplementary_groups) == -1) {
if(am_i_root)
- error("Cannot set supplementary groups for user '%s'", username);
+ netdata_log_error("Cannot set supplementary groups for user '%s'", username);
}
ngroups = 0;
}
@@ -135,7 +167,7 @@ int become_user(const char *username, int pid_fd) {
#else
if(setresgid(gid, gid, gid) != 0) {
#endif /* __APPLE__ */
- error("Cannot switch to user's %s group (gid: %u).", username, gid);
+ netdata_log_error("Cannot switch to user's %s group (gid: %u).", username, gid);
return -1;
}
@@ -144,24 +176,24 @@ int become_user(const char *username, int pid_fd) {
#else
if(setresuid(uid, uid, uid) != 0) {
#endif /* __APPLE__ */
- error("Cannot switch to user %s (uid: %u).", username, uid);
+ netdata_log_error("Cannot switch to user %s (uid: %u).", username, uid);
return -1;
}
if(setgid(gid) != 0) {
- error("Cannot switch to user's %s group (gid: %u).", username, gid);
+ netdata_log_error("Cannot switch to user's %s group (gid: %u).", username, gid);
return -1;
}
if(setegid(gid) != 0) {
- error("Cannot effectively switch to user's %s group (gid: %u).", username, gid);
+ netdata_log_error("Cannot effectively switch to user's %s group (gid: %u).", username, gid);
return -1;
}
if(setuid(uid) != 0) {
- error("Cannot switch to user %s (uid: %u).", username, uid);
+ netdata_log_error("Cannot switch to user %s (uid: %u).", username, uid);
return -1;
}
if(seteuid(uid) != 0) {
- error("Cannot effectively switch to user %s (uid: %u).", username, uid);
+ netdata_log_error("Cannot effectively switch to user %s (uid: %u).", username, uid);
return -1;
}
@@ -181,7 +213,7 @@ static void oom_score_adj(void) {
// read the existing score
if(read_single_signed_number_file("/proc/self/oom_score_adj", &old_score)) {
- error("Out-Of-Memory (OOM) score setting is not supported on this system.");
+ netdata_log_error("Out-Of-Memory (OOM) score setting is not supported on this system.");
return;
}
@@ -202,26 +234,26 @@ static void oom_score_adj(void) {
if(s && *s && (isdigit(*s) || *s == '-' || *s == '+'))
wanted_score = atoll(s);
else if(s && !strcmp(s, "keep")) {
- info("Out-Of-Memory (OOM) kept as-is (running with %d)", (int) old_score);
+ netdata_log_info("Out-Of-Memory (OOM) kept as-is (running with %d)", (int) old_score);
return;
}
else {
- info("Out-Of-Memory (OOM) score not changed due to non-numeric setting: '%s' (running with %d)", s, (int)old_score);
+ netdata_log_info("Out-Of-Memory (OOM) score not changed due to non-numeric setting: '%s' (running with %d)", s, (int)old_score);
return;
}
if(wanted_score < OOM_SCORE_ADJ_MIN) {
- error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN);
+ netdata_log_error("Wanted Out-Of-Memory (OOM) score %d is too small. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MIN);
wanted_score = OOM_SCORE_ADJ_MIN;
}
if(wanted_score > OOM_SCORE_ADJ_MAX) {
- error("Wanted Out-Of-Memory (OOM) score %d is too big. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX);
+ netdata_log_error("Wanted Out-Of-Memory (OOM) score %d is too big. Using %d", (int)wanted_score, (int)OOM_SCORE_ADJ_MAX);
wanted_score = OOM_SCORE_ADJ_MAX;
}
if(old_score == wanted_score) {
- info("Out-Of-Memory (OOM) score is already set to the wanted value %d", (int)old_score);
+ netdata_log_info("Out-Of-Memory (OOM) score is already set to the wanted value %d", (int)old_score);
return;
}
@@ -235,25 +267,27 @@ static void oom_score_adj(void) {
if(written) {
if(read_single_signed_number_file("/proc/self/oom_score_adj", &final_score))
- error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score);
+ netdata_log_error("Adjusted my Out-Of-Memory (OOM) score to %d, but cannot verify it.", (int)wanted_score);
else if(final_score == wanted_score)
- info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score);
+ netdata_log_info("Adjusted my Out-Of-Memory (OOM) score from %d to %d.", (int)old_score, (int)final_score);
else
- error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score);
+ netdata_log_error("Adjusted my Out-Of-Memory (OOM) score from %d to %d, but it has been set to %d.", (int)old_score, (int)wanted_score, (int)final_score);
analytics_report_oom_score(final_score);
}
else
- error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score);
+ netdata_log_error("Failed to adjust my Out-Of-Memory (OOM) score to %d. Running with %d. (systemd systems may change it via netdata.service)", (int)wanted_score, (int)old_score);
}
else
- error("Failed to adjust my Out-Of-Memory (OOM) score. Cannot open /proc/self/oom_score_adj for writing.");
+ netdata_log_error("Failed to adjust my Out-Of-Memory (OOM) score. Cannot open /proc/self/oom_score_adj for writing.");
}
static void process_nice_level(void) {
#ifdef HAVE_NICE
int nice_level = (int)config_get_number(CONFIG_SECTION_GLOBAL, "process nice level", 19);
- if(nice(nice_level) == -1) error("Cannot set netdata CPU nice level to %d.", nice_level);
- else debug(D_SYSTEM, "Set netdata nice level to %d.", nice_level);
+ if(nice(nice_level) == -1)
+ netdata_log_error("Cannot set netdata CPU nice level to %d.", nice_level);
+ else
+ netdata_log_debug(D_SYSTEM, "Set netdata nice level to %d.", nice_level);
#endif // HAVE_NICE
};
@@ -309,7 +343,7 @@ struct sched_def {
static void sched_getscheduler_report(void) {
int sched = sched_getscheduler(0);
if(sched == -1) {
- error("Cannot get my current process scheduling policy.");
+ netdata_log_error("Cannot get my current process scheduling policy.");
return;
}
else {
@@ -319,23 +353,23 @@ static void sched_getscheduler_report(void) {
if(scheduler_defaults[i].flags & SCHED_FLAG_PRIORITY_CONFIGURABLE) {
struct sched_param param;
if(sched_getparam(0, &param) == -1) {
- error("Cannot get the process scheduling priority for my policy '%s'", scheduler_defaults[i].name);
+ netdata_log_error("Cannot get the process scheduling priority for my policy '%s'", scheduler_defaults[i].name);
return;
}
else {
- info("Running with process scheduling policy '%s', priority %d", scheduler_defaults[i].name, param.sched_priority);
+ netdata_log_info("Running with process scheduling policy '%s', priority %d", scheduler_defaults[i].name, param.sched_priority);
}
}
else if(scheduler_defaults[i].flags & SCHED_FLAG_USE_NICE) {
#ifdef HAVE_GETPRIORITY
int n = getpriority(PRIO_PROCESS, 0);
- info("Running with process scheduling policy '%s', nice level %d", scheduler_defaults[i].name, n);
+ netdata_log_info("Running with process scheduling policy '%s', nice level %d", scheduler_defaults[i].name, n);
#else // !HAVE_GETPRIORITY
- info("Running with process scheduling policy '%s'", scheduler_defaults[i].name);
+ netdata_log_info("Running with process scheduling policy '%s'", scheduler_defaults[i].name);
#endif // !HAVE_GETPRIORITY
}
else {
- info("Running with process scheduling policy '%s'", scheduler_defaults[i].name);
+ netdata_log_info("Running with process scheduling policy '%s'", scheduler_defaults[i].name);
}
return;
@@ -374,14 +408,14 @@ static void sched_setscheduler_set(void) {
#ifdef HAVE_SCHED_GET_PRIORITY_MIN
errno = 0;
if(priority < sched_get_priority_min(policy)) {
- error("scheduler %s (%d) priority %d is below the minimum %d. Using the minimum.", name, policy, priority, sched_get_priority_min(policy));
+ netdata_log_error("scheduler %s (%d) priority %d is below the minimum %d. Using the minimum.", name, policy, priority, sched_get_priority_min(policy));
priority = sched_get_priority_min(policy);
}
#endif
#ifdef HAVE_SCHED_GET_PRIORITY_MAX
errno = 0;
if(priority > sched_get_priority_max(policy)) {
- error("scheduler %s (%d) priority %d is above the maximum %d. Using the maximum.", name, policy, priority, sched_get_priority_max(policy));
+ netdata_log_error("scheduler %s (%d) priority %d is above the maximum %d. Using the maximum.", name, policy, priority, sched_get_priority_max(policy));
priority = sched_get_priority_max(policy);
}
#endif
@@ -390,7 +424,7 @@ static void sched_setscheduler_set(void) {
}
if(!found) {
- error("Unknown scheduling policy '%s' - falling back to nice", name);
+ netdata_log_error("Unknown scheduling policy '%s' - falling back to nice", name);
goto fallback;
}
@@ -401,10 +435,13 @@ static void sched_setscheduler_set(void) {
errno = 0;
i = sched_setscheduler(0, policy, &param);
if(i != 0) {
- error("Cannot adjust netdata scheduling policy to %s (%d), with priority %d. Falling back to nice.", name, policy, priority);
+ netdata_log_error("Cannot adjust netdata scheduling policy to %s (%d), with priority %d. Falling back to nice.",
+ name,
+ policy,
+ priority);
}
else {
- info("Adjusted netdata scheduling policy to %s (%d), with priority %d.", name, policy, priority);
+ netdata_log_info("Adjusted netdata scheduling policy to %s (%d), with priority %d.", name, policy, priority);
if(!(flags & SCHED_FLAG_USE_NICE))
goto report;
}
@@ -457,15 +494,16 @@ int become_daemon(int dont_fork, const char *user)
pidfd = open(pidfile, O_WRONLY | O_CREAT, 0644);
if(pidfd >= 0) {
if(ftruncate(pidfd, 0) != 0)
- error("Cannot truncate pidfile '%s'.", pidfile);
+ netdata_log_error("Cannot truncate pidfile '%s'.", pidfile);
char b[100];
sprintf(b, "%d\n", getpid());
ssize_t i = write(pidfd, b, strlen(b));
if(i <= 0)
- error("Cannot write pidfile '%s'.", pidfile);
+ netdata_log_error("Cannot write pidfile '%s'.", pidfile);
}
- else error("Failed to open pidfile '%s'.", pidfile);
+ else
+ netdata_log_error("Failed to open pidfile '%s'.", pidfile);
}
// Set new file permissions
@@ -482,17 +520,13 @@ int become_daemon(int dont_fork, const char *user)
if(user && *user) {
if(become_user(user, pidfd) != 0) {
- error("Cannot become user '%s'. Continuing as we are.", user);
+ netdata_log_error("Cannot become user '%s'. Continuing as we are.", user);
}
- else debug(D_SYSTEM, "Successfully became user '%s'.", user);
+ else
+ netdata_log_debug(D_SYSTEM, "Successfully became user '%s'.", user);
}
else {
- create_needed_dir(netdata_configured_cache_dir, getuid(), getgid());
- create_needed_dir(netdata_configured_varlib_dir, getuid(), getgid());
- create_needed_dir(netdata_configured_lock_dir, getuid(), getgid());
- create_needed_dir(claimingdirectory, getuid(), getgid());
-
- clean_directory(netdata_configured_lock_dir);
+ prepare_required_directories(getuid(), getgid());
}
if(pidfd != -1)
diff --git a/daemon/event_loop.c b/daemon/event_loop.c
index 5fd02377..fb387915 100644
--- a/daemon/event_loop.c
+++ b/daemon/event_loop.c
@@ -56,5 +56,10 @@ void register_libuv_worker_jobs() {
// netdatacli
worker_register_job_name(UV_EVENT_SCHEDULE_CMD, "schedule command");
- uv_thread_set_name_np(pthread_self(), "LIBUV_WORKER");
+ static int workers = 0;
+ int worker_id = __atomic_add_fetch(&workers, 1, __ATOMIC_RELAXED);
+
+ char buf[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(buf, NETDATA_THREAD_TAG_MAX, "UV_WORKER[%d]", worker_id);
+ uv_thread_set_name_np(pthread_self(), buf);
}
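
The hunk above replaces the single shared "LIBUV_WORKER" name with a unique name per worker, using a relaxed atomic counter; annotated, the idiom is:

    static int workers = 0;                                             // shared across workers
    int worker_id = __atomic_add_fetch(&workers, 1, __ATOMIC_RELAXED); // 1, 2, 3, ...

    char buf[NETDATA_THREAD_TAG_MAX + 1];
    snprintfz(buf, NETDATA_THREAD_TAG_MAX, "UV_WORKER[%d]", worker_id);
    uv_thread_set_name_np(pthread_self(), buf);                         // distinguishable in ps/top/gdb

Relaxed ordering suffices here because only the counter's value matters, not its ordering relative to other memory operations.
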
diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c
index ee68bebd..ce8d4140 100644
--- a/daemon/global_statistics.c
+++ b/daemon/global_statistics.c
@@ -231,10 +231,10 @@ static void global_statistics_charts(void) {
static collected_number compression_ratio = -1,
average_response_time = -1;
- static time_t netdata_start_time = 0;
- if (!netdata_start_time)
- netdata_start_time = now_boottime_sec();
- time_t netdata_uptime = now_boottime_sec() - netdata_start_time;
+ static time_t netdata_boottime_time = 0;
+ if (!netdata_boottime_time)
+ netdata_boottime_time = now_boottime_sec();
+ time_t netdata_uptime = now_boottime_sec() - netdata_boottime_time;
struct global_statistics gs;
struct rusage me;
@@ -1718,7 +1718,7 @@ static void dbengine2_statistics_charts(void) {
cache_efficiency_stats = rrdeng_get_cache_efficiency_stats();
mrg_stats_old = mrg_stats;
- mrg_stats = mrg_get_statistics(main_mrg);
+ mrg_get_statistics(main_mrg, &mrg_stats);
struct rrdeng_buffer_sizes buffers = rrdeng_get_buffer_sizes();
size_t buffers_total_size = buffers.handles + buffers.xt_buf + buffers.xt_io + buffers.pdc + buffers.descriptors +
@@ -3435,6 +3435,7 @@ static struct worker_utilization all_workers_utilization[] = {
{ .name = "RRDCONTEXT", .family = "workers contexts", .priority = 1000000 },
{ .name = "REPLICATION", .family = "workers replication sender", .priority = 1000000 },
{ .name = "SERVICE", .family = "workers service", .priority = 1000000 },
+ { .name = "PROFILER", .family = "workers profile", .priority = 1000000 },
// has to be terminated with a NULL
{ .name = NULL, .family = NULL }
@@ -4123,7 +4124,7 @@ static void global_statistics_cleanup(void *ptr)
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- info("cleaning up...");
+ netdata_log_info("cleaning up...");
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
}
@@ -4194,7 +4195,7 @@ static void global_statistics_workers_cleanup(void *ptr)
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- info("cleaning up...");
+ netdata_log_info("cleaning up...");
worker_utilization_finish();
@@ -4238,7 +4239,7 @@ static void global_statistics_sqlite3_cleanup(void *ptr)
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- info("cleaning up...");
+ netdata_log_info("cleaning up...");
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
}
diff --git a/daemon/main.c b/daemon/main.c
index cff6530f..13644d4b 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -14,7 +14,7 @@ int netdata_anonymous_statistics_enabled;
int libuv_worker_threads = MIN_LIBUV_WORKER_THREADS;
bool ieee754_doubles = false;
-
+time_t netdata_start_time = 0;
struct netdata_static_thread *static_threads;
struct config netdata_config = {
@@ -35,6 +35,7 @@ typedef struct service_thread {
SERVICE_THREAD_TYPE type;
SERVICE_TYPE services;
char name[NETDATA_THREAD_NAME_MAX + 1];
+ bool stop_immediately;
bool cancelled;
union {
@@ -48,11 +49,9 @@ typedef struct service_thread {
} SERVICE_THREAD;
struct service_globals {
- SERVICE_TYPE running;
SPINLOCK lock;
Pvoid_t pid_judy;
} service_globals = {
- .running = ~0,
.pid_judy = NULL,
};
@@ -60,7 +59,7 @@ SERVICE_THREAD *service_register(SERVICE_THREAD_TYPE thread_type, request_quit_t
SERVICE_THREAD *sth = NULL;
pid_t tid = gettid();
- netdata_spinlock_lock(&service_globals.lock);
+ spinlock_lock(&service_globals.lock);
Pvoid_t *PValue = JudyLIns(&service_globals.pid_judy, tid, PJE0);
if(!*PValue) {
sth = callocz(1, sizeof(SERVICE_THREAD));
@@ -87,7 +86,7 @@ SERVICE_THREAD *service_register(SERVICE_THREAD_TYPE thread_type, request_quit_t
else {
sth = *PValue;
}
- netdata_spinlock_unlock(&service_globals.lock);
+ spinlock_unlock(&service_globals.lock);
return sth;
}
@@ -95,13 +94,13 @@ SERVICE_THREAD *service_register(SERVICE_THREAD_TYPE thread_type, request_quit_t
void service_exits(void) {
pid_t tid = gettid();
- netdata_spinlock_lock(&service_globals.lock);
+ spinlock_lock(&service_globals.lock);
Pvoid_t *PValue = JudyLGet(service_globals.pid_judy, tid, PJE0);
if(PValue) {
freez(*PValue);
JudyLDel(&service_globals.pid_judy, tid, PJE0);
}
- netdata_spinlock_unlock(&service_globals.lock);
+ spinlock_unlock(&service_globals.lock);
}
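
service_register() and service_exits() maintain one SERVICE_THREAD per thread id in a JudyL array, always under service_globals.lock. A minimal sketch of that tid-keyed map, using the same Judy calls (lock and error paths elided):

    Pvoid_t judy = NULL;
    Word_t tid = (Word_t)gettid();

    Pvoid_t *PValue = JudyLIns(&judy, tid, PJE0);      // insert slot (or find existing)
    if (!*PValue)
        *PValue = callocz(1, sizeof(SERVICE_THREAD));  // first registration for this tid

    PValue = JudyLGet(judy, tid, PJE0);                // lookup by tid
    if (PValue) {
        freez(*PValue);
        JudyLDel(&judy, tid, PJE0);                    // unregister on thread exit
    }
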
bool service_running(SERVICE_TYPE service) {
@@ -110,21 +109,13 @@ bool service_running(SERVICE_TYPE service) {
if(unlikely(!sth))
sth = service_register(SERVICE_THREAD_TYPE_NETDATA, NULL, NULL, NULL, false);
- if(netdata_exit)
- __atomic_store_n(&service_globals.running, 0, __ATOMIC_RELAXED);
-
- if(service == 0)
- service = sth->services;
-
sth->services |= service;
- return ((__atomic_load_n(&service_globals.running, __ATOMIC_RELAXED) & service) == service);
+ return !(sth->stop_immediately || netdata_exit);
}
void service_signal_exit(SERVICE_TYPE service) {
- __atomic_and_fetch(&service_globals.running, ~(service), __ATOMIC_RELAXED);
-
- netdata_spinlock_lock(&service_globals.lock);
+ spinlock_lock(&service_globals.lock);
Pvoid_t *PValue;
Word_t tid = 0;
@@ -132,15 +123,18 @@ void service_signal_exit(SERVICE_TYPE service) {
while((PValue = JudyLFirstThenNext(service_globals.pid_judy, &tid, &first))) {
SERVICE_THREAD *sth = *PValue;
- if((sth->services & service) && sth->request_quit_callback) {
- netdata_spinlock_unlock(&service_globals.lock);
- sth->request_quit_callback(sth->data);
- netdata_spinlock_lock(&service_globals.lock);
- continue;
+ if((sth->services & service)) {
+ sth->stop_immediately = true;
+
+ if(sth->request_quit_callback) {
+ spinlock_unlock(&service_globals.lock);
+ sth->request_quit_callback(sth->data);
+ spinlock_lock(&service_globals.lock);
+ }
}
}
- netdata_spinlock_unlock(&service_globals.lock);
+ spinlock_unlock(&service_globals.lock);
}
static void service_to_buffer(BUFFER *wb, SERVICE_TYPE service) {
@@ -187,7 +181,7 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
{
buffer_flush(thread_list);
- netdata_spinlock_lock(&service_globals.lock);
+ spinlock_lock(&service_globals.lock);
Pvoid_t *PValue;
Word_t tid = 0;
@@ -217,15 +211,15 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
running_services |= sth->services & service;
if(sth->force_quit_callback) {
- netdata_spinlock_unlock(&service_globals.lock);
+ spinlock_unlock(&service_globals.lock);
sth->force_quit_callback(sth->data);
- netdata_spinlock_lock(&service_globals.lock);
+ spinlock_lock(&service_globals.lock);
continue;
}
}
}
- netdata_spinlock_unlock(&service_globals.lock);
+ spinlock_unlock(&service_globals.lock);
}
service_signal_exit(service);
@@ -244,7 +238,7 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
running_services = 0;
buffer_flush(thread_list);
- netdata_spinlock_lock(&service_globals.lock);
+ spinlock_lock(&service_globals.lock);
Pvoid_t *PValue;
Word_t tid = 0;
@@ -262,7 +256,7 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
}
}
- netdata_spinlock_unlock(&service_globals.lock);
+ spinlock_unlock(&service_globals.lock);
if(running) {
log_countdown_ut -= (log_countdown_ut >= sleep_ut) ? sleep_ut : log_countdown_ut;
@@ -271,7 +265,7 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
buffer_flush(service_list);
service_to_buffer(service_list, running_services);
- info("SERVICE CONTROL: waiting for the following %zu services [ %s] to exit: %s",
+ netdata_log_info("SERVICE CONTROL: waiting for the following %zu services [ %s] to exit: %s",
running, buffer_tostring(service_list),
running <= 10 ? buffer_tostring(thread_list) : "");
}
@@ -286,7 +280,7 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
if(running) {
buffer_flush(service_list);
service_to_buffer(service_list, running_services);
- info("SERVICE CONTROL: "
+ netdata_log_info("SERVICE CONTROL: "
"the following %zu service(s) [ %s] take too long to exit: %s; "
"giving up on them...",
running, buffer_tostring(service_list),
@@ -303,9 +297,9 @@ static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
{ \
usec_t now_ut = now_monotonic_usec(); \
if(prev_msg) \
- info("NETDATA SHUTDOWN: in %7llu ms, %s%s - next: %s", (now_ut - last_ut) / USEC_PER_MS, (timeout)?"(TIMEOUT) ":"", prev_msg, msg); \
+ netdata_log_info("NETDATA SHUTDOWN: in %7llu ms, %s%s - next: %s", (now_ut - last_ut) / USEC_PER_MS, (timeout)?"(TIMEOUT) ":"", prev_msg, msg); \
else \
- info("NETDATA SHUTDOWN: next: %s", msg); \
+ netdata_log_info("NETDATA SHUTDOWN: next: %s", msg); \
last_ut = now_ut; \
prev_msg = msg; \
timeout = false; \
@@ -320,7 +314,7 @@ void netdata_cleanup_and_exit(int ret) {
bool timeout = false;
error_log_limit_unlimited();
- info("NETDATA SHUTDOWN: initializing shutdown with code %d...", ret);
+ netdata_log_info("NETDATA SHUTDOWN: initializing shutdown with code %d...", ret);
send_statistics("EXIT", ret?"ERROR":"OK","-");
@@ -344,11 +338,6 @@ void netdata_cleanup_and_exit(int ret) {
webrtc_close_all_connections();
- delta_shutdown_time("disable ML detection and training threads");
-
- ml_stop_threads();
- ml_fini();
-
delta_shutdown_time("disable maintenance, new queries, new web requests, new streaming connections and aclk");
service_signal_exit(
@@ -363,8 +352,7 @@ void netdata_cleanup_and_exit(int ret) {
delta_shutdown_time("stop replication, exporters, health and web servers threads");
timeout = !service_wait_exit(
- SERVICE_REPLICATION
- | SERVICE_EXPORTERS
+ SERVICE_EXPORTERS
| SERVICE_HEALTH
| SERVICE_WEB_SERVER
| SERVICE_HTTPD
@@ -377,6 +365,17 @@ void netdata_cleanup_and_exit(int ret) {
| SERVICE_STREAMING
, 3 * USEC_PER_SEC);
+ delta_shutdown_time("stop replication threads");
+
+ timeout = !service_wait_exit(
+ SERVICE_REPLICATION // replication has to be stopped after STREAMING, because it cleans up ARAL
+ , 3 * USEC_PER_SEC);
+
+ delta_shutdown_time("disable ML detection and training threads");
+
+ ml_stop_threads();
+ ml_fini();
+
delta_shutdown_time("stop context thread");
timeout = !service_wait_exit(
@@ -446,8 +445,11 @@ void netdata_cleanup_and_exit(int ret) {
for (size_t tier = 0; tier < storage_tiers; tier++)
running += rrdeng_collectors_running(multidb_ctx[tier]);
- if(running)
- sleep_usec(100 * USEC_PER_MS);
+ if(running) {
+ error_limit_static_thread_var(erl, 1, 100 * USEC_PER_MS);
+ error_limit(&erl, "waiting for %zu collectors to finish", running);
+ // sleep_usec(100 * USEC_PER_MS);
+ }
}
delta_shutdown_time("wait for dbengine main cache to finish flushing");
@@ -477,7 +479,7 @@ void netdata_cleanup_and_exit(int ret) {
delta_shutdown_time("remove pid file");
if(unlink(pidfile) != 0)
- error("EXIT: cannot unlink pidfile '%s'.", pidfile);
+ netdata_log_error("EXIT: cannot unlink pidfile '%s'.", pidfile);
}
#ifdef ENABLE_HTTPS
@@ -492,7 +494,7 @@ void netdata_cleanup_and_exit(int ret) {
delta_shutdown_time("exit");
usec_t ended_ut = now_monotonic_usec();
- info("NETDATA SHUTDOWN: completed in %llu ms - netdata is now exiting - bye bye...", (ended_ut - started_ut) / USEC_PER_MS);
+ netdata_log_info("NETDATA SHUTDOWN: completed in %llu ms - netdata is now exiting - bye bye...", (ended_ut - started_ut) / USEC_PER_MS);
exit(ret);
}
@@ -516,7 +518,7 @@ int make_dns_decision(const char *section_name, const char *config_name, const c
if(!strcmp("no",value))
return 0;
if(strcmp("heuristic",value))
- error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'",
+ netdata_log_error("Invalid configuration option '%s' for '%s'/'%s'. Valid options are 'yes', 'no' and 'heuristic'. Proceeding with 'heuristic'",
value, section_name, config_name);
return simple_pattern_is_potential_name(p);
@@ -590,17 +592,17 @@ void web_server_config_options(void)
else if(!strcmp(s, "fixed"))
web_gzip_strategy = Z_FIXED;
else {
- error("Invalid compression strategy '%s'. Valid strategies are 'default', 'filtered', 'huffman only', 'rle' and 'fixed'. Proceeding with 'default'.", s);
+ netdata_log_error("Invalid compression strategy '%s'. Valid strategies are 'default', 'filtered', 'huffman only', 'rle' and 'fixed'. Proceeding with 'default'.", s);
web_gzip_strategy = Z_DEFAULT_STRATEGY;
}
web_gzip_level = (int)config_get_number(CONFIG_SECTION_WEB, "gzip compression level", 3);
if(web_gzip_level < 1) {
- error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 1 (fastest compression).", web_gzip_level);
+ netdata_log_error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 1 (fastest compression).", web_gzip_level);
web_gzip_level = 1;
}
else if(web_gzip_level > 9) {
- error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 9 (best compression).", web_gzip_level);
+ netdata_log_error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 9 (best compression).", web_gzip_level);
web_gzip_level = 9;
}
}
@@ -609,7 +611,7 @@ void web_server_config_options(void)
// killpid kills pid with SIGTERM.
int killpid(pid_t pid) {
int ret;
- debug(D_EXIT, "Request to kill pid %d", pid);
+ netdata_log_debug(D_EXIT, "Request to kill pid %d", pid);
errno = 0;
ret = kill(pid, SIGTERM);
@@ -620,11 +622,11 @@ int killpid(pid_t pid) {
return ret;
case EPERM:
- error("Cannot kill pid %d, but I do not have enough permissions.", pid);
+ netdata_log_error("Cannot kill pid %d, but I do not have enough permissions.", pid);
break;
default:
- error("Cannot kill pid %d, but I received an error.", pid);
+ netdata_log_error("Cannot kill pid %d, but I received an error.", pid);
break;
}
}
@@ -635,27 +637,27 @@ int killpid(pid_t pid) {
static void set_nofile_limit(struct rlimit *rl) {
// get the num files allowed
if(getrlimit(RLIMIT_NOFILE, rl) != 0) {
- error("getrlimit(RLIMIT_NOFILE) failed");
+ netdata_log_error("getrlimit(RLIMIT_NOFILE) failed");
return;
}
- info("resources control: allowed file descriptors: soft = %zu, max = %zu",
+ netdata_log_info("resources control: allowed file descriptors: soft = %zu, max = %zu",
(size_t) rl->rlim_cur, (size_t) rl->rlim_max);
// make the soft/hard limits equal
rl->rlim_cur = rl->rlim_max;
if (setrlimit(RLIMIT_NOFILE, rl) != 0) {
- error("setrlimit(RLIMIT_NOFILE, { %zu, %zu }) failed", (size_t)rl->rlim_cur, (size_t)rl->rlim_max);
+ netdata_log_error("setrlimit(RLIMIT_NOFILE, { %zu, %zu }) failed", (size_t)rl->rlim_cur, (size_t)rl->rlim_max);
}
// sanity check to make sure we have enough file descriptors available to open
if (getrlimit(RLIMIT_NOFILE, rl) != 0) {
- error("getrlimit(RLIMIT_NOFILE) failed");
+ netdata_log_error("getrlimit(RLIMIT_NOFILE) failed");
return;
}
if (rl->rlim_cur < 1024)
- error("Number of open file descriptors allowed for this process is too low (RLIMIT_NOFILE=%zu)", (size_t)rl->rlim_cur);
+ netdata_log_error("Number of open file descriptors allowed for this process is too low (RLIMIT_NOFILE=%zu)", (size_t)rl->rlim_cur);
}
void cancel_main_threads() {
@@ -666,10 +668,10 @@ void cancel_main_threads() {
for (i = 0; static_threads[i].name != NULL ; i++) {
if (static_threads[i].enabled == NETDATA_MAIN_THREAD_RUNNING) {
if (static_threads[i].thread) {
- info("EXIT: Stopping main thread: %s", static_threads[i].name);
+ netdata_log_info("EXIT: Stopping main thread: %s", static_threads[i].name);
netdata_thread_cancel(*static_threads[i].thread);
} else {
- info("EXIT: No thread running (marking as EXITED): %s", static_threads[i].name);
+ netdata_log_info("EXIT: No thread running (marking as EXITED): %s", static_threads[i].name);
static_threads[i].enabled = NETDATA_MAIN_THREAD_EXITED;
}
found++;
@@ -680,7 +682,7 @@ void cancel_main_threads() {
while(found && max > 0) {
max -= step;
- info("Waiting %d threads to finish...", found);
+ netdata_log_info("Waiting %d threads to finish...", found);
sleep_usec(step);
found = 0;
for (i = 0; static_threads[i].name != NULL ; i++) {
@@ -692,11 +694,11 @@ void cancel_main_threads() {
if(found) {
for (i = 0; static_threads[i].name != NULL ; i++) {
if (static_threads[i].enabled != NETDATA_MAIN_THREAD_EXITED)
- error("Main thread %s takes too long to exit. Giving up...", static_threads[i].name);
+ netdata_log_error("Main thread %s takes too long to exit. Giving up...", static_threads[i].name);
}
}
else
- info("All threads finished.");
+ netdata_log_info("All threads finished.");
for (i = 0; static_threads[i].name != NULL ; i++)
freez(static_threads[i].thread);
@@ -1054,18 +1056,18 @@ static void get_netdata_configured_variables() {
char buf[HOSTNAME_MAX + 1];
if(gethostname(buf, HOSTNAME_MAX) == -1){
- error("Cannot get machine hostname.");
+ netdata_log_error("Cannot get machine hostname.");
}
netdata_configured_hostname = config_get(CONFIG_SECTION_GLOBAL, "hostname", buf);
- debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname);
+ netdata_log_debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname);
// ------------------------------------------------------------------------
// get default database update frequency
default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_DB, "update every", UPDATE_EVERY);
if(default_rrd_update_every < 1 || default_rrd_update_every > 600) {
- error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY);
+ netdata_log_error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY);
default_rrd_update_every = UPDATE_EVERY;
config_set_number(CONFIG_SECTION_DB, "update every", default_rrd_update_every);
}
@@ -1077,7 +1079,7 @@ static void get_netdata_configured_variables() {
const char *mode = config_get(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode));
default_rrd_memory_mode = rrd_memory_mode_id(mode);
if(strcmp(mode, rrd_memory_mode_name(default_rrd_memory_mode)) != 0) {
- error("Invalid memory mode '%s' given. Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode));
+ netdata_log_error("Invalid memory mode '%s' given. Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode));
config_set(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode));
}
}
@@ -1128,7 +1130,7 @@ static void get_netdata_configured_variables() {
default_rrdeng_extent_cache_mb = 0;
if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) {
- error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB);
+ netdata_log_error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB);
default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
config_set_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb);
}
@@ -1138,14 +1140,14 @@ static void get_netdata_configured_variables() {
default_rrdeng_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine disk space MB", default_rrdeng_disk_quota_mb);
if(default_rrdeng_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) {
- error("Invalid dbengine disk space %d given. Defaulting to %d.", default_rrdeng_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB);
+ netdata_log_error("Invalid dbengine disk space %d given. Defaulting to %d.", default_rrdeng_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB);
default_rrdeng_disk_quota_mb = RRDENG_MIN_DISK_SPACE_MB;
config_set_number(CONFIG_SECTION_DB, "dbengine disk space MB", default_rrdeng_disk_quota_mb);
}
default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine multihost disk space MB", compute_multidb_diskspace());
if(default_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) {
- error("Invalid multidb disk space %d given. Defaulting to %d.", default_multidb_disk_quota_mb, default_rrdeng_disk_quota_mb);
+ netdata_log_error("Invalid multidb disk space %d given. Defaulting to %d.", default_multidb_disk_quota_mb, default_rrdeng_disk_quota_mb);
default_multidb_disk_quota_mb = default_rrdeng_disk_quota_mb;
config_set_number(CONFIG_SECTION_DB, "dbengine multihost disk space MB", default_multidb_disk_quota_mb);
}
@@ -1183,7 +1185,7 @@ static void get_netdata_configured_variables() {
// https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information.
if (rrdset_free_obsolete_time_s < 10) {
rrdset_free_obsolete_time_s = 10;
- info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds.");
+ netdata_log_info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds.");
config_set_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time_s);
}
@@ -1204,7 +1206,22 @@ static void get_netdata_configured_variables() {
}
-int load_netdata_conf(char *filename, char overwrite_used) {
+static void post_conf_load(char **user)
+{
+ // --------------------------------------------------------------------
+ // get the user we should run
+
+ // IMPORTANT: this is required before web_files_uid()
+ if(getuid() == 0) {
+ *user = config_get(CONFIG_SECTION_GLOBAL, "run as user", NETDATA_USER);
+ }
+ else {
+ struct passwd *passwd = getpwuid(getuid());
+ *user = config_get(CONFIG_SECTION_GLOBAL, "run as user", (passwd && passwd->pw_name)?passwd->pw_name:"");
+ }
+}
+
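+// Load netdata.conf: the explicit filename if given, otherwise the user config, falling
+// back to the stock config; then resolve the "run as user" setting via post_conf_load().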
+static bool load_netdata_conf(char *filename, char overwrite_used, char **user) {
errno = 0;
int ret = 0;
@@ -1212,25 +1229,26 @@ int load_netdata_conf(char *filename, char overwrite_used) {
if(filename && *filename) {
ret = config_load(filename, overwrite_used, NULL);
if(!ret)
- error("CONFIG: cannot load config file '%s'.", filename);
+ netdata_log_error("CONFIG: cannot load config file '%s'.", filename);
}
else {
filename = strdupz_path_subpath(netdata_configured_user_config_dir, "netdata.conf");
ret = config_load(filename, overwrite_used, NULL);
if(!ret) {
- info("CONFIG: cannot load user config '%s'. Will try the stock version.", filename);
+ netdata_log_info("CONFIG: cannot load user config '%s'. Will try the stock version.", filename);
freez(filename);
filename = strdupz_path_subpath(netdata_configured_stock_config_dir, "netdata.conf");
ret = config_load(filename, overwrite_used, NULL);
if(!ret)
- info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename);
+ netdata_log_info("CONFIG: cannot load stock config '%s'. Running with internal defaults.", filename);
}
freez(filename);
}
+ post_conf_load(user);
return ret;
}
@@ -1240,20 +1258,18 @@ static inline void coverity_remove_taint(char *s)
(void)s;
}
-int get_system_info(struct rrdhost_system_info *system_info) {
+int get_system_info(struct rrdhost_system_info *system_info, bool log) {
char *script;
script = mallocz(sizeof(char) * (strlen(netdata_configured_primary_plugins_dir) + strlen("system-info.sh") + 2));
sprintf(script, "%s/%s", netdata_configured_primary_plugins_dir, "system-info.sh");
if (unlikely(access(script, R_OK) != 0)) {
- info("System info script %s not found.",script);
+ netdata_log_error("System info script %s not found.",script);
freez(script);
return 1;
}
pid_t command_pid;
- info("Executing %s", script);
-
FILE *fp_child_input;
FILE *fp_child_output = netdata_popen(script, &command_pid, &fp_child_input);
if(fp_child_output) {
@@ -1273,10 +1289,12 @@ int get_system_info(struct rrdhost_system_info *system_info) {
coverity_remove_taint(value);
if(unlikely(rrdhost_set_system_info_variable(system_info, line, value))) {
- info("Unexpected environment variable %s=%s", line, value);
+ netdata_log_error("Unexpected environment variable %s=%s", line, value);
}
else {
- info("%s=%s", line, value);
+ if(log)
+ netdata_log_info("%s=%s", line, value);
+
setenv(line, value, 1);
}
}
@@ -1296,38 +1314,14 @@ void set_silencers_filename() {
/* Any config setting that can be accessed without a default value, i.e. config_get(...,...,NULL), *MUST*
   be set in this procedure, which is called in all the relevant code paths.
*/
-void post_conf_load(char **user)
-{
- // --------------------------------------------------------------------
- // get the user we should run
-
- // IMPORTANT: this is required before web_files_uid()
- if(getuid() == 0) {
- *user = config_get(CONFIG_SECTION_GLOBAL, "run as user", NETDATA_USER);
- }
- else {
- struct passwd *passwd = getpwuid(getuid());
- *user = config_get(CONFIG_SECTION_GLOBAL, "run as user", (passwd && passwd->pw_name)?passwd->pw_name:"");
- }
-
- // --------------------------------------------------------------------
- // Check if the cloud is enabled
-#if defined( DISABLE_CLOUD ) || !defined( ENABLE_ACLK )
- netdata_cloud_setting = 0;
-#else
- netdata_cloud_setting = appconfig_get_boolean(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", 1);
-#endif
- // This must be set before any point in the code that accesses it. Do not move it from this function.
- appconfig_get(&cloud_config, CONFIG_SECTION_GLOBAL, "cloud base url", DEFAULT_CLOUD_BASE_URL);
-}
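// Logs the time spent between consecutive startup steps (mirrors delta_shutdown_time() above).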
#define delta_startup_time(msg) \
{ \
usec_t now_ut = now_monotonic_usec(); \
if(prev_msg) \
- info("NETDATA STARTUP: in %7llu ms, %s - next: %s", (now_ut - last_ut) / USEC_PER_MS, prev_msg, msg); \
+ netdata_log_info("NETDATA STARTUP: in %7llu ms, %s - next: %s", (now_ut - last_ut) / USEC_PER_MS, prev_msg, msg); \
else \
- info("NETDATA STARTUP: next: %s", msg); \
+ netdata_log_info("NETDATA STARTUP: next: %s", msg); \
last_ut = now_ut; \
prev_msg = msg; \
}
@@ -1342,10 +1336,12 @@ void replication_initialize(void);
int main(int argc, char **argv) {
// initialize the system clocks
clocks_init();
+ netdata_start_time = now_realtime_sec();
+
usec_t started_ut = now_monotonic_usec();
usec_t last_ut = started_ut;
const char *prev_msg = NULL;
- // Initialize stderror avoiding coredump when info() or error() is called
+ // Initialize stderror to avoid a coredump when netdata_log_info() or netdata_log_error() is called
stderror = stderr;
int i;
@@ -1357,7 +1353,7 @@ int main(int argc, char **argv) {
static_threads = static_threads_get();
- netdata_ready=0;
+ netdata_ready = false;
// set the name for logging
program_name = "netdata";
@@ -1389,13 +1385,12 @@ int main(int argc, char **argv) {
while( (opt = getopt(argc, argv, optstring)) != -1 ) {
switch(opt) {
case 'c':
- if(load_netdata_conf(optarg, 1) != 1) {
- error("Cannot load configuration file %s.", optarg);
+ if(!load_netdata_conf(optarg, 1, &user)) {
+ netdata_log_error("Cannot load configuration file %s.", optarg);
return 1;
}
else {
- debug(D_OPTIONS, "Configuration loaded from %s.", optarg);
- post_conf_load(&user);
+ netdata_log_debug(D_OPTIONS, "Configuration loaded from %s.", optarg);
load_cloud_conf(1);
config_loaded = 1;
}
@@ -1469,7 +1464,7 @@ int main(int argc, char **argv) {
return 1;
if (buffer_unittest())
return 1;
- if (unit_test_bitmap256())
+ if (unit_test_bitmaps())
return 1;
// No call to load the config file on this code-path
post_conf_load(&user);
@@ -1550,6 +1545,10 @@ int main(int argc, char **argv) {
unittest_running = true;
return julytest();
}
+ else if(strcmp(optarg, "parsertest") == 0) {
+ unittest_running = true;
+ return pluginsd_parser_unittest();
+ }
else if(strncmp(optarg, createdataset_string, strlen(createdataset_string)) == 0) {
optarg += strlen(createdataset_string);
unsigned history_seconds = strtoul(optarg, NULL, 0);
@@ -1728,8 +1727,7 @@ int main(int argc, char **argv) {
if(!config_loaded) {
fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE, before -W get. Using default config.\n");
- load_netdata_conf(NULL, 0);
- post_conf_load(&user);
+ load_netdata_conf(NULL, 0, &user);
}
get_netdata_configured_variables();
@@ -1756,8 +1754,7 @@ int main(int argc, char **argv) {
if(!config_loaded) {
fprintf(stderr, "warning: no configuration file has been loaded. Use -c CONFIG_FILE, before -W get. Using default config.\n");
- load_netdata_conf(NULL, 0);
- post_conf_load(&user);
+ load_netdata_conf(NULL, 0, &user);
load_cloud_conf(1);
}
@@ -1777,7 +1774,6 @@ int main(int argc, char **argv) {
claiming_pending_arguments = optarg + strlen(claim_string);
}
else if(strcmp(optarg, "buildinfo") == 0) {
- printf("Version: %s %s\n", program_name, program_version);
print_build_info();
return 0;
}
@@ -1813,16 +1809,10 @@ int main(int argc, char **argv) {
if(!config_loaded) {
- load_netdata_conf(NULL, 0);
- post_conf_load(&user);
+ load_netdata_conf(NULL, 0, &user);
load_cloud_conf(0);
}
- char *nd_disable_cloud = getenv("NETDATA_DISABLE_CLOUD");
- if (nd_disable_cloud && !strncmp(nd_disable_cloud, "1", 1)) {
- appconfig_set(&cloud_config, CONFIG_SECTION_GLOBAL, "enabled", "false");
- }
-
// ------------------------------------------------------------------------
// initialize netdata
{
@@ -1886,12 +1876,12 @@ int main(int argc, char **argv) {
setenv("NETDATA_DEBUG_FLAGS", flags, 1);
debug_flags = strtoull(flags, NULL, 0);
- debug(D_OPTIONS, "Debug flags set to '0x%" PRIX64 "'.", debug_flags);
+ netdata_log_debug(D_OPTIONS, "Debug flags set to '0x%" PRIX64 "'.", debug_flags);
if(debug_flags != 0) {
struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
if(setrlimit(RLIMIT_CORE, &rl) != 0)
- error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
+ netdata_log_error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
#ifdef HAVE_SYS_PRCTL_H
prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
@@ -1914,6 +1904,8 @@ int main(int argc, char **argv) {
get_system_timezone();
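+ // (assumption: prepares the registry of API bearer tokens before the web server and ACLK threads start)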
+ bearer_tokens_init();
+
replication_initialize();
// --------------------------------------------------------------------
@@ -1980,10 +1972,10 @@ int main(int argc, char **argv) {
if(web_server_mode != WEB_SERVER_MODE_NONE)
api_listen_sockets_setup();
-#ifdef ENABLE_HTTPD
- delta_startup_time("initialize httpd server");
+#ifdef ENABLE_H2O
+ delta_startup_time("initialize h2o server");
for (int i = 0; static_threads[i].name; i++) {
- if (static_threads[i].start_routine == httpd_main)
+ if (static_threads[i].start_routine == h2o_main)
static_threads[i].enabled = httpd_is_enabled();
}
#endif
@@ -1995,7 +1987,7 @@ int main(int argc, char **argv) {
if(debug_flags != 0) {
struct rlimit rl = { RLIM_INFINITY, RLIM_INFINITY };
if(setrlimit(RLIMIT_CORE, &rl) != 0)
- error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
+ netdata_log_error("Cannot request unlimited core dumps for debugging... Proceeding anyway...");
#ifdef HAVE_SYS_PRCTL_H
prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
#endif
@@ -2010,7 +2002,7 @@ int main(int argc, char **argv) {
if(become_daemon(dont_fork, user) == -1)
fatal("Cannot daemonize myself.");
- info("netdata started on pid %d.", getpid());
+ netdata_log_info("netdata started on pid %d.", getpid());
delta_startup_time("initialize threads after fork");
@@ -2041,7 +2033,7 @@ int main(int argc, char **argv) {
netdata_anonymous_statistics_enabled=-1;
struct rrdhost_system_info *system_info = callocz(1, sizeof(struct rrdhost_system_info));
__atomic_sub_fetch(&netdata_buffers_statistics.rrdhost_allocations_size, sizeof(struct rrdhost_system_info), __ATOMIC_RELAXED);
- get_system_info(system_info);
+ get_system_info(system_info, true);
(void) registry_get_this_machine_guid();
system_info->hops = 0;
get_install_type(&system_info->install_type, &system_info->prebuilt_arch, &system_info->prebuilt_dist);
@@ -2072,7 +2064,7 @@ int main(int argc, char **argv) {
delta_startup_time("collect claiming info");
if (claiming_pending_arguments)
- claim_agent(claiming_pending_arguments);
+ claim_agent(claiming_pending_arguments, false, NULL);
load_claiming_state();
// ------------------------------------------------------------------------
@@ -2099,10 +2091,11 @@ int main(int argc, char **argv) {
if(st->enabled) {
st->thread = mallocz(sizeof(netdata_thread_t));
- debug(D_SYSTEM, "Starting thread %s.", st->name);
+ netdata_log_debug(D_SYSTEM, "Starting thread %s.", st->name);
netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, st);
}
- else debug(D_SYSTEM, "Not starting thread %s.", st->name);
+ else
+ netdata_log_debug(D_SYSTEM, "Not starting thread %s.", st->name);
}
ml_start_threads();
@@ -2116,8 +2109,8 @@ int main(int argc, char **argv) {
delta_startup_time("ready");
usec_t ready_ut = now_monotonic_usec();
- info("NETDATA STARTUP: completed in %llu ms. Enjoy real-time performance monitoring!", (ready_ut - started_ut) / USEC_PER_MS);
- netdata_ready = 1;
+ netdata_log_info("NETDATA STARTUP: completed in %llu ms. Enjoy real-time performance monitoring!", (ready_ut - started_ut) / USEC_PER_MS);
+ netdata_ready = true;
send_statistics("START", "-", "-");
if (crash_detected)
@@ -2132,7 +2125,7 @@ int main(int argc, char **argv) {
struct netdata_static_thread *st = &static_threads[i];
st->thread = mallocz(sizeof(netdata_thread_t));
st->enabled = 1;
- debug(D_SYSTEM, "Starting thread %s.", st->name);
+ netdata_log_debug(D_SYSTEM, "Starting thread %s.", st->name);
netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, st);
}
}
@@ -2141,14 +2134,14 @@ int main(int argc, char **argv) {
// ------------------------------------------------------------------------
// Report ACLK build failure
#ifndef ENABLE_ACLK
- error("This agent doesn't have ACLK.");
+ netdata_log_error("This agent doesn't have ACLK.");
char filename[FILENAME_MAX + 1];
snprintfz(filename, FILENAME_MAX, "%s/.aclk_report_sent", netdata_configured_varlib_dir);
if (netdata_anonymous_statistics_enabled > 0 && access(filename, F_OK)) { // -1 -> not initialized
send_statistics("ACLK_DISABLED", "-", "-");
int fd = open(filename, O_WRONLY | O_CREAT | O_TRUNC, 0444);
if (fd == -1)
- error("Cannot create file '%s'. Please fix this.", filename);
+ netdata_log_error("Cannot create file '%s'. Please fix this.", filename);
else
close(fd);
}
diff --git a/daemon/metrics.csv b/daemon/metrics.csv
new file mode 100644
index 00000000..4aa71a36
--- /dev/null
+++ b/daemon/metrics.csv
@@ -0,0 +1,254 @@
+metric,scope,dimensions,unit,description,chart_type,labels,plugin,module
+netdata.aclk_cloud_req,,"received, malformed",req/s,Requests received from cloud,stacked,,netdata,stats
+netdata.aclk_cloud_req_http_type,,"other, info, data, alarms, alarm_log, chart, charts, function, functions",req/s,Requests received from cloud via HTTP by their type,stacked,,netdata,stats
+netdata.aclk_mqtt_tx_perc,,used,%,Actively used percentage of MQTT Tx Buffer,line,,netdata,stats
+netdata.aclk_mqtt_tx_queue,,"usable, reclaimable, used, free, size",B,State of the MQTT transmit queue.,line,,netdata,stats
+netdata.aclk_mqtt_wss_time,,"keep-alive, socket_read_ssl, socket_write_ssl, process_websocket, process_mqtt",us,"Time spent handling MQTT, WSS, SSL and network communication.",stacked,,netdata,stats
+netdata.aclk_openssl_bytes,,"sent, received",B/s,Bytes received and sent.,stacked,,netdata,stats
+netdata.aclk_processed_query_type,,"unknown, http_api_request_v2, register_node, node_state_update, chart_and_dim_update, chart_config_updated, reset_chart_messages, update_retention_info, update_node_info, alarm_checkpoint, provide_alarm_config, alarm_snapshot, update_node_collectors, generic_binary_proto_message",cmd/s,Query thread commands processed by their type,stacked,,netdata,stats
+netdata.aclk_protobuf_rx_types,,"cmd, CreateNodeInstanceResult, SendNodeInstances, StreamChartsAndDimensions, ChartsAndDimensionsAck, UpdateChartConfigs, StartAlarmStreaming, SendAlarmCheckpoint, SendAlarmConfiguration, SendAlarmSnapshot, DisconnectReq, ContextsCheckpoint, StopStreamingContexts, CancelPendingRequest",msg/s,Received new cloud architecture messages by their type.,stacked,,netdata,stats
+netdata.aclk_query_per_second,,"added, dispatched",queries/s,ACLK Queries per second,area,,netdata,stats
+netdata.aclk_query_threads,,"Query 0, Query 1, Query 2, Query 3, Query 4, Query 5",req/s,Queries Processed Per Thread,stacked,,netdata,stats
+netdata.aclk_query_time,,"avg, max, total",us,Time it took to process cloud requested DB queries,line,,netdata,stats
+netdata.aclk_status,,online,connected,ACLK/Cloud connection status,line,,netdata,stats
+netdata.apps_children_fix,,"cutime, cstime, cgtime, cminflt, cmajflt",percentage,Apps Plugin Exited Children Normalization Ratios,line,,apps.plugin,
+netdata.apps_cpu,,"user, system",milliseconds/s,Apps Plugin CPU,stacked,,apps.plugin,
+netdata.apps_fix,,"utime, stime, gtime, minflt, majflt",percentage,Apps Plugin Normalization Ratios,line,,apps.plugin,
+netdata.apps_sizes,,"calls, files, filenames, inode_changes, link_changes, pids, fds, targets, new_pids",files/s,Apps Plugin Files,line,,apps.plugin,
+netdata.clients,,clients,connected clients,Netdata Web Clients,line,,netdata,stats
+netdata.compression_ratio,,savings,percentage,Netdata API Responses Compression Savings Ratio,line,,netdata,stats
+netdata.db_points_read,,"/api/v1/data, /api/v1/weights, /api/v1/badge, health, ml, exporters, backfill, replication",points/s,Netdata DB Points Query Read,stacked,,netdata,stats
+netdata.db_points_results,,"/api/v1/data, /api/v1/weights, /api/v1/badge, health, ml, replication",points/s,Netdata Points in Query Results,stacked,,netdata,stats
+netdata.db_points_stored,,"tier0, tier1, tier2",points/s,Netdata DB Points Stored,stacked,,netdata,stats
+netdata.dbengine_buffers,,"pgc, mrg, opcodes, query handles, descriptors, wal, workers, pdc, pd, extent io, extent buffers, epdl, deol",bytes,Netdata DB Buffers,stacked,,netdata,stats
+netdata.dbengine_cache_hit_ratio,,"overall, main cache, extent cache, extent merge",%,Netdata Queries Cache Hit Ratio,line,,netdata,stats
+netdata.dbengine_compression_ratio,,savings,percentage,Netdata DB engine data extents' compression savings ratio,line,,netdata,stats
+netdata.dbengine_events,,"journal v2 mapped, journal v2 unmapped, datafile creation, datafile deletion, datafile deletion spin, journal v2 indexing, retention",events/s,Netdata Database Events,line,,netdata,stats
+netdata.dbengine_extent_cache_events,,"evictions aggressive, evictions critical, flushes critical",events/s,Netdata extent Cache Events,area,,netdata,stats
+netdata.dbengine_extent_cache_hit_ratio,,"closest, exact",%,Netdata extent Cache Hit Ratio,line,,netdata,stats
+netdata.dbengine_extent_cache_memory,,"free, hot, dirty, clean, index, evicting, flushing",bytes,Netdata extent Cache Memory,stacked,,netdata,stats
+netdata.dbengine_extent_cache_memory_changes,,"new clean, evictions, new hot",bytes/s,Netdata extent Cache Memory Changes,area,,netdata,stats
+netdata.dbengine_extent_cache_memory_migrations,,"dirty to clean, hot to dirty",bytes/s,Netdata extent Cache Memory Migrations,area,,netdata,stats
+netdata.dbengine_extent_cache_operations,,"search closest, search exact, add hot, add clean, evictions, flushes, acquires, releases, del acquires",ops/s,Netdata extent Cache Operations,line,,netdata,stats
+netdata.dbengine_extent_cache_pages,,"clean, hot, dirty, referenced",pages,Netdata extent Cache Pages,line,,netdata,stats
+netdata.dbengine_extent_cache_workers,,"searchers, adders, evictors, flushers, hot2dirty, jv2 flushers",workers,Netdata extent Cache Workers,line,,netdata,stats
+netdata.dbengine_extent_target_memory,,"current, wanted, referenced, hot max, dirty max, hot, dirty",bytes,Netdata extent Target Cache Memory,line,,netdata,stats
+netdata.dbengine_extent_waste_events,,"evictions skipped, flushes cancelled, acquire spins, release spins, insert spins, delete spins, evict spins, flush spins",events/s,Netdata extent Waste Events,line,,netdata,stats
+netdata.dbengine_global_errors,,"io_errors, fs_errors, pg_cache_over_half_dirty_events",errors/s,Netdata DB engine errors,line,,netdata,stats
+netdata.dbengine_global_file_descriptors,,"current, max",descriptors,Netdata DB engine File Descriptors,line,,netdata,stats
+netdata.dbengine_io_operations,,"reads, writes",operations/s,Netdata DB engine I/O operations,line,,netdata,stats
+netdata.dbengine_io_throughput,,"reads, writes",MiB/s,Netdata DB engine I/O throughput,line,,netdata,stats
+netdata.dbengine_main_cache_events,,"evictions aggressive, evictions critical, flushes critical",events/s,Netdata main Cache Events,area,,netdata,stats
+netdata.dbengine_main_cache_hit_ratio,,"closest, exact",%,Netdata main Cache Hit Ratio,line,,netdata,stats
+netdata.dbengine_main_cache_memory,,"free, hot, dirty, clean, index, evicting, flushing",bytes,Netdata main Cache Memory,stacked,,netdata,stats
+netdata.dbengine_main_cache_memory_changes,,"new clean, evictions, new hot",bytes/s,Netdata main Cache Memory Changes,area,,netdata,stats
+netdata.dbengine_main_cache_memory_migrations,,"dirty to clean, hot to dirty",bytes/s,Netdata main Cache Memory Migrations,area,,netdata,stats
+netdata.dbengine_main_cache_operations,,"search closest, search exact, add hot, add clean, evictions, flushes, acquires, releases, del acquires",ops/s,Netdata main Cache Operations,line,,netdata,stats
+netdata.dbengine_main_cache_pages,,"clean, hot, dirty, referenced",pages,Netdata main Cache Pages,line,,netdata,stats
+netdata.dbengine_main_cache_workers,,"searchers, adders, evictors, flushers, hot2dirty, jv2 flushers",workers,Netdata main Cache Workers,line,,netdata,stats
+netdata.dbengine_main_target_memory,,"current, wanted, referenced, hot max, dirty max, hot, dirty",bytes,Netdata main Target Cache Memory,line,,netdata,stats
+netdata.dbengine_main_waste_events,,"evictions skipped, flushes cancelled, acquire spins, release spins, insert spins, delete spins, evict spins, flush spins",events/s,Netdata main Waste Events,line,,netdata,stats
+netdata.dbengine_memory,,"main cache, open cache, extent cache, metrics registry, buffers",bytes,Netdata DB Memory,stacked,,netdata,stats
+netdata.dbengine_metrics,,"all, acquired, collected, with retention, without retention, multi-collected",metrics,Netdata Metrics in Metrics Registry,line,,netdata,stats
+netdata.dbengine_metrics_registry_operations,,"add, delete, search",metrics,Netdata Metrics Registry Operations,line,,netdata,stats
+netdata.dbengine_metrics_registry_references,,references,references,Netdata Metrics Registry References,line,,netdata,stats
+netdata.dbengine_open_cache_events,,"evictions aggressive, evictions critical, flushes critical",events/s,Netdata open Cache Events,area,,netdata,stats
+netdata.dbengine_open_cache_hit_ratio,,"closest, exact",%,Netdata open Cache Hit Ratio,line,,netdata,stats
+netdata.dbengine_open_cache_memory,,"free, hot, dirty, clean, index, evicting, flushing",bytes,Netdata open Cache Memory,stacked,,netdata,stats
+netdata.dbengine_open_cache_memory_changes,,"new clean, evictions, new hot",bytes/s,Netdata open Cache Memory Changes,area,,netdata,stats
+netdata.dbengine_open_cache_memory_migrations,,"dirty to clean, hot to dirty",bytes/s,Netdata open Cache Memory Migrations,area,,netdata,stats
+netdata.dbengine_open_cache_operations,,"search closest, search exact, add hot, add clean, evictions, flushes, acquires, releases, del acquires",ops/s,Netdata open Cache Operations,line,,netdata,stats
+netdata.dbengine_open_cache_pages,,"clean, hot, dirty, referenced",pages,Netdata open Cache Pages,line,,netdata,stats
+netdata.dbengine_open_cache_workers,,"searchers, adders, evictors, flushers, hot2dirty, jv2 flushers",workers,Netdata open Cache Workers,line,,netdata,stats
+netdata.dbengine_open_target_memory,,"current, wanted, referenced, hot max, dirty max, hot, dirty",bytes,Netdata open Target Cache Memory,line,,netdata,stats
+netdata.dbengine_open_waste_events,,"evictions skipped, flushes cancelled, acquire spins, release spins, insert spins, delete spins, evict spins, flush spins",events/s,Netdata open Waste Events,line,,netdata,stats
+netdata.dbengine_prep_timings,,"routing, main cache, open cache, journal v2, pass4",usec/s,Netdata Query Preparation Timings,stacked,,netdata,stats
+netdata.dbengine_queries,,"total, open cache, journal v2, planned with gaps, executed with gaps",queries/s,Netdata Queries,line,,netdata,stats
+netdata.dbengine_queries_running,,queries,queries,Netdata Queries Running,line,,netdata,stats
+netdata.dbengine_query_next_page,,"pass4, failed slow, failed fast, loaded slow, loaded fast",pages/s,Netdata Query Next Page,stacked,,netdata,stats
+netdata.dbengine_query_next_page_issues,,"zero timestamp, invalid size, past time, overlapping, update every fixed, entries fixed",pages/s,Netdata Query Next Page Issues,stacked,,netdata,stats
+netdata.dbengine_query_pages_data_source,,"main cache, disk, extent cache",pages/s,Netdata Query Pages to Data Source,stacked,,netdata,stats
+netdata.dbengine_query_pages_disk_load,,"ok compressed, fail invalid page, ok uncompressed, fail cant mmap, fail unavailable, fail unroutable, fail not found, fail invalid extent, extent merged, cancelled",pages/s,Netdata Query Pages Loaded from Disk,line,,netdata,stats
+netdata.dbengine_query_pages_metadata_source,,"cache hit, journal v2 scan, open journal",pages/s,Netdata Query Pages Metadata Source,stacked,,netdata,stats
+netdata.dbengine_query_timings,,"init, prep wait, next page disk fast, next page disk slow, next page preload fast, next page preload slow",usec/s,Netdata Query Timings,stacked,,netdata,stats
+netdata.dictionaries.category.callbacks,category,"inserts, deletes, conflicts, reacts",callbacks/s,Dictionary Callbacks,line,category,netdata,stats
+netdata.dictionaries.category.dictionaries,category,"active, deleted",dictionaries,Dictionaries,line,category,netdata,stats
+netdata.dictionaries.category.items,category,"active, deleted, referenced",items,Dictionary Items,line,category,netdata,stats
+netdata.dictionaries.category.memory,category,"index, data, structures",bytes,Dictionary Memory,stacked,category,netdata,stats
+netdata.dictionaries.category.ops,category,"creations, destructions, flushes, traversals, walkthroughs, garbage_collections, searches, inserts, resets, deletes",ops/s,Dictionary Operations,line,category,netdata,stats
+netdata.dictionaries.category.spins,category,"use, search, insert, delete",count,Dictionary Spins,line,category,netdata,stats
+netdata.ebpf_hash_tables_count,,hash_table,hash tables,Number of hash tables loaded.,line,,ebpf.plugin,process
+netdata.ebpf_hash_tables_per_core,,"per_core, unique",threads,How threads are loading hash/array tables.,line,,ebpf.plugin,process
+netdata.ebpf_kernel_memory,,memory_locked,bytes,Memory allocated for hash tables.,line,,ebpf.plugin,process
+netdata.ebpf_load_methods,,"legacy, co-re",methods,Load info.,line,,ebpf.plugin,process
+netdata.ebpf_threads,,"total, running",threads,Threads info.,line,,ebpf.plugin,process
+netdata.go_plugin_execution_time,,time,ms,Execution time,line,,go.d,logind
+netdata.heartbeat,,"min, max, average",microseconds,System clock jitter,area,,netdata,stats
+netdata.machine_learning_status,,"enabled, disabled-sp",dimensions,Machine learning status,line,,ml.plugin,training
+netdata.memory,,"db, collectors, hosts, rrdset rrddim, contexts, health, functions, labels, strings, streaming, replication, buffers, workers, aral, judy, other",bytes,Netdata Memory,stacked,,netdata,stats
+netdata.memory_buffers,,"queries, collection, aclk, api, functions, sqlite, exporters, health, streaming, streaming cbuf, replication, web, aral, judy",bytes,Netdata Memory Buffers,stacked,,netdata,stats
+netdata.metric_types,,"constant, variable",dimensions,Dimensions by metric type,line,,ml.plugin,training
+netdata.ml_models_consulted,,num_models_consulted,models,KMeans models used for prediction,area,,ml.plugin,detection
+netdata.net,,"in, out",kilobits/s,Netdata Network Traffic,area,,netdata,stats
+netdata.private_charts,,charts,charts,Private metric charts created by the netdata statsd server,area,,statsd.plugin,stats
+netdata.queries,,"/api/v1/data, /api/v1/weights, /api/v1/badge, health, ml, exporters, backfill, replication",queries/s,Netdata DB Queries,stacked,,netdata,stats
+netdata.queue_stats,,"queue_size, popped_items",items,Training queue stats,line,,ml.plugin,training
+netdata.requests,,requests,requests/s,Netdata Web Requests,line,,netdata,stats
+netdata.response_time,,"average, max",milliseconds/request,Netdata API Response Time,line,,netdata,stats
+netdata.server_cpu,,"user, system",milliseconds/s,Netdata CPU usage,stacked,,netdata,stats
+netdata.sqlite3_context_cache,,"cache_hit, cache_miss, cache_spill, cache_write",ops/s,Netdata SQLite3 context cache,line,,netdata,stats
+netdata.sqlite3_metatada_cache,,"cache_hit, cache_miss, cache_spill, cache_write",ops/s,Netdata SQLite3 metadata cache,line,,netdata,stats
+netdata.sqlite3_queries,,queries,queries/s,Netdata SQLite3 Queries,line,,netdata,stats
+netdata.sqlite3_queries_by_status,,"ok, failed, busy, locked",queries/s,Netdata SQLite3 Queries by status,line,,netdata,stats
+netdata.sqlite3_rows,,ok,rows/s,Netdata SQLite3 Rows,line,,netdata,stats
+netdata.statsd_bytes,,"tcp, udp",kilobits/s,Bytes read by the netdata statsd server,stacked,,statsd.plugin,stats
+netdata.statsd_events,,"gauges, counters, timers, meters, histograms, sets, dictionaries, unknown, errors",events/s,Events processed by the netdata statsd server,stacked,,statsd.plugin,stats
+netdata.statsd_metrics,,"gauges, counters, timers, meters, histograms, sets, dictionaries",metrics,Metrics in the netdata statsd database,stacked,,statsd.plugin,stats
+netdata.statsd_packets,,"tcp, udp",packets/s,Network packets processed by the netdata statsd server,stacked,,statsd.plugin,stats
+netdata.statsd_reads,,"tcp, udp",reads/s,Read operations made by the netdata statsd server,stacked,,statsd.plugin,stats
+netdata.statsd_useful_metrics,,"gauges, counters, timers, meters, histograms, sets, dictionaries",metrics,Useful metrics in the netdata statsd database,stacked,,statsd.plugin,stats
+netdata.strings_entries,,"entries, references",entries,Strings entries,area,,netdata,stats
+netdata.strings_memory,,memory,bytes,Strings memory,area,,netdata,stats
+netdata.strings_ops,,"inserts, deletes, searches, duplications, releases",ops/s,Strings operations,line,,netdata,stats
+netdata.tcp_connected,,connected,sockets,statsd server TCP connected sockets,line,,statsd.plugin,stats
+netdata.tcp_connects,,"connects, disconnects",events,statsd server TCP connects and disconnects,line,,statsd.plugin,stats
+netdata.training_results,,"ok, invalid-queries, not-enough-values, null-acquired-dimensions, chart-under-replication",events,Training results,line,,ml.plugin,training
+netdata.training_status,,"untrained, pending-without-model, trained, pending-with-model, silenced",dimensions,Training status of dimensions,line,,ml.plugin,training
+netdata.training_time_stats,,"allotted, consumed, remaining",milliseconds,Training time stats,line,,ml.plugin,training
+netdata.uptime,,uptime,seconds,Netdata uptime,line,,netdata,stats
+netdata.workers.aclkquery.cpu,,"min, max, average",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.aclkquery.jobs_started_by_type,,"http_api_request_v2, register_node, node_state_update, chart_and_dim_update, chart_config_updated, reset_chart_messages, update_retention_info, update_node_info, alarm_checkpoint, provide_alarm_config, alarm_snapshot, update_node_collectors, generic_binary_proto_message",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.aclkquery.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.aclkquery.time,,"min, max, average",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.aclkquery.time_by_type,,"http_api_request_v2, register_node, node_state_update, chart_and_dim_update, chart_config_updated, reset_chart_messages, update_retention_info, update_node_info, alarm_checkpoint, provide_alarm_config, alarm_snapshot, update_node_collectors, generic_binary_proto_message",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.aclksync.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.aclksync.jobs_started_by_type,,"noop, cleanup, node delete, node state, alert push, alert conf push, alert snapshot, alert checkpoint, alerts check, timer",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.aclksync.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.aclksync.time_by_type,,"noop, cleanup, node delete, node state, alert push, alert conf push, alert snapshot, alert checkpoint, alerts check, timer",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.cgroups.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.cgroups.jobs_started_by_type,,"lock, read, chart",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.cgroups.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.cgroups.time_by_type,,"lock, read, chart",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.cgroupsdisc.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.cgroupsdisc.jobs_started_by_type,,"init, find, process, rename, network, new, update, cleanup, copy, share, lock",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.cgroupsdisc.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.cgroupsdisc.time_by_type,,"init, find, process, rename, network, new, update, cleanup, copy, share, lock",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.cpu_total,,"stats, health, mltrain, mldetect, streamsnd, dbengine, libuv, web, aclksync, metasync, pluginsd, statsd, statsdflush, proc, netdev, cgroups, cgroupsdisc, diskspace, tc, timex, idlejitter, rrdcontext, replication, service, aclkquery, streamrcv",%,Netdata Workers CPU Utilization,stacked,,netdata,stats
+netdata.workers.dbengine.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.dbengine.jobs_started_by_type,,"noop, query, extent write, extent read, flushed to open, db rotate, journal index, flush init, evict init, ctx shutdown, ctx quiesce, get opcode, query cb, extent write cb, extent read cb, flushed to open cb, db rotate cb, journal index cb, flush init cb, evict init cb, ctx shutdown cb, ctx quiesce cb, timer, transaction buffer flush cb",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.dbengine.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.dbengine.time_by_type,,"noop, query, extent write, extent read, flushed to open, db rotate, journal index, flush init, evict init, ctx shutdown, ctx quiesce, get opcode, query cb, extent write cb, extent read cb, flushed to open cb, db rotate cb, journal index cb, flush init cb, evict init cb, ctx shutdown cb, ctx quiesce cb, timer, transaction buffer flush cb",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.dbengine.value.opcodes_waiting,,"min, max, average",opcodes,Netdata Workers dbengine value of opcodes waiting,line,,netdata,stats
+netdata.workers.dbengine.value.works_dispatched,,"min, max, average",works,Netdata Workers dbengine value of works dispatched,line,,netdata,stats
+netdata.workers.dbengine.value.works_executing,,"min, max, average",works,Netdata Workers dbengine value of works executing,line,,netdata,stats
+netdata.workers.diskspace.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.diskspace.jobs_started_by_type,,"mountinfo, mountpoint, cleanup",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.diskspace.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.diskspace.time_by_type,,"mountinfo, mountpoint, cleanup",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.health.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.health.jobs_started_by_type,,"rrd lock, host lock, db lookup, calc eval, warning eval, critical eval, alarm log entry, alarm log process, rrdset init, rrddim init",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.health.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.health.time_by_type,,"rrd lock, host lock, db lookup, calc eval, warning eval, critical eval, alarm log entry, alarm log process, rrdset init, rrddim init",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.idlejitter.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.idlejitter.jobs_started_by_type,,measurements,jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.idlejitter.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.idlejitter.time_by_type,,measurements,ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.libuv.cpu,,"min, max, average",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.libuv.jobs_started_by_type,,"worker init, query, extent cache, extent mmap, extent decompression, page lookup, page populate, page allocate, flush main, extent write, flushed to open, jv2 index wait, jv2 indexing, datafile delete wait, datafile deletion, find rotated metrics, find remaining retention, update retention, evict main, dbengine buffers cleanup, dbengine quiesce, dbengine shutdown, metadata load host context, metadata store host, metadata cleanup, schedule command",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.libuv.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.libuv.time,,"min, max, average",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.libuv.time_by_type,,"worker init, query, extent cache, extent mmap, extent decompression, page lookup, page populate, page allocate, flush main, extent write, flushed to open, jv2 index wait, jv2 indexing, datafile delete wait, datafile deletion, find rotated metrics, find remaining retention, update retention, evict main, dbengine buffers cleanup, dbengine quiesce, dbengine shutdown, metadata load host context, metadata store host, metadata cleanup, schedule command",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.metasync.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.metasync.jobs_started_by_type,,"noop, timer, delete dimension, add claim id, add host info, maintenance, ml load models",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.metasync.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.metasync.time_by_type,,"noop, timer, delete dimension, add claim id, add host info, maintenance, ml load models",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.mldetect.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.mldetect.jobs_started_by_type,,"collect stats, dim chart, host chart, training stats",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.mldetect.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.mldetect.time_by_type,,"collect stats, dim chart, host chart, training stats",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.mltrain.cpu,,"min, max, average",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.mltrain.jobs_started_by_type,,"pop queue, acquire, query, kmeans, update models, release, update host, flush models",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.mltrain.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.mltrain.time,,"min, max, average",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.mltrain.time_by_type,,"pop queue, acquire, query, kmeans, update models, release, update host, flush models",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.netdev.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.netdev.jobs_started_by_type,,netdev,jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.netdev.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.netdev.time_by_type,,netdev,ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.pluginsd.cpu,,"min, max, average",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.pluginsd.jobs_started_by_type,,"FLUSH, DISABLE, HOST_DEFINE, HOST_DEFINE_END, HOST_LABEL, HOST, EXIT, CHART, DIMENSION, VARIABLE, LABEL, OVERWRITE, CLABEL_COMMIT, CLABEL, FUNCTION, FUNCTION_RESULT_BEGIN, BEGIN, SET, END",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.pluginsd.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.pluginsd.time,,"min, max, average",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.pluginsd.time_by_type,,"FLUSH, DISABLE, HOST_DEFINE, HOST_DEFINE_END, HOST_LABEL, HOST, EXIT, CHART, DIMENSION, VARIABLE, LABEL, OVERWRITE, CLABEL_COMMIT, CLABEL, FUNCTION, FUNCTION_RESULT_BEGIN, BEGIN, SET, END",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.proc.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.proc.jobs_started_by_type,,"stat, uptime, loadavg, entropy, pressure, interrupts, softirqs, vmstat, meminfo, ksm, zram, ecc, numa, pagetypeinfo, netwireless, sockstat, sockstat6, netstat, sctp, softnet, ipvs, infiniband, conntrack, synproxy, diskstats, mdstat, nfsd, nfs, zfs_arcstats, zfs_pool_state, btrfs, ipc, power_supply",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.proc.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.proc.time_by_type,,"stat, uptime, loadavg, entropy, pressure, interrupts, softirqs, vmstat, meminfo, ksm, zram, ecc, numa, pagetypeinfo, netwireless, sockstat, sockstat6, netstat, sctp, softnet, ipvs, infiniband, conntrack, synproxy, diskstats, mdstat, nfsd, nfs, zfs_arcstats, zfs_pool_state, btrfs, ipc, power_supply",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.replication.cpu,,"min, max, average",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.replication.jobs_started_by_type,,"find next, querying, dict delete, find chart, prepare query, check consistency, commit, cleanup, wait, statistics",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.replication.rate.added_requests,,"min, max, average",requests/s,Netdata Workers replication rate of added requests,line,,netdata,stats
+netdata.workers.replication.rate.finished_requests,,"min, max, average",requests/s,Netdata Workers replication rate of finished requests,line,,netdata,stats
+netdata.workers.replication.rate.sender_resets,,"min, max, average",resets/s,Netdata Workers replication rate of sender resets,line,,netdata,stats
+netdata.workers.replication.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.replication.time,,"min, max, average",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.replication.time_by_type,,"find next, querying, dict delete, find chart, prepare query, check consistency, commit, cleanup, wait, statistics",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.replication.value.completion,,"min, max, average",%,Netdata Workers replication value of completion,line,,netdata,stats
+netdata.workers.replication.value.no_room_requests,,"min, max, average",requests,Netdata Workers replication value of no room requests,line,,netdata,stats
+netdata.workers.replication.value.pending_requests,,"min, max, average",requests,Netdata Workers replication value of pending requests,line,,netdata,stats
+netdata.workers.replication.value.senders_full,,"min, max, average",senders,Netdata Workers replication value of senders full,line,,netdata,stats
+netdata.workers.rrdcontext.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.rrdcontext.jobs_started_by_type,,"hosts, dedup checks, sent contexts, deduplicated contexts, metrics retention, queued contexts, cleanups, deletes, check metrics, check instances, check contexts",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.rrdcontext.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.rrdcontext.time_by_type,,"hosts, dedup checks, sent contexts, deduplicated contexts, metrics retention, queued contexts, cleanups, deletes, check metrics, check instances, check contexts",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.rrdcontext.value.hub_queue_size,,"min, max, average",contexts,Netdata Workers rrdcontext value of hub queue size,line,,netdata,stats
+netdata.workers.rrdcontext.value.post_processing_queue_size,,"min, max, average",contexts,Netdata Workers rrdcontext value of post processing queue size,line,,netdata,stats
+netdata.workers.service.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.service.jobs_started_by_type,,"child chart obsoletion check, cleanup obsolete charts, archive chart, archive chart dimensions, archive dimension, cleanup orphan hosts, cleanup obsolete charts on all hosts, free host, save host charts, delete host charts, free chart, save chart, delete chart, free dimension, main cache evictions, main cache flushes, open cache evictions, open cache flushes",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.service.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.service.time_by_type,,"child chart obsoletion check, cleanup obsolete charts, archive chart, archive chart dimensions, archive dimension, cleanup orphan hosts, cleanup obsolete charts on all hosts, free host, save host charts, delete host charts, free chart, save chart, delete chart, free dimension, main cache evictions, main cache flushes, open cache evictions, open cache flushes",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.stats.cpu,,"min, max, average",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.stats.jobs_started_by_type,,"global, registry, workers, dbengine, strings, dictionaries, malloc_trace, sqlite3",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.stats.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.stats.time,,"min, max, average",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.stats.time_by_type,,"global, registry, workers, dbengine, strings, dictionaries, malloc_trace, sqlite3",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.statsd.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.statsd.jobs_started_by_type,,"tcp connect, tcp disconnect, receive, send",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.statsd.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.statsd.time_by_type,,"tcp connect, tcp disconnect, receive, send",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.statsdflush.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.statsdflush.jobs_started_by_type,,"gauges, counters, meters, timers, histograms, sets, dictionaries, statistics",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.statsdflush.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.statsdflush.time_by_type,,"gauges, counters, meters, timers, histograms, sets, dictionaries, statistics",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.streamrcv.cpu,,"average, min, max",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.streamrcv.jobs_started_by_type,,"CHART, DIMENSION, VARIABLE, LABEL, OVERWRITE, CLABEL_COMMIT, CLABEL, FUNCTION, FUNCTION_RESULT_BEGIN, BEGIN, SET, END, CHART_DEFINITION_END, RBEGIN, RSET, RDSTATE, RSSTATE, REND, BEGIN2, SET2, END2, CLAIMED_ID",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.streamrcv.rate.received_bytes,,"min, max, average",bytes/s,Netdata Workers streamrcv rate of received bytes,line,,netdata,stats
+netdata.workers.streamrcv.rate.uncompressed_bytes,,"min, max, average",bytes/s,Netdata Workers streamrcv rate of uncompressed bytes,line,,netdata,stats
+netdata.workers.streamrcv.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.streamrcv.time,,"average, min, max",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.streamrcv.time_by_type,,"CHART, DIMENSION, VARIABLE, LABEL, OVERWRITE, CLABEL_COMMIT, CLABEL, FUNCTION, FUNCTION_RESULT_BEGIN, BEGIN, SET, END, CHART_DEFINITION_END, RBEGIN, RSET, RDSTATE, RSSTATE, REND, BEGIN2, SET2, END2, CLAIMED_ID",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.streamrcv.value.replication_completion,,"min, max, average",%,Netdata Workers streamrcv value of replication completion,line,,netdata,stats
+netdata.workers.streamsnd.cpu,,"average, min, max",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.streamsnd.jobs_started_by_type,,"connect, pipe read, receive, execute, send, disconnect bad handshake, disconnect overflow, disconnect timeout, disconnect poll error, disconnect socket error, disconnect ssl error, disconnect parent closed, disconnect receive error, disconnect send error, disconnect no compression, replay request, function",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.streamsnd.rate.bytes_received,,"min, max, average",bytes/s,Netdata Workers streamsnd rate of bytes received,line,,netdata,stats
+netdata.workers.streamsnd.rate.bytes_sent,,"min, max, average",bytes/s,Netdata Workers streamsnd rate of bytes sent,line,,netdata,stats
+netdata.workers.streamsnd.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.streamsnd.time,,"average, min, max",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.streamsnd.time_by_type,,"connect, pipe read, receive, execute, send, disconnect bad handshake, disconnect overflow, disconnect timeout, disconnect poll error, disconnect socket error, disconnect ssl error, disconnect parent closed, disconnect receive error, disconnect send error, disconnect no compression, replay request, function",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.streamsnd.value.replication_dict_entries,,"min, max, average",entries,Netdata Workers streamsnd value of replication dict entries,line,,netdata,stats
+netdata.workers.streamsnd.value.used_buffer_ratio,,"min, max, average",%,Netdata Workers streamsnd value of used buffer ratio,line,,netdata,stats
+netdata.workers.tc.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.tc.jobs_started_by_type,,"class, begin, end, sent, lended, tokens, devicename, devicegroup, classname, worktime",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.tc.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.tc.time_by_type,,"class, begin, end, sent, lended, tokens, devicename, devicegroup, classname, worktime",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.tc.value.number_of_classes,,"min, max, average",classes,Netdata Workers tc value of number of classes,line,,netdata,stats
+netdata.workers.tc.value.number_of_devices,,"min, max, average",devices,Netdata Workers tc value of number of devices,line,,netdata,stats
+netdata.workers.tc.value.tc_script_execution_time,,"min, max, average",milliseconds/run,Netdata Workers tc value of tc script execution time,line,,netdata,stats
+netdata.workers.timex.cpu,,average,%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.timex.jobs_started_by_type,,clock check,jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.timex.time,,average,%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.timex.time_by_type,,clock check,ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
+netdata.workers.web.cpu,,"min, max, average",%,Netdata Workers CPU Utilization,area,,netdata,stats
+netdata.workers.web.jobs_started_by_type,,"connect, disconnect, file start, file end, file read, file write, receive, send, process",jobs,Netdata Workers Jobs Started by Type,stacked,,netdata,stats
+netdata.workers.web.threads,,"free, busy",threads,Netdata Workers Threads,stacked,,netdata,stats
+netdata.workers.web.time,,"min, max, average",%,Netdata Workers Busy Time,area,,netdata,stats
+netdata.workers.web.time_by_type,,"connect, disconnect, file start, file end, file read, file write, receive, send, process",ms,Netdata Workers Busy Time by Type,stacked,,netdata,stats
diff --git a/daemon/pipename.c b/daemon/pipename.c
new file mode 100644
index 00000000..70b6a25b
--- /dev/null
+++ b/daemon/pipename.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "pipename.h"
+
+#include <stdlib.h>
+
+const char *daemon_pipename(void) {
+ const char *pipename = getenv("NETDATA_PIPENAME");
+ if (pipename)
+ return pipename;
+
+#ifdef _WIN32
+ return "\\\\?\\pipe\\netdata-cli";
+#else
+ return "/tmp/netdata-ipc";
+#endif
+}
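
The endpoint returned by daemon_pipename() is where netdatacli and the daemon's command server meet; on POSIX systems it is a UNIX-domain socket path, on Windows a named pipe. A minimal, hypothetical client-side sketch of resolving and connecting to that endpoint (not the upstream netdatacli implementation, which layers its own command protocol on this transport):

// Hypothetical POSIX-only sketch: connect to the IPC endpoint resolved by
// daemon_pipename(). On Windows the returned name is a named pipe and would
// be opened with CreateFile() instead of a UNIX-domain socket.
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/un.h>
#include "pipename.h"

int connect_to_daemon(void) {
    const char *path = daemon_pipename();      // honors NETDATA_PIPENAME

    int fd = socket(AF_UNIX, SOCK_STREAM, 0);
    if (fd == -1) {
        perror("socket");
        return -1;
    }

    struct sockaddr_un addr;
    memset(&addr, 0, sizeof(addr));
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == -1) {
        perror("connect");
        close(fd);
        return -1;
    }

    return fd;   // the caller writes CLI commands to this descriptor
}
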
diff --git a/daemon/pipename.h b/daemon/pipename.h
new file mode 100644
index 00000000..6ca6e8d0
--- /dev/null
+++ b/daemon/pipename.h
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef DAEMON_PIPENAME_H
+#define DAEMON_PIPENAME_H
+
+const char *daemon_pipename(void);
+
+#endif /* DAEMON_PIPENAME_H */
diff --git a/daemon/service.c b/daemon/service.c
index 57c7c7f3..a25e2a26 100644
--- a/daemon/service.c
+++ b/daemon/service.c
@@ -40,9 +40,9 @@ static void svc_rrddim_obsolete_to_archive(RRDDIM *rd) {
const char *cache_filename = rrddim_cache_filename(rd);
if(cache_filename) {
- info("Deleting dimension file '%s'.", cache_filename);
+ netdata_log_info("Deleting dimension file '%s'.", cache_filename);
if (unlikely(unlink(cache_filename) == -1))
- error("Cannot delete dimension file '%s'", cache_filename);
+ netdata_log_error("Cannot delete dimension file '%s'", cache_filename);
}
if (rd->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
@@ -87,11 +87,11 @@ static bool svc_rrdset_archive_obsolete_dimensions(RRDSET *st, bool all_dimensio
dfe_start_write(st->rrddim_root_index, rd) {
if(unlikely(
all_dimensions ||
- (rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE) && (rd->last_collected_time.tv_sec + rrdset_free_obsolete_time_s < now))
+ (rrddim_flag_check(rd, RRDDIM_FLAG_OBSOLETE) && (rd->collector.last_collected_time.tv_sec + rrdset_free_obsolete_time_s < now))
)) {
if(dictionary_acquired_item_references(rd_dfe.item) == 1) {
- info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rrddim_name(rd), rrddim_id(rd), rrdset_name(st), rrdset_id(st));
+ netdata_log_info("Removing obsolete dimension '%s' (%s) of '%s' (%s).", rrddim_name(rd), rrddim_id(rd), rrdset_name(st), rrdset_id(st));
svc_rrddim_obsolete_to_archive(rd);
}
else
@@ -227,7 +227,7 @@ restart_after_removal:
if(!rrdhost_should_be_removed(host, protected_host, now))
continue;
- info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", rrdhost_hostname(host), host->machine_guid);
+ netdata_log_info("Host '%s' with machine guid '%s' is obsolete - cleaning up.", rrdhost_hostname(host), host->machine_guid);
if (rrdhost_option_check(host, RRDHOST_OPTION_DELETE_ORPHAN_HOST)
/* don't delete multi-host DB host files */
@@ -254,7 +254,7 @@ static void service_main_cleanup(void *ptr)
struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
static_thread->enabled = NETDATA_MAIN_THREAD_EXITING;
- debug(D_SYSTEM, "Cleaning up...");
+ netdata_log_debug(D_SYSTEM, "Cleaning up...");
worker_unregister();
static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
@@ -290,7 +290,7 @@ void *service_main(void *ptr)
heartbeat_init(&hb);
usec_t step = USEC_PER_SEC * SERVICE_HEARTBEAT;
- debug(D_SYSTEM, "Service thread starts");
+ netdata_log_debug(D_SYSTEM, "Service thread starts");
while (service_running(SERVICE_MAINTENANCE)) {
worker_is_idle();
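
The hunk above shows netdata's standard service-loop shape: a heartbeat paces the iterations while service_running() gates shutdown. A hedged sketch of that pattern, using the names visible in the hunk (heartbeat_next() is assumed from netdata's libnetdata clocks API):

// Illustrative service-loop skeleton, assuming heartbeat_next() from
// libnetdata; not a verbatim copy of service_main().
heartbeat_t hb;
heartbeat_init(&hb);
usec_t step = USEC_PER_SEC * SERVICE_HEARTBEAT;

while (service_running(SERVICE_MAINTENANCE)) {
    worker_is_idle();
    heartbeat_next(&hb, step);   // sleep until the next tick
    // ... periodic maintenance work runs here ...
}
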
diff --git a/daemon/signals.c b/daemon/signals.c
index 3699010c..ae28874c 100644
--- a/daemon/signals.c
+++ b/daemon/signals.c
@@ -59,7 +59,7 @@ void signals_block(void) {
sigfillset(&sigset);
if(pthread_sigmask(SIG_BLOCK, &sigset, NULL) == -1)
- error("SIGNAL: Could not block signals for threads");
+ netdata_log_error("SIGNAL: Could not block signals for threads");
}
void signals_unblock(void) {
@@ -67,7 +67,7 @@ void signals_unblock(void) {
sigfillset(&sigset);
if(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL) == -1) {
- error("SIGNAL: Could not unblock signals for threads");
+ netdata_log_error("SIGNAL: Could not unblock signals for threads");
}
}
@@ -91,7 +91,7 @@ void signals_init(void) {
}
if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
- error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name);
+ netdata_log_error("SIGNAL: Failed to change signal handler for: %s", signals_waiting[i].name);
}
}
@@ -104,7 +104,7 @@ void signals_restore_SIGCHLD(void)
sa.sa_handler = signal_handler;
if(sigaction(SIGCHLD, &sa, NULL) == -1)
- error("SIGNAL: Failed to change signal handler for: SIGCHLD");
+ netdata_log_error("SIGNAL: Failed to change signal handler for: SIGCHLD");
}
void signals_reset(void) {
@@ -116,7 +116,7 @@ void signals_reset(void) {
int i;
for (i = 0; signals_waiting[i].action != NETDATA_SIGNAL_END_OF_LIST; i++) {
if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
- error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name);
+ netdata_log_error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name);
}
}
@@ -125,41 +125,41 @@ static void reap_child(pid_t pid) {
siginfo_t i;
errno = 0;
- debug(D_CHILDS, "SIGNAL: reap_child(%d)...", pid);
+ netdata_log_debug(D_CHILDS, "SIGNAL: reap_child(%d)...", pid);
if (netdata_waitid(P_PID, (id_t)pid, &i, WEXITED|WNOHANG) == -1) {
if (errno != ECHILD)
- error("SIGNAL: waitid(%d): failed to wait for child", pid);
+ netdata_log_error("SIGNAL: waitid(%d): failed to wait for child", pid);
else
- info("SIGNAL: waitid(%d): failed - it seems the child is already reaped", pid);
+ netdata_log_info("SIGNAL: waitid(%d): failed - it seems the child is already reaped", pid);
return;
}
else if (i.si_pid == 0) {
// Process didn't exit, this shouldn't happen.
- error("SIGNAL: waitid(%d): reports pid 0 - child has not exited", pid);
+ netdata_log_error("SIGNAL: waitid(%d): reports pid 0 - child has not exited", pid);
return;
}
switch (i.si_code) {
case CLD_EXITED:
- info("SIGNAL: reap_child(%d) exited with code: %d", pid, i.si_status);
+ netdata_log_info("SIGNAL: reap_child(%d) exited with code: %d", pid, i.si_status);
break;
case CLD_KILLED:
- info("SIGNAL: reap_child(%d) killed by signal: %d", pid, i.si_status);
+ netdata_log_info("SIGNAL: reap_child(%d) killed by signal: %d", pid, i.si_status);
break;
case CLD_DUMPED:
- info("SIGNAL: reap_child(%d) dumped core by signal: %d", pid, i.si_status);
+ netdata_log_info("SIGNAL: reap_child(%d) dumped core by signal: %d", pid, i.si_status);
break;
case CLD_STOPPED:
- info("SIGNAL: reap_child(%d) stopped by signal: %d", pid, i.si_status);
+ netdata_log_info("SIGNAL: reap_child(%d) stopped by signal: %d", pid, i.si_status);
break;
case CLD_TRAPPED:
- info("SIGNAL: reap_child(%d) trapped by signal: %d", pid, i.si_status);
+ netdata_log_info("SIGNAL: reap_child(%d) trapped by signal: %d", pid, i.si_status);
break;
case CLD_CONTINUED:
- info("SIGNAL: reap_child(%d) continued by signal: %d", pid, i.si_status);
+ netdata_log_info("SIGNAL: reap_child(%d) continued by signal: %d", pid, i.si_status);
break;
default:
- info("SIGNAL: reap_child(%d) gave us a SIGCHLD with code %d and status %d.", pid, i.si_code, i.si_status);
+ netdata_log_info("SIGNAL: reap_child(%d) gave us a SIGCHLD with code %d and status %d.", pid, i.si_code, i.si_status);
break;
}
}
@@ -204,28 +204,28 @@ void signals_handle(void) {
switch (signals_waiting[i].action) {
case NETDATA_SIGNAL_RELOAD_HEALTH:
error_log_limit_unlimited();
- info("SIGNAL: Received %s. Reloading HEALTH configuration...", name);
+ netdata_log_info("SIGNAL: Received %s. Reloading HEALTH configuration...", name);
error_log_limit_reset();
execute_command(CMD_RELOAD_HEALTH, NULL, NULL);
break;
case NETDATA_SIGNAL_SAVE_DATABASE:
error_log_limit_unlimited();
- info("SIGNAL: Received %s. Saving databases...", name);
+ netdata_log_info("SIGNAL: Received %s. Saving databases...", name);
error_log_limit_reset();
execute_command(CMD_SAVE_DATABASE, NULL, NULL);
break;
case NETDATA_SIGNAL_REOPEN_LOGS:
error_log_limit_unlimited();
- info("SIGNAL: Received %s. Reopening all log files...", name);
+ netdata_log_info("SIGNAL: Received %s. Reopening all log files...", name);
error_log_limit_reset();
execute_command(CMD_REOPEN_LOGS, NULL, NULL);
break;
case NETDATA_SIGNAL_EXIT_CLEANLY:
error_log_limit_unlimited();
- info("SIGNAL: Received %s. Cleaning up to exit...", name);
+ netdata_log_info("SIGNAL: Received %s. Cleaning up to exit...", name);
commands_exit();
netdata_cleanup_and_exit(0);
exit(0);
@@ -240,7 +240,7 @@ void signals_handle(void) {
break;
default:
- info("SIGNAL: Received %s. No signal handler configured. Ignoring it.", name);
+ netdata_log_info("SIGNAL: Received %s. No signal handler configured. Ignoring it.", name);
break;
}
}
@@ -248,6 +248,6 @@ void signals_handle(void) {
}
}
else
- error("SIGNAL: pause() returned but it was not interrupted by a signal.");
+ netdata_log_error("SIGNAL: pause() returned but it was not interrupted by a signal.");
}
}
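
For context, the handlers above are driven by the signals netdata traps (upstream maps SIGUSR2 to the health reload, SIGUSR1 to the database save, SIGHUP to log reopening, and SIGINT/SIGTERM to the clean exit). As a hedged illustration, a supervisor process could trigger a health-configuration reload like this, assuming that default mapping:

// Hedged illustration: request a health-configuration reload from a running
// daemon. Assumes SIGUSR2 is bound to NETDATA_SIGNAL_RELOAD_HEALTH (the
// upstream default) and that netdata_pid was read from the daemon's pid file.
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>

int reload_health(pid_t netdata_pid) {
    if (kill(netdata_pid, SIGUSR2) == -1) {
        perror("kill(SIGUSR2)");
        return 1;
    }
    return 0;
}
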
diff --git a/daemon/static_threads.c b/daemon/static_threads.c
index fe83945c..4ee2a466 100644
--- a/daemon/static_threads.c
+++ b/daemon/static_threads.c
@@ -13,6 +13,7 @@ void *pluginsd_main(void *ptr);
void *service_main(void *ptr);
void *statsd_main(void *ptr);
void *timex_main(void *ptr);
+void *profile_main(void *ptr);
void *replication_thread_main(void *ptr __maybe_unused);
extern bool global_statistics_enabled;
@@ -142,15 +143,15 @@ const struct netdata_static_thread static_threads_common[] = {
.start_routine = socket_listen_main_static_threaded
},
-#ifdef ENABLE_HTTPD
+#ifdef ENABLE_H2O
{
- .name = "httpd",
+ .name = "h2o",
.config_section = NULL,
.config_name = NULL,
.enabled = 0,
.thread = NULL,
.init_routine = NULL,
- .start_routine = httpd_main
+ .start_routine = h2o_main
},
#endif
@@ -185,6 +186,15 @@ const struct netdata_static_thread static_threads_common[] = {
.init_routine = NULL,
.start_routine = replication_thread_main
},
+ {
+ .name = "P[PROFILE]",
+ .config_section = CONFIG_SECTION_PLUGINS,
+ .config_name = "profile",
+ .enabled = 0,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = profile_main
+ },
// terminator
{
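
Every start_routine registered in this table, including the new profile_main(), follows the same pthread-style contract: it receives its struct netdata_static_thread and flags itself exited on the way out. A hedged skeleton of that contract, built only from names visible in this diff (illustrative only, not the upstream profile_main()):

// Skeleton of a static-thread entry point; the real profile_main() does
// actual work between the markers below.
void *profile_main(void *ptr) {
    struct netdata_static_thread *static_thread = ptr;

    // ... the thread's workload would run here; upstream threads install a
    // cleanup handler that transitions enabled through EXITING to EXITED ...

    static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
    return NULL;
}
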
diff --git a/daemon/unit_test.c b/daemon/unit_test.c
index fa3fa847..b8d22931 100644
--- a/daemon/unit_test.c
+++ b/daemon/unit_test.c
@@ -1325,7 +1325,7 @@ int run_test(struct test *test)
// align the first entry to second boundary
if(!c) {
fprintf(stderr, " > %s: fixing first collection time to be %llu microseconds to second boundary\n", test->name, test->feed[c].microseconds);
- rd->last_collected_time.tv_usec = st->last_collected_time.tv_usec = st->last_updated.tv_usec = test->feed[c].microseconds;
+ rd->collector.last_collected_time.tv_usec = st->last_collected_time.tv_usec = st->last_updated.tv_usec = test->feed[c].microseconds;
// time_start = st->last_collected_time.tv_sec;
}
}
@@ -1334,13 +1334,14 @@ int run_test(struct test *test)
int errors = 0;
if(st->counter != test->result_entries) {
- fprintf(stderr, " %s stored %zu entries, but we were expecting %lu, ### E R R O R ###\n", test->name, st->counter, test->result_entries);
+ fprintf(stderr, " %s stored %u entries, but we were expecting %lu, ### E R R O R ###\n",
+ test->name, st->counter, test->result_entries);
errors++;
}
unsigned long max = (st->counter < test->result_entries)?st->counter:test->result_entries;
for(c = 0 ; c < max ; c++) {
- NETDATA_DOUBLE v = unpack_storage_number(rd->db[c]);
+ NETDATA_DOUBLE v = unpack_storage_number(rd->db.data[c]);
NETDATA_DOUBLE n = unpack_storage_number(pack_storage_number(test->results[c], SN_DEFAULT_FLAGS));
int same = (roundndd(v * 10000000.0) == roundndd(n * 10000000.0))?1:0;
fprintf(stderr, " %s/%s: checking position %lu (at %"PRId64" secs), expecting value " NETDATA_DOUBLE_FORMAT
@@ -1352,7 +1353,7 @@ int run_test(struct test *test)
if(!same) errors++;
if(rd2) {
- v = unpack_storage_number(rd2->db[c]);
+ v = unpack_storage_number(rd2->db.data[c]);
n = test->results2[c];
same = (roundndd(v * 10000000.0) == roundndd(n * 10000000.0))?1:0;
fprintf(stderr, " %s/%s: checking position %lu (at %"PRId64" secs), expecting value " NETDATA_DOUBLE_FORMAT
@@ -1584,7 +1585,7 @@ int unit_test(long delay, long shift)
// prevent it from deleting the dimensions
rrddim_foreach_read(rd, st) {
- rd->last_collected_time.tv_sec = st->last_collected_time.tv_sec;
+ rd->collector.last_collected_time.tv_sec = st->last_collected_time.tv_sec;
}
rrddim_foreach_done(rd);
@@ -1602,7 +1603,7 @@ int unit_test(long delay, long shift)
fprintf(stderr, "\nPOSITION: c = %lu, EXPECTED VALUE %lu\n", c, (oincrement + c * increment + increment * (1000000 - shift) / 1000000 )* 10);
rrddim_foreach_read(rd, st) {
- sn = rd->db[c];
+ sn = rd->db.data[c];
cn = unpack_storage_number(sn);
fprintf(stderr, "\t %s " NETDATA_DOUBLE_FORMAT " (PACKED AS " STORAGE_NUMBER_FORMAT ") -> ", rrddim_id(rd), cn, sn);
@@ -1661,6 +1662,18 @@ int test_sqlite(void) {
return 1;
}
+ rc = sqlite3_create_function(db_meta, "now_usec", 1, SQLITE_ANY, 0, sqlite_now_usec, 0, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ fprintf(stderr, "Failed to register internal now_usec function\n");
+ return 1;
+ }
+
+ rc = sqlite3_exec_monitored(db_meta, "UPDATE MINE SET id1=now_usec(0);", 0, 0, NULL);
+ if (rc != SQLITE_OK) {
+ fprintf(stderr,"Failed to test SQLite: Update with now_usec() failed\n");
+ return 1;
+ }
+
BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE, NULL);
char *uuid_str = "0000_000";
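
The now_usec() call exercised above is an application-defined SQL function. A hedged sketch of what such a UDF can look like (the upstream sqlite_now_usec() lives elsewhere in the netdata tree and also interprets its single argument; this sketch simply ignores it):

// Illustrative application-defined now_usec() UDF: returns the current
// wall-clock time in microseconds as a 64-bit integer.
#include <sqlite3.h>
#include <sys/time.h>

static void now_usec_example(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
    (void)argc; (void)argv;   // the flag argument is ignored in this sketch
    struct timeval tv;
    gettimeofday(&tv, NULL);
    sqlite3_result_int64(ctx, (sqlite3_int64)tv.tv_sec * 1000000 + tv.tv_usec);
}
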
@@ -1688,145 +1701,129 @@ error:
return 1;
}
-int unit_test_bitmap256(void) {
- fprintf(stderr, "%s() running...\n", __FUNCTION__ );
+static int bitmapX_test(BITMAPX *ptr, char *expected, const char *msg) {
+ int errors = 0;
- BITMAP256 test_bitmap = {0};
-
- bitmap256_set_bit(&test_bitmap, 0, 1);
- bitmap256_set_bit(&test_bitmap, 64, 1);
- bitmap256_set_bit(&test_bitmap, 128, 1);
- bitmap256_set_bit(&test_bitmap, 192, 1);
- if (test_bitmap.data[0] == 1)
- fprintf(stderr, "%s() INDEX 1 is OK\n", __FUNCTION__ );
- if (test_bitmap.data[1] == 1)
- fprintf(stderr, "%s() INDEX 65 is OK\n", __FUNCTION__ );
- if (test_bitmap.data[2] == 1)
- fprintf(stderr, "%s() INDEX 129 is OK\n", __FUNCTION__ );
- if (test_bitmap.data[3] == 1)
- fprintf(stderr, "%s() INDEX 192 is OK\n", __FUNCTION__ );
-
- uint8_t i=0;
- int j = 0;
- do {
- bitmap256_set_bit(&test_bitmap, i++, 1);
- j++;
- } while (j < 256);
+ for(uint32_t idx = 0; idx < ptr->bits ; idx++) {
+ bool found_set = bitmapX_get_bit(ptr, idx);
+ bool expected_set = expected[idx];
- if (test_bitmap.data[0] == 0xffffffffffffffff)
- fprintf(stderr, "%s() INDEX 0 is fully set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 0 is %"PRIu64" expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
- return 1;
+ if(found_set != expected_set) {
+ fprintf(stderr, " >>> %s(): %s, bit %u is expected %s but found %s\n",
+ __FUNCTION__, msg, idx, expected_set?"SET":"UNSET", found_set?"SET":"UNSET");
+ errors++;
+ }
}
- if (test_bitmap.data[1] == 0xffffffffffffffff)
- fprintf(stderr, "%s() INDEX 1 is fully set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 1 is %"PRIu64" expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
- return 1;
- }
+ if(errors)
+ fprintf(stderr,"%s(): %s, found %d errors\n",
+ __FUNCTION__, msg, errors);
- if (test_bitmap.data[2] == 0xffffffffffffffff)
- fprintf(stderr, "%s() INDEX 2 is fully set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 2 is %"PRIu64" expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
- return 1;
- }
+ return errors;
+}
- if (test_bitmap.data[3] == 0xffffffffffffffff)
- fprintf(stderr, "%s() INDEX 3 is fully set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 3 is %"PRIu64" expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
- return 1;
- }
+#define bitmapX_set_bit_and_track(ptr, bit, value, expected) do { \
+ bitmapX_set_bit(ptr, bit, value); \
+ (expected)[bit] = value; \
+} while(0)
- i = 0;
- j = 0;
- do {
- bitmap256_set_bit(&test_bitmap, i++, 0);
- j++;
- } while (j < 256);
+int unit_test_bitmaps(void) {
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
- if (test_bitmap.data[0] == 0)
- fprintf(stderr, "%s() INDEX 0 is reset OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 0 is not reset FAILED\n", __FUNCTION__);
- return 1;
- }
- if (test_bitmap.data[1] == 0)
- fprintf(stderr, "%s() INDEX 1 is reset OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 1 is not reset FAILED\n", __FUNCTION__);
- return 1;
- }
+ int errors = 0;
- if (test_bitmap.data[2] == 0)
- fprintf(stderr, "%s() INDEX 2 is reset OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 2 is not reset FAILED\n", __FUNCTION__);
- return 1;
- }
+ char expected[8192];
- if (test_bitmap.data[3] == 0)
- fprintf(stderr, "%s() INDEX 3 is reset OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 3 is not reset FAILED\n", __FUNCTION__);
- return 1;
- }
+ BITMAP256 bmp256 = BITMAP256_INITIALIZER;
+ BITMAP1024 bmp1024 = BITMAP1024_INITIALIZER;
+ BITMAPX *bmp = NULL;
- i=0;
- j = 0;
- do {
- bitmap256_set_bit(&test_bitmap, i, 1);
- i += 4;
- j += 4;
- } while (j < 256);
+ for(int x = 0; x < 3 ; x++) {
+ char msg[100 + 1];
- if (test_bitmap.data[0] == 0x1111111111111111)
- fprintf(stderr, "%s() INDEX 0 is 0x1111111111111111 set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 0 is %"PRIu64" expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[0]);
- return 1;
- }
+ switch (x) {
+ default:
+ case 0:
+ bmp = (BITMAPX *) &bmp256;
+ break;
- if (test_bitmap.data[1] == 0x1111111111111111)
- fprintf(stderr, "%s() INDEX 1 is 0x1111111111111111 set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 1 is %"PRIu64" expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[1]);
- return 1;
- }
+ case 1:
+ bmp = (BITMAPX *) &bmp1024;
+ break;
- if (test_bitmap.data[2] == 0x1111111111111111)
- fprintf(stderr, "%s() INDEX 2 is 0x1111111111111111 set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 2 is %"PRIu64" expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[2]);
- return 1;
- }
+ case 2:
+ bmp = bitmapX_create(8192);
+ break;
+ }
- if (test_bitmap.data[3] == 0x1111111111111111)
- fprintf(stderr, "%s() INDEX 3 is 0x1111111111111111 set OK\n", __FUNCTION__);
- else {
- fprintf(stderr, "%s() INDEX 3 is %"PRIu64" expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[3]);
- return 1;
+ // reset
+ memset(expected, 0, bmp->bits);
+ memset(bmp->data, 0, bmp->bits / 8);
+
+ snprintf(msg, 100, "TEST 1 BITMAP %u", bmp->bits);
+ bitmapX_set_bit_and_track(bmp, 0, true, expected);
+ errors += bitmapX_test(bmp, expected, msg);
+
+ snprintf(msg, 100, "TEST 2 BITMAP %u", bmp->bits);
+ bitmapX_set_bit_and_track(bmp, 64, true, expected);
+ errors += bitmapX_test(bmp, expected, msg);
+
+ snprintf(msg, 100, "TEST 3 BITMAP %u", bmp->bits);
+ bitmapX_set_bit_and_track(bmp, 128, true, expected);
+ errors += bitmapX_test(bmp, expected, msg);
+
+ snprintf(msg, 100, "TEST 4 BITMAP %u", bmp->bits);
+ bitmapX_set_bit_and_track(bmp, 192, true, expected);
+ errors += bitmapX_test(bmp, expected, msg);
+
+ for (uint32_t step = 1; step < 256; step++) {
+ snprintf(msg, 100, "TEST 5 (setting) BITMAP %u STEP %u", bmp->bits, step);
+
+ // reset
+ memset(expected, 0, bmp->bits);
+ memset(bmp->data, 0, bmp->bits / 8);
+
+ for (uint32_t i = 0; i < bmp->bits ; i += step)
+ bitmapX_set_bit_and_track(bmp, i, true, expected);
+
+ errors += bitmapX_test(bmp, expected, msg);
+ }
+
+ for (uint32_t step = 1; step < 256; step++) {
+ snprintf(msg, 100, "TEST 6 (clearing) BITMAP %u STEP %u", bmp->bits, step);
+
+ // reset
+ memset(expected, 0, bmp->bits);
+ memset(bmp->data, 0, bmp->bits / 8);
+
+ for (uint32_t i = 0; i < bmp->bits ; i++)
+ bitmapX_set_bit_and_track(bmp, i, true, expected);
+
+ for (uint32_t i = 0; i < bmp->bits ; i += step)
+ bitmapX_set_bit_and_track(bmp, i, false, expected);
+
+ errors += bitmapX_test(bmp, expected, msg);
+ }
}
- fprintf(stderr, "%s() tests passed\n", __FUNCTION__);
- return 0;
+ freez(bmp);
+
+ fprintf(stderr, "%s() %d errors\n", __FUNCTION__, errors);
+ return errors;
}
#ifdef ENABLE_DBENGINE
static inline void rrddim_set_by_pointer_fake_time(RRDDIM *rd, collected_number value, time_t now)
{
- rd->last_collected_time.tv_sec = now;
- rd->last_collected_time.tv_usec = 0;
- rd->collected_value = value;
- rd->updated = 1;
+ rd->collector.last_collected_time.tv_sec = now;
+ rd->collector.last_collected_time.tv_usec = 0;
+ rd->collector.collected_value = value;
+ rrddim_set_updated(rd);
- rd->collections_counter++;
+ rd->collector.counter++;
collected_number v = (value >= 0) ? value : -value;
- if(unlikely(v > rd->collected_value_max)) rd->collected_value_max = v;
+ if(unlikely(v > rd->collector.collected_value_max)) rd->collector.collected_value_max = v;
}
static RRDHOST *dbengine_rrdhost_find_or_create(char *name)
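
The rewritten test above exercises the generalized BITMAPX family through a single code path, where the old test poked BITMAP256 words directly. A hedged usage sketch of that API, using only the calls visible in this diff (declarations assumed to come from netdata's libnetdata headers):

// Minimal BITMAPX usage: a fixed-size bitmap on the stack and an
// arbitrarily sized one on the heap, both driven through the same calls.
#include <stdio.h>
#include <stdbool.h>

void bitmap_usage_example(void) {
    BITMAP256 small = BITMAP256_INITIALIZER;
    bitmapX_set_bit((BITMAPX *)&small, 10, true);
    if (bitmapX_get_bit((BITMAPX *)&small, 10))
        fprintf(stderr, "bit 10 is set\n");

    BITMAPX *big = bitmapX_create(8192);   // heap-allocated, 8192 bits
    bitmapX_set_bit(big, 4096, true);
    freez(big);                            // netdata's free wrapper
}
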
@@ -1898,9 +1895,9 @@ static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDI
// Initialize DB with the very first entries
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0 ; j < DIMS ; ++j) {
- rd[i][j]->last_collected_time.tv_sec =
+ rd[i][j]->collector.last_collected_time.tv_sec =
st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = 2 * API_RELATIVE_TIME_MAX - 1;
- rd[i][j]->last_collected_time.tv_usec =
+ rd[i][j]->collector.last_collected_time.tv_usec =
st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
}
}
@@ -1939,9 +1936,9 @@ static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS
for (j = 0 ; j < DIMS ; ++j) {
storage_engine_store_change_collection_frequency(rd[i][j]->tiers[0].db_collection_handle, update_every);
- rd[i][j]->last_collected_time.tv_sec =
+ rd[i][j]->collector.last_collected_time.tv_sec =
st[i]->last_collected_time.tv_sec = st[i]->last_updated.tv_sec = time_now;
- rd[i][j]->last_collected_time.tv_usec =
+ rd[i][j]->collector.last_collected_time.tv_usec =
st[i]->last_collected_time.tv_usec = st[i]->last_updated.tv_usec = 0;
}
}
@@ -2305,9 +2302,9 @@ static void generate_dbengine_chart(void *arg)
// feed it with the test data
time_current = time_present - history_seconds;
for (j = 0 ; j < DSET_DIMS ; ++j) {
- rd[j]->last_collected_time.tv_sec =
+ rd[j]->collector.last_collected_time.tv_sec =
st->last_collected_time.tv_sec = st->last_updated.tv_sec = time_current - update_every;
- rd[j]->last_collected_time.tv_usec =
+ rd[j]->collector.last_collected_time.tv_usec =
st->last_collected_time.tv_usec = st->last_updated.tv_usec = 0;
}
for( ; !thread_info->done && time_current < time_present ; time_current += update_every) {
diff --git a/daemon/unit_test.h b/daemon/unit_test.h
index f79bd5c4..c7cd104e 100644
--- a/daemon/unit_test.h
+++ b/daemon/unit_test.h
@@ -12,7 +12,7 @@ int unit_test_str2ld(void);
int unit_test_buffer(void);
int unit_test_static_threads(void);
int test_sqlite(void);
-int unit_test_bitmap256(void);
+int unit_test_bitmaps(void);
#ifdef ENABLE_DBENGINE
int test_dbengine(void);
void generate_dbengine_dataset(unsigned history_seconds);