summaryrefslogtreecommitdiffstats
path: root/libnetdata
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-03-09 13:19:48 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-03-09 13:20:02 +0000
commit58daab21cd043e1dc37024a7f99b396788372918 (patch)
tree96771e43bb69f7c1c2b0b4f7374cb74d7866d0cb /libnetdata
parentReleasing debian version 1.43.2-1. (diff)
downloadnetdata-58daab21cd043e1dc37024a7f99b396788372918.tar.xz
netdata-58daab21cd043e1dc37024a7f99b396788372918.zip
Merging upstream version 1.44.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'libnetdata')
-rw-r--r--libnetdata/Makefile.am5
-rw-r--r--libnetdata/buffer/buffer.c61
-rw-r--r--libnetdata/buffer/buffer.h55
-rw-r--r--libnetdata/buffered_reader/Makefile.am8
-rw-r--r--libnetdata/buffered_reader/README.md0
-rw-r--r--libnetdata/buffered_reader/buffered_reader.c3
-rw-r--r--libnetdata/buffered_reader/buffered_reader.h146
-rw-r--r--libnetdata/clocks/clocks.c67
-rw-r--r--libnetdata/clocks/clocks.h1
-rw-r--r--libnetdata/config/appconfig.h2
-rw-r--r--libnetdata/datetime/Makefile.am8
-rw-r--r--libnetdata/datetime/README.md11
-rw-r--r--libnetdata/datetime/iso8601.c81
-rw-r--r--libnetdata/datetime/iso8601.h18
-rw-r--r--libnetdata/datetime/rfc3339.c135
-rw-r--r--libnetdata/datetime/rfc3339.h12
-rw-r--r--libnetdata/datetime/rfc7231.c29
-rw-r--r--libnetdata/datetime/rfc7231.h12
-rw-r--r--libnetdata/dictionary/dictionary.c10
-rw-r--r--libnetdata/dyn_conf/README.md229
-rw-r--r--libnetdata/dyn_conf/dyn_conf.h2
-rw-r--r--libnetdata/ebpf/ebpf.c2
-rw-r--r--libnetdata/ebpf/ebpf.h1
-rw-r--r--libnetdata/endian.h32
-rw-r--r--libnetdata/eval/eval.c2
-rw-r--r--libnetdata/facets/facets.c625
-rw-r--r--libnetdata/facets/facets.h10
-rw-r--r--libnetdata/functions_evloop/functions_evloop.c13
-rw-r--r--libnetdata/functions_evloop/functions_evloop.h3
-rw-r--r--libnetdata/gorilla/README.md39
-rwxr-xr-xlibnetdata/gorilla/fuzzer.sh2
-rw-r--r--libnetdata/gorilla/gorilla.cc726
-rw-r--r--libnetdata/gorilla/gorilla.h77
-rw-r--r--libnetdata/health/health.c22
-rw-r--r--libnetdata/http/http_defs.h7
-rw-r--r--libnetdata/inlined.h36
-rw-r--r--libnetdata/libnetdata.c74
-rw-r--r--libnetdata/libnetdata.h121
-rw-r--r--libnetdata/line_splitter/Makefile.am8
-rw-r--r--libnetdata/line_splitter/README.md14
-rw-r--r--libnetdata/line_splitter/line_splitter.c69
-rw-r--r--libnetdata/line_splitter/line_splitter.h120
-rw-r--r--libnetdata/log/Makefile.am1
-rw-r--r--libnetdata/log/README.md196
-rw-r--r--libnetdata/log/journal.c138
-rw-r--r--libnetdata/log/journal.h18
-rw-r--r--libnetdata/log/log.c3074
-rw-r--r--libnetdata/log/log.h303
-rw-r--r--libnetdata/log/systemd-cat-native.c820
-rw-r--r--libnetdata/log/systemd-cat-native.h8
-rw-r--r--libnetdata/log/systemd-cat-native.md209
-rw-r--r--libnetdata/os.c6
-rw-r--r--libnetdata/os.h1
-rw-r--r--libnetdata/procfile/procfile.c9
-rw-r--r--libnetdata/simple_hashtable.h396
-rw-r--r--libnetdata/socket/security.c49
-rw-r--r--libnetdata/socket/security.h1
-rw-r--r--libnetdata/socket/socket.c534
-rw-r--r--libnetdata/string/string.c6
-rw-r--r--libnetdata/string/string.h5
-rw-r--r--libnetdata/threads/threads.c82
-rw-r--r--libnetdata/threads/threads.h2
-rw-r--r--libnetdata/uuid/Makefile.am8
-rw-r--r--libnetdata/uuid/README.md13
-rw-r--r--libnetdata/uuid/uuid.c179
-rw-r--r--libnetdata/uuid/uuid.h29
-rw-r--r--libnetdata/worker_utilization/worker_utilization.c21
-rw-r--r--libnetdata/worker_utilization/worker_utilization.h1
68 files changed, 6736 insertions, 2271 deletions
diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am
index f01a1d34..333155aa 100644
--- a/libnetdata/Makefile.am
+++ b/libnetdata/Makefile.am
@@ -8,9 +8,11 @@ SUBDIRS = \
aral \
avl \
buffer \
+ buffered_reader \
clocks \
completion \
config \
+ datetime \
dictionary \
ebpf \
eval \
@@ -19,6 +21,7 @@ SUBDIRS = \
json \
july \
health \
+ line_splitter \
locks \
log \
onewayalloc \
@@ -31,10 +34,12 @@ SUBDIRS = \
string \
threads \
url \
+ uuid \
worker_utilization \
tests \
$(NULL)
dist_noinst_DATA = \
README.md \
+ gorilla/README.md \
$(NULL)
diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c
index feee72fc..64f9cce4 100644
--- a/libnetdata/buffer/buffer.c
+++ b/libnetdata/buffer/buffer.c
@@ -81,6 +81,7 @@ void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...)
va_list args;
va_start(args, fmt);
+ // vsnprintfz() returns the number of bytes actually written - after possible truncation
wb->len += vsnprintfz(&wb->buffer[wb->len], len, fmt, args);
va_end(args);
@@ -89,53 +90,39 @@ void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...)
// the buffer is \0 terminated by vsnprintfz
}
-void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args)
-{
+inline void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args) {
if(unlikely(!fmt || !*fmt)) return;
- size_t wrote = 0, need = 2, space_remaining = 0;
+ size_t full_size_bytes = 0, need = 2, space_remaining = 0;
do {
- need += space_remaining * 2;
+ need += full_size_bytes + 2;
- netdata_log_debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote);
buffer_need_bytes(wb, need);
space_remaining = wb->size - wb->len - 1;
- wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], space_remaining, fmt, args);
+ // Use the copy of va_list for vsnprintf
+ va_list args_copy;
+ va_copy(args_copy, args);
+ // vsnprintf() returns the number of bytes required, even if bigger than the buffer provided
+ full_size_bytes = (size_t) vsnprintf(&wb->buffer[wb->len], space_remaining, fmt, args_copy);
+ va_end(args_copy);
- } while(wrote >= space_remaining);
+ } while(full_size_bytes >= space_remaining);
- wb->len += wrote;
+ wb->len += full_size_bytes;
- // the buffer is \0 terminated by vsnprintf
+ wb->buffer[wb->len] = '\0';
+ buffer_overflow_check(wb);
}
void buffer_sprintf(BUFFER *wb, const char *fmt, ...)
{
- if(unlikely(!fmt || !*fmt)) return;
-
va_list args;
- size_t wrote = 0, need = 2, space_remaining = 0;
-
- do {
- need += space_remaining * 2;
-
- netdata_log_debug(D_WEB_BUFFER, "web_buffer_sprintf(): increasing web_buffer at position %zu, size = %zu, by %zu bytes (wrote = %zu)\n", wb->len, wb->size, need, wrote);
- buffer_need_bytes(wb, need);
-
- space_remaining = wb->size - wb->len - 1;
-
- va_start(args, fmt);
- wrote = (size_t) vsnprintfz(&wb->buffer[wb->len], space_remaining, fmt, args);
- va_end(args);
-
- } while(wrote >= space_remaining);
-
- wb->len += wrote;
-
- // the buffer is \0 terminated by vsnprintf
+ va_start(args, fmt);
+ buffer_vsprintf(wb, fmt, args);
+ va_end(args);
}
// generate a javascript date, the fastest possible way...
@@ -301,12 +288,15 @@ void buffer_json_initialize(BUFFER *wb, const char *key_quote, const char *value
strncpyz(wb->json.key_quote, key_quote, BUFFER_QUOTE_MAX_SIZE);
strncpyz(wb->json.value_quote, value_quote, BUFFER_QUOTE_MAX_SIZE);
- wb->json.options = options;
wb->json.depth = (int8_t)(depth - 1);
_buffer_json_depth_push(wb, BUFFER_JSON_OBJECT);
if(add_anonymous_object)
buffer_fast_strcat(wb, "{", 1);
+ else
+ options |= BUFFER_JSON_OPTIONS_NON_ANONYMOUS;
+
+ wb->json.options = options;
wb->content_type = CT_APPLICATION_JSON;
buffer_no_cacheable(wb);
@@ -316,9 +306,14 @@ void buffer_json_finalize(BUFFER *wb) {
while(wb->json.depth >= 0) {
switch(wb->json.stack[wb->json.depth].type) {
case BUFFER_JSON_OBJECT:
- buffer_json_object_close(wb);
+ if (wb->json.depth == 0)
+ if (!(wb->json.options & BUFFER_JSON_OPTIONS_NON_ANONYMOUS))
+ buffer_json_object_close(wb);
+ else
+ _buffer_json_depth_pop(wb);
+ else
+ buffer_json_object_close(wb);
break;
-
case BUFFER_JSON_ARRAY:
buffer_json_array_close(wb);
break;
diff --git a/libnetdata/buffer/buffer.h b/libnetdata/buffer/buffer.h
index 26efe007..88d3f028 100644
--- a/libnetdata/buffer/buffer.h
+++ b/libnetdata/buffer/buffer.h
@@ -72,6 +72,7 @@ typedef enum __attribute__ ((__packed__)) {
BUFFER_JSON_OPTIONS_DEFAULT = 0,
BUFFER_JSON_OPTIONS_MINIFY = (1 << 0),
BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS = (1 << 1),
+ BUFFER_JSON_OPTIONS_NON_ANONYMOUS = (1 << 2),
} BUFFER_JSON_OPTIONS;
typedef struct web_buffer {
@@ -93,6 +94,8 @@ typedef struct web_buffer {
} json;
} BUFFER;
+#define CLEAN_BUFFER _cleanup_(buffer_freep) BUFFER
+
#define buffer_cacheable(wb) do { (wb)->options |= WB_CONTENT_CACHEABLE; if((wb)->options & WB_CONTENT_NO_CACHEABLE) (wb)->options &= ~WB_CONTENT_NO_CACHEABLE; } while(0)
#define buffer_no_cacheable(wb) do { (wb)->options |= WB_CONTENT_NO_CACHEABLE; if((wb)->options & WB_CONTENT_CACHEABLE) (wb)->options &= ~WB_CONTENT_CACHEABLE; (wb)->expires = 0; } while(0)
@@ -134,6 +137,10 @@ BUFFER *buffer_create(size_t size, size_t *statistics);
void buffer_free(BUFFER *b);
void buffer_increase(BUFFER *b, size_t free_size_required);
+static inline void buffer_freep(BUFFER **bp) {
+ if(bp) buffer_free(*bp);
+}
+
void buffer_snprintf(BUFFER *wb, size_t len, const char *fmt, ...) PRINTFLIKE(3, 4);
void buffer_vsprintf(BUFFER *wb, const char *fmt, va_list args);
void buffer_sprintf(BUFFER *wb, const char *fmt, ...) PRINTFLIKE(2,3);
@@ -209,6 +216,13 @@ static inline void buffer_fast_rawcat(BUFFER *wb, const char *txt, size_t len) {
buffer_overflow_check(wb);
}
+static inline void buffer_putc(BUFFER *wb, char c) {
+ buffer_need_bytes(wb, 2);
+ wb->buffer[wb->len++] = c;
+ wb->buffer[wb->len] = '\0';
+ buffer_overflow_check(wb);
+}
+
static inline void buffer_fast_strcat(BUFFER *wb, const char *txt, size_t len) {
if(unlikely(!txt || !*txt || !len)) return;
@@ -282,6 +296,19 @@ static inline void buffer_strncat(BUFFER *wb, const char *txt, size_t len) {
buffer_overflow_check(wb);
}
+static inline void buffer_memcat(BUFFER *wb, const void *mem, size_t bytes) {
+ if(unlikely(!mem)) return;
+
+ buffer_need_bytes(wb, bytes + 1);
+
+ memcpy(&wb->buffer[wb->len], mem, bytes);
+
+ wb->len += bytes;
+ wb->buffer[wb->len] = '\0';
+
+ buffer_overflow_check(wb);
+}
+
static inline void buffer_json_strcat(BUFFER *wb, const char *txt) {
if(unlikely(!txt || !*txt)) return;
@@ -809,8 +836,13 @@ static inline void buffer_json_member_add_boolean(BUFFER *wb, const char *key, b
static inline void buffer_json_member_add_array(BUFFER *wb, const char *key) {
buffer_print_json_comma_newline_spacing(wb);
- buffer_print_json_key(wb, key);
- buffer_fast_strcat(wb, ":[", 2);
+ if (key) {
+ buffer_print_json_key(wb, key);
+ buffer_fast_strcat(wb, ":[", 2);
+ }
+ else
+ buffer_fast_strcat(wb, "[", 1);
+
wb->json.stack[wb->json.depth].count++;
_buffer_json_depth_push(wb, BUFFER_JSON_ARRAY);
@@ -860,6 +892,13 @@ static inline void buffer_json_add_array_item_uint64(BUFFER *wb, uint64_t value)
wb->json.stack[wb->json.depth].count++;
}
+static inline void buffer_json_add_array_item_boolean(BUFFER *wb, bool value) {
+ buffer_print_json_comma_newline_spacing(wb);
+
+ buffer_strcat(wb, value ? "true" : "false");
+ wb->json.stack[wb->json.depth].count++;
+}
+
static inline void buffer_json_add_array_item_time_t(BUFFER *wb, time_t value) {
buffer_print_json_comma_newline_spacing(wb);
@@ -959,12 +998,14 @@ typedef enum __attribute__((packed)) {
RRDF_FIELD_OPTS_STICKY = (1 << 2), // the field should be sticky
RRDF_FIELD_OPTS_FULL_WIDTH = (1 << 3), // the field should get full width
RRDF_FIELD_OPTS_WRAP = (1 << 4), // the field should wrap
- RRDR_FIELD_OPTS_DUMMY = (1 << 5), // not a presentable field
+ RRDF_FIELD_OPTS_DUMMY = (1 << 5), // not a presentable field
+ RRDF_FIELD_OPTS_EXPANDED_FILTER = (1 << 6), // show the filter expanded
} RRDF_FIELD_OPTIONS;
typedef enum __attribute__((packed)) {
RRDF_FIELD_TYPE_NONE,
RRDF_FIELD_TYPE_INTEGER,
+ RRDF_FIELD_TYPE_BOOLEAN,
RRDF_FIELD_TYPE_STRING,
RRDF_FIELD_TYPE_DETAIL_STRING,
RRDF_FIELD_TYPE_BAR_WITH_INTEGER,
@@ -982,6 +1023,9 @@ static inline const char *rrdf_field_type_to_string(RRDF_FIELD_TYPE type) {
case RRDF_FIELD_TYPE_INTEGER:
return "integer";
+ case RRDF_FIELD_TYPE_BOOLEAN:
+ return "boolean";
+
case RRDF_FIELD_TYPE_STRING:
return "string";
@@ -1112,7 +1156,7 @@ static inline const char *rrdf_field_summary_to_string(RRDF_FIELD_SUMMARY summar
}
typedef enum __attribute__((packed)) {
- RRDF_FIELD_FILTER_NONE,
+ RRDF_FIELD_FILTER_NONE = 0,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_FILTER_MULTISELECT,
RRDF_FIELD_FILTER_FACET,
@@ -1173,8 +1217,9 @@ buffer_rrdf_table_add_field(BUFFER *wb, size_t field_id, const char *key, const
buffer_json_member_add_boolean(wb, "full_width", options & RRDF_FIELD_OPTS_FULL_WIDTH);
buffer_json_member_add_boolean(wb, "wrap", options & RRDF_FIELD_OPTS_WRAP);
+ buffer_json_member_add_boolean(wb, "default_expanded_filter", options & RRDF_FIELD_OPTS_EXPANDED_FILTER);
- if(options & RRDR_FIELD_OPTS_DUMMY)
+ if(options & RRDF_FIELD_OPTS_DUMMY)
buffer_json_member_add_boolean(wb, "dummy", true);
}
buffer_json_object_close(wb);
diff --git a/libnetdata/buffered_reader/Makefile.am b/libnetdata/buffered_reader/Makefile.am
new file mode 100644
index 00000000..161784b8
--- /dev/null
+++ b/libnetdata/buffered_reader/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/buffered_reader/README.md b/libnetdata/buffered_reader/README.md
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/libnetdata/buffered_reader/README.md
diff --git a/libnetdata/buffered_reader/buffered_reader.c b/libnetdata/buffered_reader/buffered_reader.c
new file mode 100644
index 00000000..7cd17abf
--- /dev/null
+++ b/libnetdata/buffered_reader/buffered_reader.c
@@ -0,0 +1,3 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
diff --git a/libnetdata/buffered_reader/buffered_reader.h b/libnetdata/buffered_reader/buffered_reader.h
new file mode 100644
index 00000000..4db57cd2
--- /dev/null
+++ b/libnetdata/buffered_reader/buffered_reader.h
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_BUFFERED_READER_H
+#define NETDATA_BUFFERED_READER_H
+
+struct buffered_reader {
+ ssize_t read_len;
+ ssize_t pos;
+ char read_buffer[PLUGINSD_LINE_MAX + 1];
+};
+
+static inline void buffered_reader_init(struct buffered_reader *reader) {
+ reader->read_buffer[0] = '\0';
+ reader->read_len = 0;
+ reader->pos = 0;
+}
+
+typedef enum {
+ BUFFERED_READER_READ_OK = 0,
+ BUFFERED_READER_READ_FAILED = -1,
+ BUFFERED_READER_READ_BUFFER_FULL = -2,
+ BUFFERED_READER_READ_POLLERR = -3,
+ BUFFERED_READER_READ_POLLHUP = -4,
+ BUFFERED_READER_READ_POLLNVAL = -5,
+ BUFFERED_READER_READ_POLL_UNKNOWN = -6,
+ BUFFERED_READER_READ_POLL_TIMEOUT = -7,
+ BUFFERED_READER_READ_POLL_FAILED = -8,
+} buffered_reader_ret_t;
+
+
+static inline buffered_reader_ret_t buffered_reader_read(struct buffered_reader *reader, int fd) {
+#ifdef NETDATA_INTERNAL_CHECKS
+ if(reader->read_buffer[reader->read_len] != '\0')
+ fatal("read_buffer does not start with zero");
+#endif
+
+ char *read_at = reader->read_buffer + reader->read_len;
+ ssize_t remaining = sizeof(reader->read_buffer) - reader->read_len - 1;
+
+ if(unlikely(remaining <= 0))
+ return BUFFERED_READER_READ_BUFFER_FULL;
+
+ ssize_t bytes_read = read(fd, read_at, remaining);
+ if(unlikely(bytes_read <= 0))
+ return BUFFERED_READER_READ_FAILED;
+
+ reader->read_len += bytes_read;
+ reader->read_buffer[reader->read_len] = '\0';
+
+ return BUFFERED_READER_READ_OK;
+}
+
+static inline buffered_reader_ret_t buffered_reader_read_timeout(struct buffered_reader *reader, int fd, int timeout_ms, bool log_error) {
+ errno = 0;
+ struct pollfd fds[1];
+
+ fds[0].fd = fd;
+ fds[0].events = POLLIN;
+
+ int ret = poll(fds, 1, timeout_ms);
+
+ if (ret > 0) {
+ /* There is data to read */
+ if (fds[0].revents & POLLIN)
+ return buffered_reader_read(reader, fd);
+
+ else if(fds[0].revents & POLLERR) {
+ if(log_error)
+ netdata_log_error("PARSER: read failed: POLLERR.");
+ return BUFFERED_READER_READ_POLLERR;
+ }
+ else if(fds[0].revents & POLLHUP) {
+ if(log_error)
+ netdata_log_error("PARSER: read failed: POLLHUP.");
+ return BUFFERED_READER_READ_POLLHUP;
+ }
+ else if(fds[0].revents & POLLNVAL) {
+ if(log_error)
+ netdata_log_error("PARSER: read failed: POLLNVAL.");
+ return BUFFERED_READER_READ_POLLNVAL;
+ }
+
+ if(log_error)
+ netdata_log_error("PARSER: poll() returned positive number, but POLLIN|POLLERR|POLLHUP|POLLNVAL are not set.");
+ return BUFFERED_READER_READ_POLL_UNKNOWN;
+ }
+ else if (ret == 0) {
+ if(log_error)
+ netdata_log_error("PARSER: timeout while waiting for data.");
+ return BUFFERED_READER_READ_POLL_TIMEOUT;
+ }
+
+ if(log_error)
+ netdata_log_error("PARSER: poll() failed with code %d.", ret);
+ return BUFFERED_READER_READ_POLL_FAILED;
+}
+
+/* Produce a full line if one exists, statefully return where we start next time.
+ * When we hit the end of the buffer with a partial line move it to the beginning for the next fill.
+ */
+static inline bool buffered_reader_next_line(struct buffered_reader *reader, BUFFER *dst) {
+ buffer_need_bytes(dst, reader->read_len - reader->pos + 2);
+
+ size_t start = reader->pos;
+
+ char *ss = &reader->read_buffer[start];
+ char *se = &reader->read_buffer[reader->read_len];
+ char *ds = &dst->buffer[dst->len];
+ char *de = &ds[dst->size - dst->len - 2];
+
+ if(ss >= se) {
+ *ds = '\0';
+ reader->pos = 0;
+ reader->read_len = 0;
+ reader->read_buffer[reader->read_len] = '\0';
+ return false;
+ }
+
+ // copy all bytes to buffer
+ while(ss < se && ds < de && *ss != '\n') {
+ *ds++ = *ss++;
+ dst->len++;
+ }
+
+ // if we have a newline, return the buffer
+ if(ss < se && ds < de && *ss == '\n') {
+ // newline found in the r->read_buffer
+
+ *ds++ = *ss++; // copy the newline too
+ dst->len++;
+
+ *ds = '\0';
+
+ reader->pos = ss - reader->read_buffer;
+ return true;
+ }
+
+ reader->pos = 0;
+ reader->read_len = 0;
+ reader->read_buffer[reader->read_len] = '\0';
+ return false;
+}
+
+#endif //NETDATA_BUFFERED_READER_H
diff --git a/libnetdata/clocks/clocks.c b/libnetdata/clocks/clocks.c
index 489e9685..adbad045 100644
--- a/libnetdata/clocks/clocks.c
+++ b/libnetdata/clocks/clocks.c
@@ -7,8 +7,14 @@
static clockid_t clock_boottime_to_use = CLOCK_MONOTONIC;
static clockid_t clock_monotonic_to_use = CLOCK_MONOTONIC;
-usec_t clock_monotonic_resolution = 1000;
-usec_t clock_realtime_resolution = 1000;
+// the default clock resolution is 1ms
+#define DEFAULT_CLOCK_RESOLUTION_UT ((usec_t)0 * USEC_PER_SEC + (usec_t)1 * USEC_PER_MS)
+
+// the max clock resolution is 10ms
+#define MAX_CLOCK_RESOLUTION_UT ((usec_t)0 * USEC_PER_SEC + (usec_t)10 * USEC_PER_MS)
+
+usec_t clock_monotonic_resolution = DEFAULT_CLOCK_RESOLUTION_UT;
+usec_t clock_realtime_resolution = DEFAULT_CLOCK_RESOLUTION_UT;
#ifndef HAVE_CLOCK_GETTIME
inline int clock_gettime(clockid_t clk_id __maybe_unused, struct timespec *ts) {
@@ -50,9 +56,24 @@ static void test_clock_boottime(void) {
}
static usec_t get_clock_resolution(clockid_t clock) {
- struct timespec ts;
- clock_getres(clock, &ts);
- return ts.tv_sec * USEC_PER_SEC + ts.tv_nsec * NSEC_PER_USEC;
+ struct timespec ts = { 0 };
+
+ if(clock_getres(clock, &ts) == 0) {
+ usec_t ret = (usec_t)ts.tv_sec * USEC_PER_SEC + (usec_t)ts.tv_nsec / NSEC_PER_USEC;
+ if(!ret && ts.tv_nsec > 0 && ts.tv_nsec < NSEC_PER_USEC)
+ return (usec_t)1;
+
+ else if(ret > MAX_CLOCK_RESOLUTION_UT) {
+ nd_log(NDLS_DAEMON, NDLP_ERR, "clock_getres(%d) returned %"PRIu64" usec is out of range, using defaults for clock resolution.", (int)clock, ret);
+ return DEFAULT_CLOCK_RESOLUTION_UT;
+ }
+
+ return ret;
+ }
+ else {
+ nd_log(NDLS_DAEMON, NDLP_ERR, "clock_getres(%d) failed, using defaults for clock resolution.", (int)clock);
+ return DEFAULT_CLOCK_RESOLUTION_UT;
+ }
}
// perform any initializations required for clocks
@@ -66,14 +87,6 @@ void clocks_init(void) {
clock_monotonic_resolution = get_clock_resolution(clock_monotonic_to_use);
clock_realtime_resolution = get_clock_resolution(CLOCK_REALTIME);
-
- // if for any reason these are zero, netdata will crash
- // since we use them as modulo to calculations
- if(!clock_realtime_resolution)
- clock_realtime_resolution = 1000;
-
- if(!clock_monotonic_resolution)
- clock_monotonic_resolution = 1000;
}
inline time_t now_sec(clockid_t clk_id) {
@@ -91,7 +104,7 @@ inline usec_t now_usec(clockid_t clk_id) {
netdata_log_error("clock_gettime(%d, &timespec) failed.", clk_id);
return 0;
}
- return (usec_t)ts.tv_sec * USEC_PER_SEC + (ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC;
+ return (usec_t)ts.tv_sec * USEC_PER_SEC + (usec_t)(ts.tv_nsec % NSEC_PER_SEC) / NSEC_PER_USEC;
}
inline int now_timeval(clockid_t clk_id, struct timeval *tv) {
@@ -113,6 +126,10 @@ inline time_t now_realtime_sec(void) {
return now_sec(CLOCK_REALTIME);
}
+inline msec_t now_realtime_msec(void) {
+ return now_usec(CLOCK_REALTIME) / USEC_PER_MS;
+}
+
inline usec_t now_realtime_usec(void) {
return now_usec(CLOCK_REALTIME);
}
@@ -275,7 +292,7 @@ void heartbeat_statistics(usec_t *min_ptr, usec_t *max_ptr, usec_t *average_ptr,
inline void heartbeat_init(heartbeat_t *hb) {
hb->realtime = 0ULL;
- hb->randomness = 250 * USEC_PER_MS + ((now_realtime_usec() * clock_realtime_resolution) % (250 * USEC_PER_MS));
+ hb->randomness = (usec_t)250 * USEC_PER_MS + ((usec_t)(now_realtime_usec() * clock_realtime_resolution) % (250 * USEC_PER_MS));
hb->randomness -= (hb->randomness % clock_realtime_resolution);
netdata_mutex_lock(&heartbeat_alignment_mutex);
@@ -298,8 +315,10 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
// TODO: The heartbeat tick should be specified at the heartbeat_init() function
usec_t tmp = (now_realtime_usec() * clock_realtime_resolution) % (tick / 2);
- error_limit_static_global_var(erl, 10, 0);
- error_limit(&erl, "heartbeat randomness of %"PRIu64" is too big for a tick of %"PRIu64" - setting it to %"PRIu64"", hb->randomness, tick, tmp);
+ nd_log_limit_static_global_var(erl, 10, 0);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_NOTICE,
+ "heartbeat randomness of %"PRIu64" is too big for a tick of %"PRIu64" - setting it to %"PRIu64"",
+ hb->randomness, tick, tmp);
hb->randomness = tmp;
}
@@ -325,13 +344,19 @@ usec_t heartbeat_next(heartbeat_t *hb, usec_t tick) {
if(unlikely(now < next)) {
errno = 0;
- error_limit_static_global_var(erl, 10, 0);
- error_limit(&erl, "heartbeat clock: woke up %"PRIu64" microseconds earlier than expected (can be due to the CLOCK_REALTIME set to the past).", next - now);
+ nd_log_limit_static_global_var(erl, 10, 0);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_NOTICE,
+ "heartbeat clock: woke up %"PRIu64" microseconds earlier than expected "
+ "(can be due to the CLOCK_REALTIME set to the past).",
+ next - now);
}
else if(unlikely(now - next > tick / 2)) {
errno = 0;
- error_limit_static_global_var(erl, 10, 0);
- error_limit(&erl, "heartbeat clock: woke up %"PRIu64" microseconds later than expected (can be due to system load or the CLOCK_REALTIME set to the future).", now - next);
+ nd_log_limit_static_global_var(erl, 10, 0);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_NOTICE,
+ "heartbeat clock: woke up %"PRIu64" microseconds later than expected "
+ "(can be due to system load or the CLOCK_REALTIME set to the future).",
+ now - next);
}
if(unlikely(!hb->realtime)) {
diff --git a/libnetdata/clocks/clocks.h b/libnetdata/clocks/clocks.h
index 5b88a457..2beb14ed 100644
--- a/libnetdata/clocks/clocks.h
+++ b/libnetdata/clocks/clocks.h
@@ -117,6 +117,7 @@ usec_t now_realtime_usec(void);
int now_monotonic_timeval(struct timeval *tv);
time_t now_monotonic_sec(void);
+msec_t now_realtime_msec(void);
usec_t now_monotonic_usec(void);
int now_monotonic_high_precision_timeval(struct timeval *tv);
time_t now_monotonic_high_precision_sec(void);
diff --git a/libnetdata/config/appconfig.h b/libnetdata/config/appconfig.h
index b3a09024..214a15ed 100644
--- a/libnetdata/config/appconfig.h
+++ b/libnetdata/config/appconfig.h
@@ -217,4 +217,4 @@ typedef struct _connector_instance {
_CONNECTOR_INSTANCE *add_connector_instance(struct section *connector, struct section *instance);
-#endif /* NETDATA_CONFIG_H */
+#endif /* NETDATA_CONFIG_H */ \ No newline at end of file
diff --git a/libnetdata/datetime/Makefile.am b/libnetdata/datetime/Makefile.am
new file mode 100644
index 00000000..161784b8
--- /dev/null
+++ b/libnetdata/datetime/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/datetime/README.md b/libnetdata/datetime/README.md
new file mode 100644
index 00000000..ba236631
--- /dev/null
+++ b/libnetdata/datetime/README.md
@@ -0,0 +1,11 @@
+<!--
+title: "Datetime"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/datetime/README.md
+sidebar_label: "Datetime"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/libnetdata"
+-->
+
+# Datetime
+
+Formatting dates and timestamps.
diff --git a/libnetdata/datetime/iso8601.c b/libnetdata/datetime/iso8601.c
new file mode 100644
index 00000000..8e3f4e02
--- /dev/null
+++ b/libnetdata/datetime/iso8601.c
@@ -0,0 +1,81 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+size_t iso8601_datetime_ut(char *buffer, size_t len, usec_t now_ut, ISO8601_OPTIONS options) {
+ if(unlikely(!buffer || len == 0))
+ return 0;
+
+ time_t t = (time_t)(now_ut / USEC_PER_SEC);
+ struct tm *tmp, tmbuf;
+
+ if(options & ISO8601_UTC)
+ // Use gmtime_r for UTC time conversion.
+ tmp = gmtime_r(&t, &tmbuf);
+ else
+ // Use localtime_r for local time conversion.
+ tmp = localtime_r(&t, &tmbuf);
+
+ if (unlikely(!tmp)) {
+ buffer[0] = '\0';
+ return 0;
+ }
+
+ // Format the date and time according to the ISO 8601 format.
+ size_t used_length = strftime(buffer, len, "%Y-%m-%dT%H:%M:%S", tmp);
+ if (unlikely(used_length == 0)) {
+ buffer[0] = '\0';
+ return 0;
+ }
+
+ if(options & ISO8601_MILLISECONDS) {
+ // Calculate the remaining microseconds
+ int milliseconds = (int) ((now_ut % USEC_PER_SEC) / USEC_PER_MS);
+ if(milliseconds && len - used_length > 4)
+ used_length += snprintfz(buffer + used_length, len - used_length, ".%03d", milliseconds);
+ }
+ else if(options & ISO8601_MICROSECONDS) {
+ // Calculate the remaining microseconds
+ int microseconds = (int) (now_ut % USEC_PER_SEC);
+ if(microseconds && len - used_length > 7)
+ used_length += snprintfz(buffer + used_length, len - used_length, ".%06d", microseconds);
+ }
+
+ if(options & ISO8601_UTC) {
+ if(used_length + 1 < len) {
+ buffer[used_length++] = 'Z';
+ buffer[used_length] = '\0'; // null-terminate the string.
+ }
+ }
+ else {
+ // Calculate the timezone offset in hours and minutes from UTC.
+ long offset = tmbuf.tm_gmtoff;
+ int hours = (int) (offset / 3600); // Convert offset seconds to hours.
+ int minutes = (int) ((offset % 3600) / 60); // Convert remainder to minutes (keep the sign for minutes).
+
+ // Check if timezone is UTC.
+ if(hours == 0 && minutes == 0) {
+ // For UTC, append 'Z' to the timestamp.
+ if(used_length + 1 < len) {
+ buffer[used_length++] = 'Z';
+ buffer[used_length] = '\0'; // null-terminate the string.
+ }
+ }
+ else {
+ // For non-UTC, format the timezone offset. Omit minutes if they are zero.
+ if(minutes == 0) {
+ // Check enough space is available for the timezone offset string.
+ if(used_length + 3 < len) // "+hh\0"
+ used_length += snprintfz(buffer + used_length, len - used_length, "%+03d", hours);
+ }
+ else {
+ // Check enough space is available for the timezone offset string.
+ if(used_length + 6 < len) // "+hh:mm\0"
+ used_length += snprintfz(buffer + used_length, len - used_length,
+ "%+03d:%02d", hours, abs(minutes));
+ }
+ }
+ }
+
+ return used_length;
+}
diff --git a/libnetdata/datetime/iso8601.h b/libnetdata/datetime/iso8601.h
new file mode 100644
index 00000000..ce480096
--- /dev/null
+++ b/libnetdata/datetime/iso8601.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_ISO8601_H
+#define NETDATA_ISO8601_H
+
+typedef enum __attribute__((__packed__)) {
+ ISO8601_UTC = (1 << 0),
+ ISO8601_LOCAL_TIMEZONE = (1 << 1),
+ ISO8601_MILLISECONDS = (1 << 2),
+ ISO8601_MICROSECONDS = (1 << 3),
+} ISO8601_OPTIONS;
+
+#define ISO8601_MAX_LENGTH 64
+size_t iso8601_datetime_ut(char *buffer, size_t len, usec_t now_ut, ISO8601_OPTIONS options);
+
+#endif //NETDATA_ISO8601_H
diff --git a/libnetdata/datetime/rfc3339.c b/libnetdata/datetime/rfc3339.c
new file mode 100644
index 00000000..157e340d
--- /dev/null
+++ b/libnetdata/datetime/rfc3339.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#include "rfc3339.h"
+
+size_t rfc3339_datetime_ut(char *buffer, size_t len, usec_t now_ut, size_t fractional_digits, bool utc) {
+ if (!buffer || len == 0)
+ return 0;
+
+ time_t t = (time_t)(now_ut / USEC_PER_SEC);
+ struct tm *tmp, tmbuf;
+
+ if (utc)
+ tmp = gmtime_r(&t, &tmbuf);
+ else
+ tmp = localtime_r(&t, &tmbuf);
+
+ if (!tmp) {
+ buffer[0] = '\0';
+ return 0;
+ }
+
+ size_t used_length = strftime(buffer, len, "%Y-%m-%dT%H:%M:%S", tmp);
+ if (used_length == 0) {
+ buffer[0] = '\0';
+ return 0;
+ }
+
+ if (fractional_digits >= 0 && fractional_digits <= 9) {
+ int fractional_part = (int)(now_ut % USEC_PER_SEC);
+ if (fractional_part && len - used_length > fractional_digits + 1) {
+ char format[] = ".%01d";
+ format[3] = (char)('0' + fractional_digits);
+
+ // Adjust fractional part
+ fractional_part /= (int)pow(10, 6 - fractional_digits);
+
+ used_length += snprintf(buffer + used_length, len - used_length,
+ format, fractional_part);
+ }
+ }
+
+ if (utc) {
+ if (used_length + 1 < len) {
+ buffer[used_length++] = 'Z';
+ buffer[used_length] = '\0';
+ }
+ }
+ else {
+ long offset = tmbuf.tm_gmtoff;
+ int hours = (int)(offset / 3600);
+ int minutes = abs((int)((offset % 3600) / 60));
+
+ if (used_length + 7 < len) { // Space for "+HH:MM\0"
+ used_length += snprintf(buffer + used_length, len - used_length, "%+03d:%02d", hours, minutes);
+ }
+ }
+
+ return used_length;
+}
+
+usec_t rfc3339_parse_ut(const char *rfc3339, char **endptr) {
+ struct tm tm = { 0 };
+ int tz_hours = 0, tz_mins = 0;
+ char *s;
+ usec_t timestamp, usec = 0;
+
+ // Use strptime to parse up to seconds
+ s = strptime(rfc3339, "%Y-%m-%dT%H:%M:%S", &tm);
+ if (!s)
+ return 0; // Parsing error
+
+ // Parse fractional seconds if present
+ if (*s == '.') {
+ char *next;
+ usec = strtoul(s + 1, &next, 10);
+ int digits_parsed = (int)(next - (s + 1));
+
+ if (digits_parsed < 1 || digits_parsed > 9)
+ return 0; // parsing error
+
+ static const usec_t fix_usec[] = {
+ 1000000, // 0 digits (not used)
+ 100000, // 1 digit
+ 10000, // 2 digits
+ 1000, // 3 digits
+ 100, // 4 digits
+ 10, // 5 digits
+ 1, // 6 digits
+ 10, // 7 digits
+ 100, // 8 digits
+ 1000, // 9 digits
+ };
+ usec = digits_parsed <= 6 ? usec * fix_usec[digits_parsed] : usec / fix_usec[digits_parsed];
+
+ s = next;
+ }
+
+ // Check and parse timezone if present
+ int tz_offset = 0;
+ if (*s == '+' || *s == '-') {
+ // Parse the hours:mins part of the timezone
+
+ if (!isdigit(s[1]) || !isdigit(s[2]) || s[3] != ':' ||
+ !isdigit(s[4]) || !isdigit(s[5]))
+ return 0; // Parsing error
+
+ char tz_sign = *s;
+ tz_hours = (s[1] - '0') * 10 + (s[2] - '0');
+ tz_mins = (s[4] - '0') * 10 + (s[5] - '0');
+
+ tz_offset = tz_hours * 3600 + tz_mins * 60;
+ tz_offset *= (tz_sign == '+' ? 1 : -1);
+
+ s += 6; // Move past the timezone part
+ }
+ else if (*s == 'Z')
+ s++;
+ else
+ return 0; // Invalid RFC 3339 format
+
+ // Convert to time_t (assuming local time, then adjusting for timezone later)
+ time_t epoch_s = mktime(&tm);
+ if (epoch_s == -1)
+ return 0; // Error in time conversion
+
+ timestamp = (usec_t)epoch_s * USEC_PER_SEC + usec;
+ timestamp -= tz_offset * USEC_PER_SEC;
+
+ if(endptr)
+ *endptr = s;
+
+ return timestamp;
+}
diff --git a/libnetdata/datetime/rfc3339.h b/libnetdata/datetime/rfc3339.h
new file mode 100644
index 00000000..88ebb3ec
--- /dev/null
+++ b/libnetdata/datetime/rfc3339.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_RFC3339_H
+#define NETDATA_RFC3339_H
+
+#define RFC3339_MAX_LENGTH 36
+size_t rfc3339_datetime_ut(char *buffer, size_t len, usec_t now_ut, size_t fractional_digits, bool utc);
+usec_t rfc3339_parse_ut(const char *rfc3339, char **endptr);
+
+#endif //NETDATA_RFC3339_H
diff --git a/libnetdata/datetime/rfc7231.c b/libnetdata/datetime/rfc7231.c
new file mode 100644
index 00000000..4925ed2c
--- /dev/null
+++ b/libnetdata/datetime/rfc7231.c
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+// Format now_t as an RFC 7231 IMF-fixdate in UTC,
+// e.g. "Sun, 06 Nov 1994 08:49:37 GMT".
+// Returns the number of characters written (excluding the terminating NUL),
+// or 0 on failure — in which case buffer is set to the empty string
+// (when both buffer and len are non-zero).
+inline size_t rfc7231_datetime(char *buffer, size_t len, time_t now_t) {
+ if (unlikely(!buffer || !len))
+ return 0;
+
+ struct tm *tmp, tmbuf;
+
+ // Use gmtime_r for UTC time conversion.
+ tmp = gmtime_r(&now_t, &tmbuf);
+
+ if (unlikely(!tmp)) {
+ buffer[0] = '\0';
+ return 0;
+ }
+
+ // Format the date and time according to the RFC 7231 format.
+ // strftime() returns 0 when the output (incl. NUL) does not fit in len.
+ size_t ret = strftime(buffer, len, "%a, %d %b %Y %H:%M:%S GMT", tmp);
+ if (unlikely(ret == 0))
+ buffer[0] = '\0';
+
+ return ret;
+}
+
+// Microsecond-resolution convenience wrapper: truncates now_ut to whole
+// seconds and delegates to rfc7231_datetime().
+size_t rfc7231_datetime_ut(char *buffer, size_t len, usec_t now_ut) {
+ return rfc7231_datetime(buffer, len, (time_t) (now_ut / USEC_PER_SEC));
+}
diff --git a/libnetdata/datetime/rfc7231.h b/libnetdata/datetime/rfc7231.h
new file mode 100644
index 00000000..5ba93053
--- /dev/null
+++ b/libnetdata/datetime/rfc7231.h
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_RFC7231_H
+#define NETDATA_RFC7231_H
+
+#define RFC7231_MAX_LENGTH 30
+size_t rfc7231_datetime(char *buffer, size_t len, time_t now_t);
+size_t rfc7231_datetime_ut(char *buffer, size_t len, usec_t now_ut);
+
+#endif //NETDATA_RFC7231_H
diff --git a/libnetdata/dictionary/dictionary.c b/libnetdata/dictionary/dictionary.c
index 2d5313c3..45922abf 100644
--- a/libnetdata/dictionary/dictionary.c
+++ b/libnetdata/dictionary/dictionary.c
@@ -422,7 +422,7 @@ static inline void DICTIONARY_ENTRIES_MINUS1(DICTIONARY *dict) {
size_t entries; (void)entries;
if(unlikely(is_dictionary_single_threaded(dict))) {
dict->version++;
- entries = dict->entries++;
+ entries = dict->entries--;
}
else {
__atomic_fetch_add(&dict->version, 1, __ATOMIC_RELAXED);
@@ -2627,7 +2627,7 @@ static char **dictionary_unittest_generate_names(size_t entries) {
char **names = mallocz(sizeof(char *) * entries);
for(size_t i = 0; i < entries ;i++) {
char buf[25 + 1] = "";
- snprintfz(buf, 25, "name.%zu.0123456789.%zu!@#$%%^&*(),./[]{}\\|~`", i, entries / 2 + i);
+ snprintfz(buf, sizeof(buf) - 1, "name.%zu.0123456789.%zu!@#$%%^&*(),./[]{}\\|~`", i, entries / 2 + i);
names[i] = strdupz(buf);
}
return names;
@@ -2637,7 +2637,7 @@ static char **dictionary_unittest_generate_values(size_t entries) {
char **values = mallocz(sizeof(char *) * entries);
for(size_t i = 0; i < entries ;i++) {
char buf[25 + 1] = "";
- snprintfz(buf, 25, "value-%zu-0987654321.%zu%%^&*(),. \t !@#$/[]{}\\|~`", i, entries / 2 + i);
+ snprintfz(buf, sizeof(buf) - 1, "value-%zu-0987654321.%zu%%^&*(),. \t !@#$/[]{}\\|~`", i, entries / 2 + i);
values[i] = strdupz(buf);
}
return values;
@@ -3253,13 +3253,13 @@ static void *unittest_dict_thread(void *arg) {
char buf [256 + 1];
for (int i = 0; i < 1000; i++) {
- snprintfz(buf, 256, "del/flush test %d", i);
+ snprintfz(buf, sizeof(buf) - 1, "del/flush test %d", i);
dictionary_set(tu->dict, buf, NULL, 0);
tu->stats.ops.inserts++;
}
for (int i = 0; i < 1000; i++) {
- snprintfz(buf, 256, "del/flush test %d", i);
+ snprintfz(buf, sizeof(buf) - 1, "del/flush test %d", i);
dictionary_del(tu->dict, buf);
tu->stats.ops.deletes++;
}
diff --git a/libnetdata/dyn_conf/README.md b/libnetdata/dyn_conf/README.md
index 6c812740..17d059b0 100644
--- a/libnetdata/dyn_conf/README.md
+++ b/libnetdata/dyn_conf/README.md
@@ -1,6 +1,6 @@
# Netdata Dynamic Configuration
-Purpose of Netdata Dynamic Configuration is to allow configuration of select Netdata plugins and options through the
+Purpose of Netdata Dynamic Configuration is to allow configuration of select Netdata plugins and options through the
Netdata API and by extension by UI.
## HTTP API documentation
@@ -9,159 +9,180 @@ Netdata API and by extension by UI.
For summary of all jobs and their statuses (for all children that stream to parent) use the following URL:
-| Method | Endpoint | Description |
-|:-------:|-------------------------------|------------------------------------------------------------|
-| **GET** | `api/v2/job_statuses` | list of Jobs |
-| **GET** | `api/v2/job_statuses?grouped` | list of Jobs (hierarchical, grouped by host/plugin/module) |
+| Method | Endpoint | Description |
+|:-------:|-------------------------------|------------------------------------------------------------|
+| **GET** | `api/v2/job_statuses` | list of Jobs |
+| **GET** | `api/v2/job_statuses?grouped` | list of Jobs (hierarchical, grouped by host/plugin/module) |
### Dyncfg API
### Top level
-| Method | Endpoint | Description |
-|:-------:|------------------|-----------------------------------------|
-| **GET** | `/api/v2/config` | registered Plugins (sent DYNCFG_ENABLE) |
+| Method | Endpoint | Description |
+|:-------:|------------------|-----------------------------------------|
+| **GET** | `/api/v2/config` | registered Plugins (sent DYNCFG_ENABLE) |
### Plugin level
-| Method | Endpoint | Description |
-|:-------:|-----------------------------------|------------------------------|
-| **GET** | `/api/v2/config/[plugin]` | Plugin config |
-| **PUT** | `/api/v2/config/[plugin]` | update Plugin config |
-| **GET** | `/api/v2/config/[plugin]/modules` | Modules registered by Plugin |
-| **GET** | `/api/v2/config/[plugin]/schema` | Plugin config schema |
+| Method | Endpoint | Description |
+|:-------:|-----------------------------------|------------------------------|
+| **GET** | `/api/v2/config/[plugin]` | Plugin config |
+| **PUT** | `/api/v2/config/[plugin]` | update Plugin config |
+| **GET** | `/api/v2/config/[plugin]/modules` | Modules registered by Plugin |
+| **GET** | `/api/v2/config/[plugin]/schema` | Plugin config schema |
### Module level
-| Method | Endpoint | Description |
-|:-------:|-----------------------------------------------|---------------------------|
-| **GET** | `/api/v2/config/<plugin>/[module]` | Module config |
-| **PUT** | `/api/v2/config/[plugin]/[module]` | update Module config |
-| **GET** | `/api/v2/config/[plugin]/[module]/jobs` | Jobs registered by Module |
-| **GET** | `/api/v2/config/[plugin]/[module]/job_schema` | Job config schema |
-| **GET** | `/api/v2/config/[plugin]/[module]/schema` | Module config schema |
+| Method | Endpoint | Description |
+|:-------:|-----------------------------------------------|---------------------------|
+| **GET** | `/api/v2/config/<plugin>/[module]` | Module config |
+| **PUT** | `/api/v2/config/[plugin]/[module]` | update Module config |
+| **GET** | `/api/v2/config/[plugin]/[module]/jobs` | Jobs registered by Module |
+| **GET** | `/api/v2/config/[plugin]/[module]/job_schema` | Job config schema |
+| **GET** | `/api/v2/config/[plugin]/[module]/schema` | Module config schema |
### Job level - only for modules where `module_type == job_array`
-| Method | Endpoint | Description |
-|:----------:|------------------------------------------|--------------------------------|
-| **GET** | `/api/v2/config/[plugin]/[module]/[job]` | Job config |
-| **PUT** | `/api/v2/config/[plugin]/[module]/[job]` | update Job config |
-| **POST** | `/api/v2/config/[plugin]/[module]/[job]` | create Job |
-| **DELETE** | `/api/v2/config/[plugin]/[module]/[job]` | delete Job (created by Dyncfg) |
+| Method | Endpoint | Description |
+|:----------:|------------------------------------------|--------------------------------|
+| **GET** | `/api/v2/config/[plugin]/[module]/[job]` | Job config |
+| **PUT** | `/api/v2/config/[plugin]/[module]/[job]` | update Job config |
+| **POST** | `/api/v2/config/[plugin]/[module]/[job]` | create Job |
+| **DELETE** | `/api/v2/config/[plugin]/[module]/[job]` | delete Job (created by Dyncfg) |
-## AGENT<->PLUGIN interface documentation
+## Internal Plugins API
-### 1. DYNCFG_ENABLE
+TBD
+
+## External Plugins API
+
+### Commands plugins can use
+
+#### DYNCFG_ENABLE
Plugin signifies to agent its ability to use new dynamic config and the name it wishes to use by sending
-```
-plugin->agent:
-=============
-DYNCFG_ENABLE [plugin_url_name]
-```
+```
+DYNCFG_ENABLE [{PLUGIN_NAME}]
+```
-This can be sent only once per lifetime of the plugin (at startup or later) sending it multiple times is considered a
+This can be sent only once per lifetime of the plugin (at startup or later) sending it multiple times is considered a
protocol violation and plugin might get terminated.
-After this command is sent the plugin has to be ready to accept all the new commands/keywords related to dynamic
+
+After this command is sent the plugin has to be ready to accept all the new commands/keywords related to dynamic
configuration (this command lets agent know this plugin is dyncfg capable and wishes to use dyncfg functionality).
-After this command agent can call
+#### DYNCFG_RESET
-```
-agent->plugin:
-=============
-FUNCTION_PAYLOAD [UUID] 1 "set_plugin_config"
-the new configuration
-blah blah blah
-FUNCTION_PAYLOAD_END
-
-plugin->agent:
-=============
-FUNCTION_RESULT_BEGIN [UUID] [(1/0)(accept/reject)] [text/plain] 5
-FUNCTION_RESULT_END
-```
+Sending this, will reset the internal state of the agent, considering this a `DYNCFG_ENABLE`.
-to set the new config which can be accepted/rejected by plugin by sending answer for this FUNCTION as it would with any
-other regular function.
+```
+DYNCFG_RESET
+```
-The new `FUNCTION_PAYLOAD` command differs from regular `FUNCTION` command exclusively in its ability to send bigger
-payloads (configuration file contents) to the plugin (not just parameters list).
-Agent can also call (after `DYNCFG_ENABLE`)
+#### DYNCFG_REGISTER_MODULE
```
-Agent->plugin:
-=============
-FUNCTION [UID] 1 "get_plugin_config"
-
-Plugin->agent:
-=============
-FUNCTION_RESULT_BEGIN [UID] 1 text/plain 5
-{
- "the currently used config from plugin" : "nice"
-}
-FUNCTION_RESULT_END
+DYNCFG_REGISTER_MODULE {MODULE_NAME} {MODULE_TYPE}
```
-and
+Module has to choose one of following types at registration:
+
+- `single` - module itself has configuration but does not accept any jobs *(this is useful mainly for internal netdata
+ configurable things like webserver etc.)*
+
+- `job_array` - module itself **can** *(not must)* have configuration and it has an array of jobs which can be added,
+ modified and deleted. **this is what plugin developer needs in most cases**
+
+After a module has been registered agent can call `set_module_config`, `get_module_config` and `get_module_config_schema`.
+
+When `MODULE_TYPE` is `job_array` the agent may also send `set_job_config`, `get_job_config` and `get_job_config_schema`.
+
+#### DYNCFG_REGISTER_JOB
+
+The plugin can use `DYNCFG_REGISTER_JOB` to register its own configuration jobs. It should not register jobs configured
+via DYNCFG (doing so, the agent will shut down the plugin).
+
```
-Agent->plugin:
-=============
-FUNCTION [UID] 1 "get_plugin_config_schema"
-
-Plugin->agent:
-=============
-FUNCTION_RESULT_BEGIN [UID] 1 text/plain 5
-{
- "the schema of plugin configuration" : "splendid"
-}
-FUNCTION_RESULT_END
+DYNCFG_REGISTER_JOB {MODULE_NAME} {JOB_NAME} {JOB_TYPE} {FLAGS}
```
-Plugin can also register zero, one or more configurable modules using:
+Where:
+
+- `MODULE_NAME` is the name of the module.
+- `JOB_NAME` is the name of the job.
+- `JOB_TYPE` is either `stock` or `autodiscovered`.
+- `FLAGS`, just send zero.
+
+#### REPORT_JOB_STATUS
```
-plugin->agent:
-=============
-DYNCFG_REGISTER_MODULE [module_url_name] (job_array|single)
+REPORT_JOB_STATUS {MODULE_NAME} {JOB_NAME} {STATUS} {STATE} ["REASON"]
```
-modules can be added any time during plugins lifetime (you are not required to add them all at startup).
+Note the REASON parameter is optional and can be entirely omitted (for example, when the state is OK there is no need to send any reason).
-### 2. DYNCFG_REGISTER_MODULE
+Where:
-Module has to choose one of following types at registration:
+- `MODULE_NAME` is the name of the module.
+- `JOB_NAME` is the name of the job.
+- `STATUS` is one of `stopped`, `running`, or `error`.
+- `STATE`, just send zero.
+- `REASON` is a message describing the status. In case you don't want to send any reason string it is preferable to omit this parameter altogether (as opposed to sending empty string `""`).
-- `single` - module itself has configuration but does not accept any jobs *(this is useful mainly for internal netdata
- configurable things like webserver etc.)*
-- `job_array` - module itself **can** *(not must)* have configuration and it has an array of jobs which can be added,
- modified and deleted. **this is what plugin developer needs in most cases**
-After module has been registered agent can call
+### Commands plugins must serve
+
+Once a plugin calls `DYNCFG_ENABLE`, it must be able to handle these calls.
-- `set_module_config [module]` FUNCTION_PAYLOAD
-- `get_module_config [module]` FUNCTION
-- `get_module_config_schema [module]` FUNCTION
+function|parameters|prerequisites|request payload|response payload|
+:---:|:---:|:---:|:---:|:---:|
+`set_plugin_config`|none|`DYNCFG_ENABLE`|plugin configuration|none|
+`get_plugin_config`|none|`DYNCFG_ENABLE`|none|plugin configuration|
+`get_plugin_config_schema`|none|`DYNCFG_ENABLE`|none|plugin configuration schema|
+`set_module_config`|`module_name`|`DYNCFG_REGISTER_MODULE`|module configuration|none|
+`get_module_config`|`module_name`|`DYNCFG_REGISTER_MODULE`|none|module configuration|
+`get_module_config_schema`|`module_name`|`DYNCFG_REGISTER_MODULE`|none|module configuration schema|
+`set_job_config`|`module_name`, `job_name`|`DYNCFG_REGISTER_MODULE`|job configuration|none|
+`get_job_config`|`module_name`, `job_name`|`DYNCFG_REGISTER_MODULE`|none|job configuration|
+`get_job_config_schema`|`module_name`, `job_name`|`DYNCFG_REGISTER_MODULE`|none|job configuration schema|
-with same syntax as `set_plugin_config` and `get_plugin_config`. In case of `set` command the plugin has ability to
-reject the new configuration pushed to it.
+All of them work like this:
-In a case the module was registered as `job_array` type following commands can be used to manage jobs:
+If the request payload is `none`, then the request looks like this:
-### 3. Job interface for job_array modules
+```bash
+FUNCTION {TRANSACTION_UUID} {TIMEOUT_SECONDS} "{function} {parameters}"
+```
+
+When there is payload, the request looks like this:
+
+```bash
+FUNCTION_PAYLOAD {TRANSACTION_UUID} {TIMEOUT_SECONDS} "{function} {parameters}"
+<payload>
+FUNCTION_PAYLOAD_END
+```
+
+In all cases, the response is like this:
+
+```bash
+FUNCTION_RESULT_BEGIN {TRANSACTION_UUID} {HTTP_RESPONSE_CODE} "{CONTENT_TYPE}" {EXPIRATION_TIMESTAMP}
+<payload>
+FUNCTION_RESULT_END
+```
+Where:
+- `TRANSACTION_UUID` is the same UUID received with the request.
+- `HTTP_RESPONSE_CODE` is either `0` (rejected) or `1` (accepted).
+- `CONTENT_TYPE` should reflect the `payload` returned.
+- `EXPIRATION_TIMESTAMP` can be zero.
-- `get_job_config_schema [module]` - FUNCTION
-- `get_job_config [module] [job]` - FUNCTION
-- `set_job_config [module] [job]` - FUNCTION_PAYLOAD
-- `delete_job_name [module] [job]` - FUNCTION
-### 4. Streaming
+## DYNCFG with streaming
-When above commands are transferred trough streaming additionally `plugin_name` is prefixed as first parameter. This is
+When the above commands are transferred through streaming, `plugin_name` is additionally prefixed as the first parameter. This is
done to allow routing to appropriate plugin @child.
-As a plugin developer you don't need to concern yourself with this detail as that parameter is stripped when sent to the
+As a plugin developer you don't need to concern yourself with this detail as that parameter is stripped when sent to the
+plugin *(and added when sent through streaming)* automagically.
diff --git a/libnetdata/dyn_conf/dyn_conf.h b/libnetdata/dyn_conf/dyn_conf.h
index f6a5fe49..d584343b 100644
--- a/libnetdata/dyn_conf/dyn_conf.h
+++ b/libnetdata/dyn_conf/dyn_conf.h
@@ -112,7 +112,7 @@ static inline const char* job_type2str(enum job_type type)
}
}
-static inline enum job_type str2job_type(const char *type_name)
+static inline enum job_type dyncfg_str2job_type(const char *type_name)
{
if (strcmp(type_name, "stock") == 0)
return JOB_TYPE_STOCK;
diff --git a/libnetdata/ebpf/ebpf.c b/libnetdata/ebpf/ebpf.c
index 1bd45ef2..99f23b9c 100644
--- a/libnetdata/ebpf/ebpf.c
+++ b/libnetdata/ebpf/ebpf.c
@@ -1367,6 +1367,8 @@ void ebpf_load_addresses(ebpf_addresses_t *fa, int fd)
char *fcnt = procfile_lineword(ff, l, 2);
uint32_t hash = simple_hash(fcnt);
if (fa->hash == hash && !strcmp(fcnt, fa->function)) {
+ char *type = procfile_lineword(ff, l, 2);
+ fa->type = type[0];
if (fd > 0) {
char addr[128];
snprintf(addr, 127, "0x%s", procfile_lineword(ff, l, 0));
diff --git a/libnetdata/ebpf/ebpf.h b/libnetdata/ebpf/ebpf.h
index 6708f669..9218cb9d 100644
--- a/libnetdata/ebpf/ebpf.h
+++ b/libnetdata/ebpf/ebpf.h
@@ -148,6 +148,7 @@ typedef struct ebpf_addresses {
uint32_t hash;
// We use long as address, because it matches system length
unsigned long addr;
+ uint32_t type;
} ebpf_addresses_t;
extern char *ebpf_user_config_dir;
diff --git a/libnetdata/endian.h b/libnetdata/endian.h
new file mode 100644
index 00000000..66f75cb1
--- /dev/null
+++ b/libnetdata/endian.h
@@ -0,0 +1,32 @@
+#ifndef LIBNETDATA_ENDIAN_H
+#define LIBNETDATA_ENDIAN_H
+
+/** compatibility header for endian.h
+ * This is a simple compatibility shim to convert
+ * BSD/Linux endian macros to the Mac OS X equivalents.
+ * It is public domain.
+ * */
+
+#ifndef __APPLE__
+#error "This header file (endian.h) is MacOS X specific.\n"
+#endif /* __APPLE__ */
+
+
+#include <libkern/OSByteOrder.h>
+
+#define htobe16(x) OSSwapHostToBigInt16(x)
+#define htole16(x) OSSwapHostToLittleInt16(x)
+#define be16toh(x) OSSwapBigToHostInt16(x)
+#define le16toh(x) OSSwapLittleToHostInt16(x)
+
+#define htobe32(x) OSSwapHostToBigInt32(x)
+#define htole32(x) OSSwapHostToLittleInt32(x)
+#define be32toh(x) OSSwapBigToHostInt32(x)
+#define le32toh(x) OSSwapLittleToHostInt32(x)
+
+#define htobe64(x) OSSwapHostToBigInt64(x)
+#define htole64(x) OSSwapHostToLittleInt64(x)
+#define be64toh(x) OSSwapBigToHostInt64(x)
+#define le64toh(x) OSSwapLittleToHostInt64(x)
+
+#endif /* LIBNETDATA_ENDIAN_H */
diff --git a/libnetdata/eval/eval.c b/libnetdata/eval/eval.c
index 236be932..a1ac4483 100644
--- a/libnetdata/eval/eval.c
+++ b/libnetdata/eval/eval.c
@@ -384,7 +384,7 @@ static inline void print_parsed_as_constant(BUFFER *out, NETDATA_DOUBLE n) {
}
char b[100+1], *s;
- snprintfz(b, 100, NETDATA_DOUBLE_FORMAT, n);
+ snprintfz(b, sizeof(b) - 1, NETDATA_DOUBLE_FORMAT, n);
s = &b[strlen(b) - 1];
while(s > b && *s == '0') {
diff --git a/libnetdata/facets/facets.c b/libnetdata/facets/facets.c
index e72cb732..4a5f5442 100644
--- a/libnetdata/facets/facets.c
+++ b/libnetdata/facets/facets.c
@@ -1,12 +1,14 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "facets.h"
-#define HISTOGRAM_COLUMNS 150 // the target number of points in a histogram
+#define FACETS_HISTOGRAM_COLUMNS 150 // the target number of points in a histogram
#define FACETS_KEYS_WITH_VALUES_MAX 200 // the max number of keys that can be facets
#define FACETS_KEYS_IN_ROW_MAX 500 // the max number of keys in a row
-#define FACETS_KEYS_HASHTABLE_ENTRIES 127
-#define FACETS_VALUES_HASHTABLE_ENTRIES 31
+#define FACETS_KEYS_HASHTABLE_ENTRIES 15
+#define FACETS_VALUES_HASHTABLE_ENTRIES 15
+
+static inline void facets_reset_key(FACET_KEY *k);
// ----------------------------------------------------------------------------
@@ -30,14 +32,12 @@ static const uint8_t id_encoding_characters_reverse[256] = {
['6'] = 60, ['7'] = 61, ['8'] = 62, ['9'] = 63
};
-__attribute__((constructor)) void initialize_facets_id_encoding_characters_reverse(void) {
-
-}
-
#define FACET_STRING_HASH_SIZE 12
#define FACETS_HASH XXH64_hash_t
#define FACETS_HASH_FUNCTION(src, len) XXH3_64bits(src, len)
#define FACETS_HASH_ZERO (FACETS_HASH)0
+#define FACETS_HASH_UNSAMPLED (FACETS_HASH)(UINT64_MAX - 1)
+#define FACETS_HASH_ESTIMATED (FACETS_HASH)UINT64_MAX
static inline void facets_hash_to_str(FACETS_HASH num, char *out) {
out[11] = '\0';
@@ -99,76 +99,47 @@ static inline bool is_valid_string_hash(const char *s) {
}
// ----------------------------------------------------------------------------
+// hashtable for FACET_VALUE
-typedef uint64_t SIMPLE_HASHTABLE_HASH;
-#define SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS 32
-
-typedef struct simple_hashtable_slot {
- SIMPLE_HASHTABLE_HASH hash;
- void *data;
-} SIMPLE_HASHTABLE_SLOT;
+// cleanup hashtable defines
+#undef SIMPLE_HASHTABLE_SORT_FUNCTION
+#undef SIMPLE_HASHTABLE_VALUE_TYPE
+#undef SIMPLE_HASHTABLE_NAME
+#undef NETDATA_SIMPLE_HASHTABLE_H
-typedef struct simple_hashtable {
- size_t size;
- SIMPLE_HASHTABLE_SLOT *hashtable;
-} SIMPLE_HASHTABLE;
-
-static void simple_hashtable_init(SIMPLE_HASHTABLE *ht, size_t size) {
- ht->size = size;
- ht->hashtable = callocz(ht->size, sizeof(*ht->hashtable));
-}
-
-static void simple_hashtable_free(SIMPLE_HASHTABLE *ht) {
- freez(ht->hashtable);
- ht->hashtable = NULL;
- ht->size = 0;
-}
-
-static inline SIMPLE_HASHTABLE_SLOT *simple_hashtable_get_slot(SIMPLE_HASHTABLE *ht, SIMPLE_HASHTABLE_HASH hash) {
- // IMPORTANT:
- // If the hashtable supported deletions, we would need to have a special slot.data value
- // to mark deleted values and assume they are occupied during lookup, but empty during insert.
- // But for our case, we don't need it, since we never delete items from the hashtable.
-
- size_t slot = hash % ht->size;
- if(!ht->hashtable[slot].data || ht->hashtable[slot].hash == hash)
- return &ht->hashtable[slot];
-
- slot = ((hash >> SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS) + 1) % ht->size;
- // Linear probing until we find it
- while (ht->hashtable[slot].data && ht->hashtable[slot].hash != hash)
- slot = (slot + 1) % ht->size; // Wrap around if necessary
-
- return &ht->hashtable[slot];
-}
-
-static void simple_hashtable_resize_double(SIMPLE_HASHTABLE *ht) {
- SIMPLE_HASHTABLE_SLOT *old = ht->hashtable;
- size_t old_size = ht->size;
-
- ht->size = (ht->size * 2) + 1;
- ht->hashtable = callocz(ht->size, sizeof(*ht->hashtable));
- for(size_t i = 0 ; i < old_size ; i++) {
- if(!old[i].data)
- continue;
+struct facet_value;
+// #define SIMPLE_HASHTABLE_SORT_FUNCTION compare_facet_value
+#define SIMPLE_HASHTABLE_VALUE_TYPE struct facet_value
+#define SIMPLE_HASHTABLE_NAME _VALUE
+#include "../simple_hashtable.h"
- SIMPLE_HASHTABLE_SLOT *slot = simple_hashtable_get_slot(ht, old[i].hash);
- *slot = old[i];
- }
+// ----------------------------------------------------------------------------
+// hashtable for FACET_KEY
- freez(old);
-}
+// cleanup hashtable defines
+#undef SIMPLE_HASHTABLE_SORT_FUNCTION
+#undef SIMPLE_HASHTABLE_VALUE_TYPE
+#undef SIMPLE_HASHTABLE_NAME
+#undef NETDATA_SIMPLE_HASHTABLE_H
+struct facet_key;
+// #define SIMPLE_HASHTABLE_SORT_FUNCTION compare_facet_key
+#define SIMPLE_HASHTABLE_VALUE_TYPE struct facet_key
+#define SIMPLE_HASHTABLE_NAME _KEY
+#include "../simple_hashtable.h"
// ----------------------------------------------------------------------------
typedef struct facet_value {
FACETS_HASH hash;
const char *name;
+ const char *color;
uint32_t name_len;
bool selected;
bool empty;
+ bool unsampled;
+ bool estimated;
uint32_t rows_matching_facet_value;
uint32_t final_facet_value_counter;
@@ -181,14 +152,19 @@ typedef struct facet_value {
} FACET_VALUE;
typedef enum {
- FACET_KEY_VALUE_NONE = 0,
- FACET_KEY_VALUE_UPDATED = (1 << 0),
- FACET_KEY_VALUE_EMPTY = (1 << 1),
- FACET_KEY_VALUE_COPIED = (1 << 2),
+ FACET_KEY_VALUE_NONE = 0,
+ FACET_KEY_VALUE_UPDATED = (1 << 0),
+ FACET_KEY_VALUE_EMPTY = (1 << 1),
+ FACET_KEY_VALUE_UNSAMPLED = (1 << 2),
+ FACET_KEY_VALUE_ESTIMATED = (1 << 3),
+ FACET_KEY_VALUE_COPIED = (1 << 4),
} FACET_KEY_VALUE_FLAGS;
#define facet_key_value_updated(k) ((k)->current_value.flags & FACET_KEY_VALUE_UPDATED)
#define facet_key_value_empty(k) ((k)->current_value.flags & FACET_KEY_VALUE_EMPTY)
+#define facet_key_value_unsampled(k) ((k)->current_value.flags & FACET_KEY_VALUE_UNSAMPLED)
+#define facet_key_value_estimated(k) ((k)->current_value.flags & FACET_KEY_VALUE_ESTIMATED)
+#define facet_key_value_empty_or_unsampled_or_estimated(k) ((k)->current_value.flags & (FACET_KEY_VALUE_EMPTY|FACET_KEY_VALUE_UNSAMPLED|FACET_KEY_VALUE_ESTIMATED))
#define facet_key_value_copied(k) ((k)->current_value.flags & FACET_KEY_VALUE_COPIED)
struct facet_key {
@@ -210,7 +186,7 @@ struct facet_key {
bool enabled;
uint32_t used;
FACET_VALUE *ll;
- SIMPLE_HASHTABLE ht;
+ SIMPLE_HASHTABLE_VALUE ht;
} values;
struct {
@@ -227,6 +203,14 @@ struct facet_key {
} empty_value;
struct {
+ FACET_VALUE *v;
+ } unsampled_value;
+
+ struct {
+ FACET_VALUE *v;
+ } estimated_value;
+
+ struct {
facet_dynamic_row_t cb;
void *data;
} dynamic;
@@ -261,7 +245,7 @@ struct facets {
struct {
size_t count;
FACET_KEY *ll;
- SIMPLE_HASHTABLE ht;
+ SIMPLE_HASHTABLE_KEY ht;
} keys;
struct {
@@ -324,6 +308,8 @@ struct facets {
struct {
size_t evaluated;
size_t matched;
+ size_t unsampled;
+ size_t estimated;
size_t created;
size_t reused;
} rows;
@@ -331,7 +317,6 @@ struct facets {
struct {
size_t registered;
size_t unique;
- size_t hashtable_increases;
} keys;
struct {
@@ -339,16 +324,21 @@ struct facets {
size_t transformed;
size_t dynamic;
size_t empty;
+ size_t unsampled;
+ size_t estimated;
size_t indexed;
size_t inserts;
size_t conflicts;
- size_t hashtable_increases;
} values;
struct {
size_t searches;
} fts;
} operations;
+
+ struct {
+ DICTIONARY *used_hashes_registry;
+ } report;
};
usec_t facets_row_oldest_ut(FACETS *facets) {
@@ -386,7 +376,7 @@ static inline bool facets_key_is_facet(FACETS *facets, FACET_KEY *k);
static inline void FACETS_VALUES_INDEX_CREATE(FACET_KEY *k) {
k->values.ll = NULL;
k->values.used = 0;
- simple_hashtable_init(&k->values.ht, FACETS_VALUES_HASHTABLE_ENTRIES);
+ simple_hashtable_init_VALUE(&k->values.ht, FACETS_VALUES_HASHTABLE_ENTRIES);
}
static inline void FACETS_VALUES_INDEX_DESTROY(FACET_KEY *k) {
@@ -402,7 +392,7 @@ static inline void FACETS_VALUES_INDEX_DESTROY(FACET_KEY *k) {
k->values.used = 0;
k->values.enabled = false;
- simple_hashtable_free(&k->values.ht);
+ simple_hashtable_destroy_VALUE(&k->values.ht);
}
static inline const char *facets_key_get_value(FACET_KEY *k) {
@@ -449,17 +439,17 @@ static inline void FACET_VALUE_ADD_CONFLICT(FACET_KEY *k, FACET_VALUE *v, const
}
static inline FACET_VALUE *FACET_VALUE_GET_FROM_INDEX(FACET_KEY *k, FACETS_HASH hash) {
- SIMPLE_HASHTABLE_SLOT *slot = simple_hashtable_get_slot(&k->values.ht, hash);
- return slot->data;
+ SIMPLE_HASHTABLE_SLOT_VALUE *slot = simple_hashtable_get_slot_VALUE(&k->values.ht, hash, true);
+ return SIMPLE_HASHTABLE_SLOT_DATA(slot);
}
static inline FACET_VALUE *FACET_VALUE_ADD_TO_INDEX(FACET_KEY *k, const FACET_VALUE * const tv) {
- SIMPLE_HASHTABLE_SLOT *slot = simple_hashtable_get_slot(&k->values.ht, tv->hash);
+ SIMPLE_HASHTABLE_SLOT_VALUE *slot = simple_hashtable_get_slot_VALUE(&k->values.ht, tv->hash, true);
- if(slot->data) {
+ if(SIMPLE_HASHTABLE_SLOT_DATA(slot)) {
// already exists
- FACET_VALUE *v = slot->data;
+ FACET_VALUE *v = SIMPLE_HASHTABLE_SLOT_DATA(slot);
FACET_VALUE_ADD_CONFLICT(k, v, tv);
return v;
}
@@ -467,12 +457,21 @@ static inline FACET_VALUE *FACET_VALUE_ADD_TO_INDEX(FACET_KEY *k, const FACET_VA
// we have to add it
FACET_VALUE *v = mallocz(sizeof(*v));
- slot->hash = tv->hash;
- slot->data = v;
+ simple_hashtable_set_slot_VALUE(&k->values.ht, slot, tv->hash, v);
memcpy(v, tv, sizeof(*v));
- DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(k->values.ll, v, prev, next);
+ if(v->estimated || v->unsampled) {
+ if(k->values.ll && k->values.ll->estimated) {
+ FACET_VALUE *estimated = k->values.ll;
+ DOUBLE_LINKED_LIST_INSERT_ITEM_AFTER_UNSAFE(k->values.ll, estimated, v, prev, next);
+ }
+ else
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(k->values.ll, v, prev, next);
+ }
+ else
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(k->values.ll, v, prev, next);
+
k->values.used++;
if(!v->selected)
@@ -490,12 +489,53 @@ static inline FACET_VALUE *FACET_VALUE_ADD_TO_INDEX(FACET_KEY *k, const FACET_VA
k->facets->operations.values.inserts++;
- if(unlikely(k->values.used > k->values.ht.size / 2)) {
- simple_hashtable_resize_double(&k->values.ht);
- k->facets->operations.values.hashtable_increases++;
+ return v;
+}
+
+static inline void FACET_VALUE_ADD_UNSAMPLED_VALUE_TO_INDEX(FACET_KEY *k) {
+ static const FACET_VALUE tv = {
+ .hash = FACETS_HASH_UNSAMPLED,
+ .name = FACET_VALUE_UNSAMPLED,
+ .name_len = sizeof(FACET_VALUE_UNSAMPLED) - 1,
+ .unsampled = true,
+ .color = "offline",
+ };
+
+ k->current_value.hash = FACETS_HASH_UNSAMPLED;
+
+ if(k->unsampled_value.v) {
+ FACET_VALUE_ADD_CONFLICT(k, k->unsampled_value.v, &tv);
+ k->current_value.v = k->unsampled_value.v;
}
+ else {
+ FACET_VALUE *v = FACET_VALUE_ADD_TO_INDEX(k, &tv);
+ v->unsampled = true;
+ k->unsampled_value.v = v;
+ k->current_value.v = v;
+ }
+}
- return v;
+static inline void FACET_VALUE_ADD_ESTIMATED_VALUE_TO_INDEX(FACET_KEY *k) {
+ static const FACET_VALUE tv = {
+ .hash = FACETS_HASH_ESTIMATED,
+ .name = FACET_VALUE_ESTIMATED,
+ .name_len = sizeof(FACET_VALUE_ESTIMATED) - 1,
+ .estimated = true,
+ .color = "generic",
+ };
+
+ k->current_value.hash = FACETS_HASH_ESTIMATED;
+
+ if(k->estimated_value.v) {
+ FACET_VALUE_ADD_CONFLICT(k, k->estimated_value.v, &tv);
+ k->current_value.v = k->estimated_value.v;
+ }
+ else {
+ FACET_VALUE *v = FACET_VALUE_ADD_TO_INDEX(k, &tv);
+ v->estimated = true;
+ k->estimated_value.v = v;
+ k->current_value.v = v;
+ }
}
static inline void FACET_VALUE_ADD_EMPTY_VALUE_TO_INDEX(FACET_KEY *k) {
@@ -503,6 +543,7 @@ static inline void FACET_VALUE_ADD_EMPTY_VALUE_TO_INDEX(FACET_KEY *k) {
.hash = FACETS_HASH_ZERO,
.name = FACET_VALUE_UNSET,
.name_len = sizeof(FACET_VALUE_UNSET) - 1,
+ .empty = true,
};
k->current_value.hash = FACETS_HASH_ZERO;
@@ -527,6 +568,9 @@ static inline void FACET_VALUE_ADD_CURRENT_VALUE_TO_INDEX(FACET_KEY *k) {
tv.name = facets_key_get_value(k);
tv.name_len = facets_key_get_value_length(k);
tv.hash = FACETS_HASH_FUNCTION(tv.name, tv.name_len);
+ tv.empty = false;
+ tv.estimated = false;
+ tv.unsampled = false;
k->current_value.v = FACET_VALUE_ADD_TO_INDEX(k, &tv);
k->facets->operations.values.indexed++;
@@ -567,7 +611,7 @@ static inline void FACETS_KEYS_INDEX_CREATE(FACETS *facets) {
facets->keys.count = 0;
facets->keys_with_values.used = 0;
- simple_hashtable_init(&facets->keys.ht, FACETS_KEYS_HASHTABLE_ENTRIES);
+ simple_hashtable_init_KEY(&facets->keys.ht, FACETS_KEYS_HASHTABLE_ENTRIES);
}
static inline void FACETS_KEYS_INDEX_DESTROY(FACETS *facets) {
@@ -586,12 +630,12 @@ static inline void FACETS_KEYS_INDEX_DESTROY(FACETS *facets) {
facets->keys.count = 0;
facets->keys_with_values.used = 0;
- simple_hashtable_free(&facets->keys.ht);
+ simple_hashtable_destroy_KEY(&facets->keys.ht);
}
static inline FACET_KEY *FACETS_KEY_GET_FROM_INDEX(FACETS *facets, FACETS_HASH hash) {
- SIMPLE_HASHTABLE_SLOT *slot = simple_hashtable_get_slot(&facets->keys.ht, hash);
- return slot->data;
+ SIMPLE_HASHTABLE_SLOT_KEY *slot = simple_hashtable_get_slot_KEY(&facets->keys.ht, hash, true);
+ return SIMPLE_HASHTABLE_SLOT_DATA(slot);
}
bool facets_key_name_value_length_is_selected(FACETS *facets, const char *key, size_t key_length, const char *value, size_t value_length) {
@@ -670,26 +714,20 @@ static inline FACET_KEY *FACETS_KEY_CREATE(FACETS *facets, FACETS_HASH hash, con
static inline FACET_KEY *FACETS_KEY_ADD_TO_INDEX(FACETS *facets, FACETS_HASH hash, const char *name, size_t name_length, FACET_KEY_OPTIONS options) {
facets->operations.keys.registered++;
- SIMPLE_HASHTABLE_SLOT *slot = simple_hashtable_get_slot(&facets->keys.ht, hash);
+ SIMPLE_HASHTABLE_SLOT_KEY *slot = simple_hashtable_get_slot_KEY(&facets->keys.ht, hash, true);
- if(unlikely(!slot->data)) {
+ if(unlikely(!SIMPLE_HASHTABLE_SLOT_DATA(slot))) {
// we have to add it
FACET_KEY *k = FACETS_KEY_CREATE(facets, hash, name, name_length, options);
- slot->hash = hash;
- slot->data = k;
-
- if(facets->keys.count > facets->keys.ht.size / 2) {
- simple_hashtable_resize_double(&facets->keys.ht);
- facets->operations.keys.hashtable_increases++;
- }
+ simple_hashtable_set_slot_KEY(&facets->keys.ht, slot, hash, k);
return k;
}
// already in the index
- FACET_KEY *k = slot->data;
+ FACET_KEY *k = SIMPLE_HASHTABLE_SLOT_DATA(slot);
facet_key_set_name(k, name, name_length);
@@ -716,6 +754,10 @@ bool facets_key_name_is_facet(FACETS *facets, const char *key) {
// ----------------------------------------------------------------------------
+size_t facets_histogram_slots(FACETS *facets) {
+ return facets->histogram.slots;
+}
+
static usec_t calculate_histogram_bar_width(usec_t after_ut, usec_t before_ut) {
// Array of valid durations in seconds
static time_t valid_durations_s[] = {
@@ -731,7 +773,7 @@ static usec_t calculate_histogram_bar_width(usec_t after_ut, usec_t before_ut) {
usec_t bar_width_ut = 1 * USEC_PER_SEC;
for (int i = array_size - 1; i >= 0; --i) {
- if (duration_ut / (valid_durations_s[i] * USEC_PER_SEC) >= HISTOGRAM_COLUMNS) {
+ if (duration_ut / (valid_durations_s[i] * USEC_PER_SEC) >= FACETS_HISTOGRAM_COLUMNS) {
bar_width_ut = valid_durations_s[i] * USEC_PER_SEC;
break;
}
@@ -788,17 +830,7 @@ void facets_set_timeframe_and_histogram_by_name(FACETS *facets, const char *key_
facets_set_timeframe_and_histogram_by_id(facets, hash_str, after_ut, before_ut);
}
-static inline void facets_histogram_update_value(FACETS *facets, usec_t usec) {
- if(!facets->histogram.enabled ||
- !facets->histogram.key ||
- !facets->histogram.key->values.enabled ||
- !facet_key_value_updated(facets->histogram.key) ||
- usec < facets->histogram.after_ut ||
- usec > facets->histogram.before_ut)
- return;
-
- FACET_VALUE *v = facets->histogram.key->current_value.v;
-
+static inline uint32_t facets_histogram_slot_at_time_ut(FACETS *facets, usec_t usec, FACET_VALUE *v) {
if(unlikely(!v->histogram))
v->histogram = callocz(facets->histogram.slots, sizeof(*v->histogram));
@@ -815,11 +847,177 @@ static inline void facets_histogram_update_value(FACETS *facets, usec_t usec) {
if(unlikely(slot >= facets->histogram.slots))
slot = facets->histogram.slots - 1;
+ return slot;
+}
+
+static inline void facets_histogram_update_value_slot(FACETS *facets, usec_t usec, FACET_VALUE *v) {
+ uint32_t slot = facets_histogram_slot_at_time_ut(facets, usec, v);
v->histogram[slot]++;
}
+static inline void facets_histogram_update_value(FACETS *facets, usec_t usec) {
+ if(!facets->histogram.enabled ||
+ !facets->histogram.key ||
+ !facets->histogram.key->values.enabled ||
+ !facet_key_value_updated(facets->histogram.key) ||
+ usec < facets->histogram.after_ut ||
+ usec > facets->histogram.before_ut)
+ return;
+
+ FACET_VALUE *v = facets->histogram.key->current_value.v;
+ facets_histogram_update_value_slot(facets, usec, v);
+}
+
+static usec_t overlap_duration_ut(usec_t start1, usec_t end1, usec_t start2, usec_t end2) {
+ usec_t overlap_start = MAX(start1, start2);
+ usec_t overlap_end = MIN(end1, end2);
+
+ if (overlap_start < overlap_end)
+ return overlap_end - overlap_start;
+ else
+ return 0; // No overlap
+}
+
+void facets_update_estimations(FACETS *facets, usec_t from_ut, usec_t to_ut, size_t entries) {
+ if(unlikely(!facets->histogram.enabled))
+ return;
+
+ if(unlikely(!overlap_duration_ut(facets->histogram.after_ut, facets->histogram.before_ut, from_ut, to_ut)))
+ return;
+
+ facets->operations.rows.evaluated += entries;
+ facets->operations.rows.matched += entries;
+ facets->operations.rows.estimated += entries;
+
+ if (!facets->histogram.enabled ||
+ !facets->histogram.key ||
+ !facets->histogram.key->values.enabled)
+ return;
+
+ if (from_ut < facets->histogram.after_ut)
+ from_ut = facets->histogram.after_ut;
+
+ if (to_ut > facets->histogram.before_ut)
+ to_ut = facets->histogram.before_ut;
+
+ if (!facets->histogram.key->estimated_value.v)
+ FACET_VALUE_ADD_ESTIMATED_VALUE_TO_INDEX(facets->histogram.key);
+
+ FACET_VALUE *v = facets->histogram.key->estimated_value.v;
+
+ size_t slots = 0;
+ size_t total_ut = to_ut - from_ut;
+ ssize_t remaining_entries = (ssize_t)entries;
+ size_t slot = facets_histogram_slot_at_time_ut(facets, from_ut, v);
+ for(; slot < facets->histogram.slots ;slot++) {
+ usec_t slot_start_ut = facets->histogram.after_ut + slot * facets->histogram.slot_width_ut;
+ usec_t slot_end_ut = slot_start_ut + facets->histogram.slot_width_ut;
+
+ if(slot_start_ut > to_ut)
+ break;
+
+ usec_t overlap_ut = overlap_duration_ut(from_ut, to_ut, slot_start_ut, slot_end_ut);
+
+ size_t slot_entries = (overlap_ut * entries) / total_ut;
+ v->histogram[slot] += slot_entries;
+ remaining_entries -= (ssize_t)slot_entries;
+ slots++;
+ }
+
+ // Check if all entries are assigned
+ // This should always be true if the distribution is correct
+ internal_fatal(remaining_entries < 0 || remaining_entries >= (ssize_t)(slots),
+ "distribution of estimations is not accurate - there are %zd remaining entries",
+ remaining_entries);
+}
+
+void facets_row_finished_unsampled(FACETS *facets, usec_t usec) {
+ facets->operations.rows.evaluated++;
+ facets->operations.rows.matched++;
+ facets->operations.rows.unsampled++;
+
+ if(!facets->histogram.enabled ||
+ !facets->histogram.key ||
+ !facets->histogram.key->values.enabled ||
+ usec < facets->histogram.after_ut ||
+ usec > facets->histogram.before_ut)
+ return;
+
+ if(!facets->histogram.key->unsampled_value.v)
+ FACET_VALUE_ADD_UNSAMPLED_VALUE_TO_INDEX(facets->histogram.key);
+
+ FACET_VALUE *v = facets->histogram.key->unsampled_value.v;
+ facets_histogram_update_value_slot(facets, usec, v);
+
+ facets_reset_key(facets->histogram.key);
+}
+
+static const char *facets_key_name_cached(FACET_KEY *k, DICTIONARY *used_hashes_registry) {
+ if(k->name) {
+ if(used_hashes_registry && !k->default_selected_for_values) {
+ char hash_str[FACET_STRING_HASH_SIZE];
+ facets_hash_to_str(k->hash, hash_str);
+ dictionary_set(used_hashes_registry, hash_str, (void *)k->name, strlen(k->name) + 1);
+ }
+
+ return k->name;
+ }
+
+ // key has no name
+ const char *name = "[UNAVAILABLE_FIELD]";
+
+ if(used_hashes_registry) {
+ char hash_str[FACET_STRING_HASH_SIZE];
+ facets_hash_to_str(k->hash, hash_str);
+ const char *s = dictionary_get(used_hashes_registry, hash_str);
+ if(s) name = s;
+ }
+
+ return name;
+}
+
+static const char *facets_key_value_cached(FACET_KEY *k, FACET_VALUE *v, DICTIONARY *used_hashes_registry) {
+ if(v->empty || v->estimated || v->unsampled)
+ return v->name;
+
+ if(v->name && v->name_len) {
+ if(used_hashes_registry && !k->default_selected_for_values && v->selected) {
+ char hash_str[FACET_STRING_HASH_SIZE];
+ facets_hash_to_str(v->hash, hash_str);
+ dictionary_set(used_hashes_registry, hash_str, (void *)v->name, v->name_len + 1);
+ }
+
+ return v->name;
+ }
+
+ // key has no name
+ const char *name = "[unavailable field]";
+
+ if(used_hashes_registry) {
+ char hash_str[FACET_STRING_HASH_SIZE];
+ facets_hash_to_str(v->hash, hash_str);
+ const char *s = dictionary_get(used_hashes_registry, hash_str);
+ if(s) name = s;
+ }
+
+ return name;
+}
+
+static inline void facets_key_value_transformed(FACETS *facets, FACET_KEY *k, FACET_VALUE *v, BUFFER *dst, FACETS_TRANSFORMATION_SCOPE scope) {
+ buffer_flush(dst);
+
+ if(v->empty || v->unsampled || v->estimated)
+ buffer_strcat(dst, v->name);
+ else if(k->transform.cb && k->transform.view_only) {
+ buffer_contents_replace(dst, v->name, v->name_len);
+ k->transform.cb(facets, dst, scope, k->transform.data);
+ }
+ else
+ buffer_strcat(dst, facets_key_value_cached(k, v, facets->report.used_hashes_registry));
+}
+
static inline void facets_histogram_value_names(BUFFER *wb, FACETS *facets __maybe_unused, FACET_KEY *k, const char *key, const char *first_key) {
- BUFFER *tb = NULL;
+ CLEAN_BUFFER *tb = buffer_create(0, NULL);
buffer_json_member_add_array(wb, key);
{
@@ -832,23 +1030,30 @@ static inline void facets_histogram_value_names(BUFFER *wb, FACETS *facets __may
if (unlikely(!v->histogram))
continue;
- if(!v->empty && k->transform.cb && k->transform.view_only) {
- if(!tb)
- tb = buffer_create(0, NULL);
-
- buffer_contents_replace(tb, v->name, v->name_len);
- k->transform.cb(facets, tb, FACETS_TRANSFORM_HISTOGRAM, k->transform.data);
- buffer_json_add_array_item_string(wb, buffer_tostring(tb));
- }
- else
- buffer_json_add_array_item_string(wb, v->name);
+ facets_key_value_transformed(facets, k, v, tb, FACETS_TRANSFORM_HISTOGRAM);
+ buffer_json_add_array_item_string(wb, buffer_tostring(tb));
}
foreach_value_in_key_done(v);
}
}
buffer_json_array_close(wb); // key
+}
- buffer_free(tb);
+static inline void facets_histogram_value_colors(BUFFER *wb, FACETS *facets __maybe_unused, FACET_KEY *k, const char *key) {
+ buffer_json_member_add_array(wb, key);
+ {
+ if(k && k->values.enabled) {
+ FACET_VALUE *v;
+ foreach_value_in_key(k, v) {
+ if (unlikely(!v->histogram))
+ continue;
+
+ buffer_json_add_array_item_string(wb, v->color);
+ }
+ foreach_value_in_key_done(v);
+ }
+ }
+ buffer_json_array_close(wb); // key
}
static inline void facets_histogram_value_units(BUFFER *wb, FACETS *facets __maybe_unused, FACET_KEY *k, const char *key) {
@@ -954,6 +1159,8 @@ static inline void facets_histogram_value_con(BUFFER *wb, FACETS *facets __maybe
}
static void facets_histogram_generate(FACETS *facets, FACET_KEY *k, BUFFER *wb) {
+ CLEAN_BUFFER *tmp = buffer_create(0, NULL);
+
size_t dimensions = 0;
uint32_t min = UINT32_MAX, max = 0, sum = 0, count = 0;
@@ -995,6 +1202,7 @@ static void facets_histogram_generate(FACETS *facets, FACET_KEY *k, BUFFER *wb)
buffer_json_member_add_object(wb, "summary");
{
+ // summary.nodes
buffer_json_member_add_array(wb, "nodes");
{
buffer_json_add_array_item_object(wb); // node
@@ -1041,6 +1249,7 @@ static void facets_histogram_generate(FACETS *facets, FACET_KEY *k, BUFFER *wb)
}
buffer_json_array_close(wb); // nodes
+ // summary.contexts
buffer_json_member_add_array(wb, "contexts");
{
buffer_json_add_array_item_object(wb); // context
@@ -1078,6 +1287,7 @@ static void facets_histogram_generate(FACETS *facets, FACET_KEY *k, BUFFER *wb)
}
buffer_json_array_close(wb); // contexts
+ // summary.instances
buffer_json_member_add_array(wb, "instances");
{
buffer_json_add_array_item_object(wb); // instance
@@ -1109,17 +1319,20 @@ static void facets_histogram_generate(FACETS *facets, FACET_KEY *k, BUFFER *wb)
}
buffer_json_array_close(wb); // instances
+ // summary.dimensions
buffer_json_member_add_array(wb, "dimensions");
if(dimensions && k && k->values.enabled) {
size_t pri = 0;
FACET_VALUE *v;
+
foreach_value_in_key(k, v) {
if(unlikely(!v->histogram))
continue;
buffer_json_add_array_item_object(wb); // dimension
{
- buffer_json_member_add_string(wb, "id", v->name);
+ facets_key_value_transformed(facets, k, v, tmp, FACETS_TRANSFORM_HISTOGRAM);
+ buffer_json_member_add_string(wb, "id", buffer_tostring(tmp));
buffer_json_member_add_object(wb, "ds");
{
buffer_json_member_add_uint64(wb, "sl", 1);
@@ -1275,7 +1488,7 @@ static void facets_histogram_generate(FACETS *facets, FACET_KEY *k, BUFFER *wb)
char title[1024 + 1] = "Events Distribution";
FACET_KEY *kt = FACETS_KEY_GET_FROM_INDEX(facets, facets->histogram.hash);
if(kt && kt->name)
- snprintfz(title, 1024, "Events Distribution by %s", kt->name);
+ snprintfz(title, sizeof(title) - 1, "Events Distribution by %s", kt->name);
buffer_json_member_add_string(wb, "title", title);
buffer_json_member_add_time_t(wb, "update_every", facets->histogram.slot_width_ut / USEC_PER_SEC);
@@ -1293,6 +1506,7 @@ static void facets_histogram_generate(FACETS *facets, FACET_KEY *k, BUFFER *wb)
facets_histogram_value_names(wb, facets, k, "ids", NULL);
facets_histogram_value_names(wb, facets, k, "names", NULL);
+ facets_histogram_value_colors(wb, facets, k, "colors");
facets_histogram_value_units(wb, facets, k, "units");
buffer_json_member_add_object(wb, "sts");
@@ -1525,6 +1739,30 @@ void facets_set_additional_options(FACETS *facets, FACETS_OPTIONS options) {
// ----------------------------------------------------------------------------
+static inline void facets_key_set_unsampled_value(FACETS *facets, FACET_KEY *k) {
+ if(likely(!facet_key_value_updated(k) && facets->keys_in_row.used < FACETS_KEYS_IN_ROW_MAX))
+ facets->keys_in_row.array[facets->keys_in_row.used++] = k;
+
+ k->current_value.flags |= FACET_KEY_VALUE_UPDATED | FACET_KEY_VALUE_UNSAMPLED;
+
+ facets->operations.values.registered++;
+ facets->operations.values.unsampled++;
+
+ // no need to copy the UNSET value
+ // empty values are exported as empty
+ k->current_value.raw = NULL;
+ k->current_value.raw_len = 0;
+ k->current_value.b->len = 0;
+ k->current_value.flags &= ~FACET_KEY_VALUE_COPIED;
+
+ if(unlikely(k->values.enabled))
+ FACET_VALUE_ADD_UNSAMPLED_VALUE_TO_INDEX(k);
+ else {
+ k->key_found_in_row++;
+ k->key_values_selected_in_row++;
+ }
+}
+
static inline void facets_key_set_empty_value(FACETS *facets, FACET_KEY *k) {
if(likely(!facet_key_value_updated(k) && facets->keys_in_row.used < FACETS_KEYS_IN_ROW_MAX))
facets->keys_in_row.array[facets->keys_in_row.used++] = k;
@@ -1554,7 +1792,7 @@ static inline void facets_key_check_value(FACETS *facets, FACET_KEY *k) {
facets->keys_in_row.array[facets->keys_in_row.used++] = k;
k->current_value.flags |= FACET_KEY_VALUE_UPDATED;
- k->current_value.flags &= ~FACET_KEY_VALUE_EMPTY;
+ k->current_value.flags &= ~(FACET_KEY_VALUE_EMPTY|FACET_KEY_VALUE_UNSAMPLED|FACET_KEY_VALUE_ESTIMATED);
facets->operations.values.registered++;
@@ -1568,7 +1806,7 @@ static inline void facets_key_check_value(FACETS *facets, FACET_KEY *k) {
// if(strstr(buffer_tostring(k->current_value), "fprintd") != NULL)
// found = true;
- if(facets->query && !facet_key_value_empty(k) && ((k->options & FACET_KEY_OPTION_FTS) || facets->options & FACETS_OPTION_ALL_KEYS_FTS)) {
+ if(facets->query && !facet_key_value_empty_or_unsampled_or_estimated(k) && ((k->options & FACET_KEY_OPTION_FTS) || facets->options & FACETS_OPTION_ALL_KEYS_FTS)) {
facets->operations.fts.searches++;
facets_key_value_copy_to_buffer(k);
switch(simple_pattern_matches_extract(facets->query, buffer_tostring(k->current_value.b), NULL, 0)) {
@@ -1679,7 +1917,7 @@ static FACET_ROW *facets_row_create(FACETS *facets, usec_t usec, FACET_ROW *into
.empty = true,
};
- if(facet_key_value_updated(k) && !facet_key_value_empty(k)) {
+ if(facet_key_value_updated(k) && !facet_key_value_empty_or_unsampled_or_estimated(k)) {
t.tmp = facets_key_get_value(k);
t.tmp_len = facets_key_get_value_length(k);
t.empty = false;
@@ -1758,6 +1996,12 @@ static inline bool facets_is_entry_within_anchor(FACETS *facets, usec_t usec) {
return true;
}
+bool facets_row_candidate_to_keep(FACETS *facets, usec_t usec) {
+ return !facets->base ||
+ (usec >= facets->base->prev->usec && usec <= facets->base->usec && facets_is_entry_within_anchor(facets, usec)) ||
+ facets->items_to_return < facets->max_items_to_return;
+}
+
static void facets_row_keep(FACETS *facets, usec_t usec) {
facets->operations.rows.matched++;
@@ -1885,9 +2129,10 @@ bool facets_row_finished(FACETS *facets, usec_t usec) {
for(size_t p = 0; p < entries ;p++) {
FACET_KEY *k = facets->keys_with_values.array[p];
- if(!facet_key_value_updated(k))
+ if(!facet_key_value_updated(k)) {
// put the FACET_VALUE_UNSET value into it
facets_key_set_empty_value(facets, k);
+ }
total_keys++;
@@ -1935,7 +2180,7 @@ bool facets_row_finished(FACETS *facets, usec_t usec) {
// ----------------------------------------------------------------------------
// output
-static const char *facets_severity_to_string(FACET_ROW_SEVERITY severity) {
+const char *facets_severity_to_string(FACET_ROW_SEVERITY severity) {
switch(severity) {
default:
case FACET_ROW_SEVERITY_NORMAL:
@@ -2104,7 +2349,7 @@ static uint32_t facets_sort_and_reorder_values(FACET_KEY *k) {
if(!k->values.enabled || !k->values.ll || !k->values.used)
return 0;
- if(!k->transform.cb || !(k->facets->options & FACETS_OPTION_SORT_FACETS_ALPHABETICALLY))
+ if(!k->transform.cb || !k->transform.view_only || !(k->facets->options & FACETS_OPTION_SORT_FACETS_ALPHABETICALLY))
return facets_sort_and_reorder_values_internal(k);
// we have a transformation and has to be sorted alphabetically
@@ -2128,8 +2373,7 @@ static uint32_t facets_sort_and_reorder_values(FACET_KEY *k) {
values[used].name_len = v->name_len;
used++;
- buffer_contents_replace(tb, v->name, v->name_len);
- k->transform.cb(k->facets, tb, FACETS_TRANSFORM_FACET_SORT, k->transform.data);
+ facets_key_value_transformed(k->facets, k, v, tb, FACETS_TRANSFORM_FACET_SORT);
v->name = strdupz(buffer_tostring(tb));
v->name_len = buffer_strlen(tb);
}
@@ -2167,55 +2411,9 @@ void facets_table_config(BUFFER *wb) {
buffer_json_object_close(wb); // pagination
}
-static const char *facets_json_key_name_string(FACET_KEY *k, DICTIONARY *used_hashes_registry) {
- if(k->name) {
- if(used_hashes_registry && !k->default_selected_for_values) {
- char hash_str[FACET_STRING_HASH_SIZE];
- facets_hash_to_str(k->hash, hash_str);
- dictionary_set(used_hashes_registry, hash_str, (void *)k->name, strlen(k->name) + 1);
- }
-
- return k->name;
- }
-
- // key has no name
- const char *name = "[UNAVAILABLE_FIELD]";
-
- if(used_hashes_registry) {
- char hash_str[FACET_STRING_HASH_SIZE];
- facets_hash_to_str(k->hash, hash_str);
- const char *s = dictionary_get(used_hashes_registry, hash_str);
- if(s) name = s;
- }
-
- return name;
-}
-
-static const char *facets_json_key_value_string(FACET_KEY *k, FACET_VALUE *v, DICTIONARY *used_hashes_registry) {
- if(v->name && v->name_len) {
- if(used_hashes_registry && !k->default_selected_for_values && v->selected) {
- char hash_str[FACET_STRING_HASH_SIZE];
- facets_hash_to_str(v->hash, hash_str);
- dictionary_set(used_hashes_registry, hash_str, (void *)v->name, v->name_len + 1);
- }
-
- return v->name;
- }
-
- // key has no name
- const char *name = "[unavailable field]";
-
- if(used_hashes_registry) {
- char hash_str[FACET_STRING_HASH_SIZE];
- facets_hash_to_str(v->hash, hash_str);
- const char *s = dictionary_get(used_hashes_registry, hash_str);
- if(s) name = s;
- }
-
- return name;
-}
-
void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry) {
+ facets->report.used_hashes_registry = used_hashes_registry;
+
if(!(facets->options & FACETS_OPTION_DATA_ONLY)) {
facets_table_config(wb);
facets_accepted_parameters_to_json_array(facets, wb, true);
@@ -2239,7 +2437,7 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
}
if(show_facets) {
- BUFFER *tb = NULL;
+ CLEAN_BUFFER *tb = buffer_create(0, NULL);
FACET_KEY *k;
foreach_key_in_facets(facets, k) {
if(!k->values.enabled)
@@ -2252,7 +2450,9 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
buffer_json_add_array_item_object(wb); // key
{
buffer_json_member_add_string(wb, "id", hash_to_static_string(k->hash));
- buffer_json_member_add_string(wb, "name", facets_json_key_name_string(k, used_hashes_registry));
+ buffer_json_member_add_string(wb, "name", facets_key_name_cached(k
+ , facets->report.used_hashes_registry
+ ));
if(!k->order) k->order = facets->order++;
buffer_json_member_add_uint64(wb, "order", k->order);
@@ -2264,21 +2464,15 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
if((facets->options & FACETS_OPTION_DONT_SEND_EMPTY_VALUE_FACETS) && v->empty)
continue;
+ if(v->unsampled || v->estimated)
+ continue;
+
buffer_json_add_array_item_object(wb);
{
buffer_json_member_add_string(wb, "id", hash_to_static_string(v->hash));
- if(!v->empty && k->transform.cb && k->transform.view_only) {
- if(!tb)
- tb = buffer_create(0, NULL);
-
- buffer_contents_replace(tb, v->name, v->name_len);
- k->transform.cb(facets, tb, FACETS_TRANSFORM_FACET, k->transform.data);
- buffer_json_member_add_string(wb, "name", buffer_tostring(tb));
- }
- else
- buffer_json_member_add_string(wb, "name", facets_json_key_value_string(k, v, used_hashes_registry));
-
+ facets_key_value_transformed(facets, k, v, tb, FACETS_TRANSFORM_FACET);
+ buffer_json_member_add_string(wb, "name", buffer_tostring(tb));
buffer_json_member_add_uint64(wb, "count", v->final_facet_value_counter);
buffer_json_member_add_uint64(wb, "order", v->order);
}
@@ -2291,7 +2485,6 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
buffer_json_object_close(wb); // key
}
foreach_key_in_facets_done(k);
- buffer_free(tb);
buffer_json_array_close(wb); // facets
}
}
@@ -2325,7 +2518,7 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
NULL,
RRDF_FIELD_SUMMARY_COUNT,
RRDF_FIELD_FILTER_NONE,
- RRDR_FIELD_OPTS_DUMMY,
+ RRDF_FIELD_OPTS_DUMMY,
NULL);
FACET_KEY *k;
@@ -2345,6 +2538,9 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
if (k->options & FACET_KEY_OPTION_MAIN_TEXT)
options |= RRDF_FIELD_OPTS_FULL_WIDTH | RRDF_FIELD_OPTS_WRAP;
+ if (k->options & FACET_KEY_OPTION_EXPANDED_FILTER)
+ options |= RRDF_FIELD_OPTS_EXPANDED_FILTER;
+
const char *hash_str = hash_to_static_string(k->hash);
buffer_rrdf_table_add_field(
@@ -2356,8 +2552,7 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
RRDF_FIELD_SORT_FIXED,
NULL,
RRDF_FIELD_SUMMARY_COUNT,
- (k->options & FACET_KEY_OPTION_NEVER_FACET) ? RRDF_FIELD_FILTER_NONE
- : RRDF_FIELD_FILTER_FACET,
+ (k->options & FACET_KEY_OPTION_NEVER_FACET) ? RRDF_FIELD_FILTER_NONE : RRDF_FIELD_FILTER_FACET,
options, FACET_VALUE_UNSET);
}
foreach_key_in_facets_done(k);
@@ -2503,6 +2698,8 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
if(show_items) {
buffer_json_member_add_uint64(wb, "evaluated", facets->operations.rows.evaluated);
buffer_json_member_add_uint64(wb, "matched", facets->operations.rows.matched);
+ buffer_json_member_add_uint64(wb, "unsampled", facets->operations.rows.unsampled);
+ buffer_json_member_add_uint64(wb, "estimated", facets->operations.rows.estimated);
buffer_json_member_add_uint64(wb, "returned", facets->items_to_return);
buffer_json_member_add_uint64(wb, "max_to_return", facets->max_items_to_return);
buffer_json_member_add_uint64(wb, "before", facets->operations.skips_before);
@@ -2533,21 +2730,51 @@ void facets_report(FACETS *facets, BUFFER *wb, DICTIONARY *used_hashes_registry)
buffer_json_object_close(wb); // rows
buffer_json_member_add_object(wb, "keys");
{
+ size_t resizes = 0, searches = 0, collisions = 0, used = 0, size = 0, count = 0;
+ count++;
+ used += facets->keys.ht.used;
+ size += facets->keys.ht.size;
+ resizes += facets->keys.ht.resizes;
+ searches += facets->keys.ht.searches;
+ collisions += facets->keys.ht.collisions;
+
buffer_json_member_add_uint64(wb, "registered", facets->operations.keys.registered);
buffer_json_member_add_uint64(wb, "unique", facets->operations.keys.unique);
- buffer_json_member_add_uint64(wb, "hashtable_increases", facets->operations.keys.hashtable_increases);
+ buffer_json_member_add_uint64(wb, "hashtables", count);
+ buffer_json_member_add_uint64(wb, "hashtable_used", used);
+ buffer_json_member_add_uint64(wb, "hashtable_size", size);
+ buffer_json_member_add_uint64(wb, "hashtable_searches", searches);
+ buffer_json_member_add_uint64(wb, "hashtable_collisions", collisions);
+ buffer_json_member_add_uint64(wb, "hashtable_resizes", resizes);
}
buffer_json_object_close(wb); // keys
buffer_json_member_add_object(wb, "values");
{
+ size_t resizes = 0, searches = 0, collisions = 0, used = 0, size = 0, count = 0;
+ for(FACET_KEY *k = facets->keys.ll; k ; k = k->next) {
+ count++;
+ used += k->values.ht.used;
+ size += k->values.ht.size;
+ resizes += k->values.ht.resizes;
+ searches += k->values.ht.searches;
+ collisions += k->values.ht.collisions;
+ }
+
buffer_json_member_add_uint64(wb, "registered", facets->operations.values.registered);
buffer_json_member_add_uint64(wb, "transformed", facets->operations.values.transformed);
buffer_json_member_add_uint64(wb, "dynamic", facets->operations.values.dynamic);
buffer_json_member_add_uint64(wb, "empty", facets->operations.values.empty);
+ buffer_json_member_add_uint64(wb, "unsampled", facets->operations.values.unsampled);
+ buffer_json_member_add_uint64(wb, "estimated", facets->operations.values.estimated);
buffer_json_member_add_uint64(wb, "indexed", facets->operations.values.indexed);
buffer_json_member_add_uint64(wb, "inserts", facets->operations.values.inserts);
buffer_json_member_add_uint64(wb, "conflicts", facets->operations.values.conflicts);
- buffer_json_member_add_uint64(wb, "hashtable_increases", facets->operations.values.hashtable_increases);
+ buffer_json_member_add_uint64(wb, "hashtables", count);
+ buffer_json_member_add_uint64(wb, "hashtable_used", used);
+ buffer_json_member_add_uint64(wb, "hashtable_size", size);
+ buffer_json_member_add_uint64(wb, "hashtable_searches", searches);
+ buffer_json_member_add_uint64(wb, "hashtable_collisions", collisions);
+ buffer_json_member_add_uint64(wb, "hashtable_resizes", resizes);
}
buffer_json_object_close(wb); // values
buffer_json_member_add_object(wb, "fts");
diff --git a/libnetdata/facets/facets.h b/libnetdata/facets/facets.h
index 75972561..8364d861 100644
--- a/libnetdata/facets/facets.h
+++ b/libnetdata/facets/facets.h
@@ -6,6 +6,8 @@
#include "../libnetdata.h"
#define FACET_VALUE_UNSET "-"
+#define FACET_VALUE_UNSAMPLED "[unsampled]"
+#define FACET_VALUE_ESTIMATED "[estimated]"
typedef enum __attribute__((packed)) {
FACETS_ANCHOR_DIRECTION_FORWARD,
@@ -31,6 +33,7 @@ typedef enum __attribute__((packed)) {
FACET_KEY_OPTION_RICH_TEXT = (1 << 7),
FACET_KEY_OPTION_REORDER = (1 << 8), // give the key a new order id on first encounter
FACET_KEY_OPTION_TRANSFORM_VIEW = (1 << 9), // when registering the transformation, do it only at the view, not on all data
+ FACET_KEY_OPTION_EXPANDED_FILTER = (1 << 10), // the presentation should have this filter expanded by default
} FACET_KEY_OPTIONS;
typedef enum __attribute__((packed)) {
@@ -84,11 +87,16 @@ void facets_accepted_param(FACETS *facets, const char *param);
void facets_rows_begin(FACETS *facets);
bool facets_row_finished(FACETS *facets, usec_t usec);
+void facets_row_finished_unsampled(FACETS *facets, usec_t usec);
+void facets_update_estimations(FACETS *facets, usec_t from_ut, usec_t to_ut, size_t entries);
+size_t facets_histogram_slots(FACETS *facets);
+
FACET_KEY *facets_register_key_name(FACETS *facets, const char *key, FACET_KEY_OPTIONS options);
void facets_set_query(FACETS *facets, const char *query);
void facets_set_items(FACETS *facets, uint32_t items);
void facets_set_anchor(FACETS *facets, usec_t start_ut, usec_t stop_ut, FACETS_ANCHOR_DIRECTION direction);
void facets_enable_slice_mode(FACETS *facets);
+bool facets_row_candidate_to_keep(FACETS *facets, usec_t usec);
FACET_KEY *facets_register_facet_id(FACETS *facets, const char *key_id, FACET_KEY_OPTIONS options);
void facets_register_facet_id_filter(FACETS *facets, const char *key_id, char *value_id, FACET_KEY_OPTIONS options);
@@ -115,4 +123,6 @@ uint32_t facets_rows(FACETS *facets);
void facets_table_config(BUFFER *wb);
+const char *facets_severity_to_string(FACET_ROW_SEVERITY severity);
+
#endif
diff --git a/libnetdata/functions_evloop/functions_evloop.c b/libnetdata/functions_evloop/functions_evloop.c
index 3fcd70aa..044556ac 100644
--- a/libnetdata/functions_evloop/functions_evloop.c
+++ b/libnetdata/functions_evloop/functions_evloop.c
@@ -64,6 +64,12 @@ static void *rrd_functions_worker_globals_worker_main(void *arg) {
pthread_mutex_unlock(&wg->worker_mutex);
if(acquired) {
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_TXT(NDF_REQUEST, j->cmd),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
last_acquired = true;
j = dictionary_acquired_item_value(acquired);
j->cb(j->transaction, j->cmd, j->timeout, &j->cancelled);
@@ -208,3 +214,10 @@ void functions_evloop_add_function(struct functions_evloop_globals *wg, const ch
we->default_timeout = default_timeout;
DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(wg->expectations, we, prev, next);
}
+
+void functions_evloop_cancel_threads(struct functions_evloop_globals *wg){
+ for(size_t i = 0; i < wg->workers ; i++)
+ netdata_thread_cancel(wg->worker_threads[i]);
+
+ netdata_thread_cancel(wg->reader_thread);
+}
diff --git a/libnetdata/functions_evloop/functions_evloop.h b/libnetdata/functions_evloop/functions_evloop.h
index ee0f72cb..e5e83e95 100644
--- a/libnetdata/functions_evloop/functions_evloop.h
+++ b/libnetdata/functions_evloop/functions_evloop.h
@@ -46,12 +46,15 @@
#define PLUGINSD_KEYWORD_EXIT "EXIT"
+#define PLUGINSD_KEYWORD_SLOT "SLOT" // to change the length of this, update pluginsd_extract_chart_slot() too
+
#define PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT 10 // seconds
typedef void (*functions_evloop_worker_execute_t)(const char *transaction, char *function, int timeout, bool *cancelled);
struct functions_evloop_worker_job;
struct functions_evloop_globals *functions_evloop_init(size_t worker_threads, const char *tag, netdata_mutex_t *stdout_mutex, bool *plugin_should_exit);
void functions_evloop_add_function(struct functions_evloop_globals *wg, const char *function, functions_evloop_worker_execute_t cb, time_t default_timeout);
+void functions_evloop_cancel_threads(struct functions_evloop_globals *wg);
#define pluginsd_function_result_begin_to_buffer(wb, transaction, code, content_type, expires) \
diff --git a/libnetdata/gorilla/README.md b/libnetdata/gorilla/README.md
new file mode 100644
index 00000000..dc3718d1
--- /dev/null
+++ b/libnetdata/gorilla/README.md
@@ -0,0 +1,39 @@
+# Gorilla compression and decompression
+
+This provides an alternative way of representing values stored in database
+pages. Instead of allocating and using a page of fixed size, ie. 4096 bytes,
+the Gorilla implementation adds support for dynamically sized pages that
+contain a variable number of Gorilla buffers.
+
+Each buffer takes 512 bytes and compresses incoming data using the Gorilla
+compression:
+
+- The very first value is stored as it is.
+- For each new value, Gorilla compression doesn't store the value itself. Instead,
+it computes the difference (XOR) between the new value and the previous value.
+- If the XOR result is zero (meaning the new value is identical to the previous
+value), we store just a single bit set to `1`.
+- If the XOR result is not zero (meaning the new value differs from the previous):
+ - We store a `0` bit to indicate the change.
+ - We compute the leading-zero count (LZC) of the XOR result, and compare it
+ with the previous LZC. If the two LZCs are equal we store a `1` bit.
+ - If the LZCs are different we use 5 bits to store the new LZC, and we store
+ the rest of the value (ie. without its LZC) in the buffer.
+
+A Gorilla page can have multiple Gorilla buffers. If the values of a metric
+are highly compressible, just one Gorilla buffer is able to store all the values
+that otherwise would require a regular 4096 byte page, ie. we can use just 512
+bytes instead. In the worst case scenario (for metrics whose values are not
+compressible at all), a Gorilla page might end up having `9` Gorilla buffers,
+consuming 4608 bytes. In practice, this is pretty rare and does not negate
+the effect of compression for the metrics.
+
+When a Gorilla page is full, ie. it contains 1024 slots/values, we serialize
+the linked-list of Gorilla buffers directly to disk. During deserialization,
+eg. when performing a DBEngine query, the Gorilla page is loaded from the disk and
+its linked-list entries are patched to point to the new memory allocated for
+serving the query results.
+
+Overall, on a real agent the Gorilla compression scheme reduces memory
+consumption by approximately 30%, which can be several GiB of RAM for parents
+having hundreds, or even thousands of children streaming to them.
diff --git a/libnetdata/gorilla/fuzzer.sh b/libnetdata/gorilla/fuzzer.sh
index 9dfdec05..19098a61 100755
--- a/libnetdata/gorilla/fuzzer.sh
+++ b/libnetdata/gorilla/fuzzer.sh
@@ -11,4 +11,4 @@ clang++ \
-fsanitize=fuzzer \
-o gorilla_fuzzer gorilla.cc
-./gorilla_fuzzer -workers=8 -jobs=8
+./gorilla_fuzzer -workers=12 -jobs=16
diff --git a/libnetdata/gorilla/gorilla.cc b/libnetdata/gorilla/gorilla.cc
index af2f7400..e6138ce3 100644
--- a/libnetdata/gorilla/gorilla.cc
+++ b/libnetdata/gorilla/gorilla.cc
@@ -17,413 +17,344 @@ static constexpr size_t bit_size() noexcept
return (sizeof(T) * CHAR_BIT);
}
-/*
- * Low-level bitstream operations, allowing us to read/write individual bits.
-*/
-
-template<typename Word>
-struct bit_stream_t {
- Word *buffer;
- size_t capacity;
- size_t position;
-};
-
-template<typename Word>
-static bit_stream_t<Word> bit_stream_new(Word *buffer, Word capacity) {
- bit_stream_t<Word> bs;
-
- bs.buffer = buffer;
- bs.capacity = capacity * bit_size<Word>();
- bs.position = 0;
-
- return bs;
-}
-
-template<typename Word>
-static bool bit_stream_write(bit_stream_t<Word> *bs, Word value, size_t nbits) {
- assert(nbits > 0 && nbits <= bit_size<Word>());
- assert(bs->capacity >= (bs->position + nbits));
+static void bit_buffer_write(uint32_t *buf, size_t pos, uint32_t v, size_t nbits)
+{
+ assert(nbits > 0 && nbits <= bit_size<uint32_t>());
- if (bs->position + nbits > bs->capacity) {
- return false;
- }
+ const size_t index = pos / bit_size<uint32_t>();
+ const size_t offset = pos % bit_size<uint32_t>();
- const size_t index = bs->position / bit_size<Word>();
- const size_t offset = bs->position % bit_size<Word>();
- bs->position += nbits;
+ pos += nbits;
if (offset == 0) {
- bs->buffer[index] = value;
+ buf[index] = v;
} else {
- const size_t remaining_bits = bit_size<Word>() - offset;
+ const size_t remaining_bits = bit_size<uint32_t>() - offset;
// write the lower part of the value
- const Word low_bits_mask = ((Word) 1 << remaining_bits) - 1;
- const Word lowest_bits_in_value = value & low_bits_mask;
- bs->buffer[index] |= (lowest_bits_in_value << offset);
+ const uint32_t low_bits_mask = ((uint32_t) 1 << remaining_bits) - 1;
+ const uint32_t lowest_bits_in_value = v & low_bits_mask;
+ buf[index] |= (lowest_bits_in_value << offset);
if (nbits > remaining_bits) {
// write the upper part of the value
- const Word high_bits_mask = ~low_bits_mask;
- const Word highest_bits_in_value = (value & high_bits_mask) >> (remaining_bits);
- bs->buffer[index + 1] = highest_bits_in_value;
+ const uint32_t high_bits_mask = ~low_bits_mask;
+ const uint32_t highest_bits_in_value = (v & high_bits_mask) >> (remaining_bits);
+ buf[index + 1] = highest_bits_in_value;
}
}
-
- return true;
}
-template<typename Word>
-static bool bit_stream_read(bit_stream_t<Word> *bs, Word *value, size_t nbits) {
- assert(nbits > 0 && nbits <= bit_size<Word>());
- assert(bs->capacity >= (bs->position + nbits));
+static void bit_buffer_read(const uint32_t *buf, size_t pos, uint32_t *v, size_t nbits)
+{
+ assert(nbits > 0 && nbits <= bit_size<uint32_t>());
- if (bs->position + nbits > bs->capacity) {
- return false;
- }
+ const size_t index = pos / bit_size<uint32_t>();
+ const size_t offset = pos % bit_size<uint32_t>();
- const size_t index = bs->position / bit_size<Word>();
- const size_t offset = bs->position % bit_size<Word>();
- bs->position += nbits;
+ pos += nbits;
if (offset == 0) {
- *value = (nbits == bit_size<Word>()) ?
- bs->buffer[index] :
- bs->buffer[index] & (((Word) 1 << nbits) - 1);
+ *v = (nbits == bit_size<uint32_t>()) ?
+ buf[index] :
+ buf[index] & (((uint32_t) 1 << nbits) - 1);
} else {
- const size_t remaining_bits = bit_size<Word>() - offset;
+ const size_t remaining_bits = bit_size<uint32_t>() - offset;
// extract the lower part of the value
if (nbits < remaining_bits) {
- *value = (bs->buffer[index] >> offset) & (((Word) 1 << nbits) - 1);
+ *v = (buf[index] >> offset) & (((uint32_t) 1 << nbits) - 1);
} else {
- *value = (bs->buffer[index] >> offset) & (((Word) 1 << remaining_bits) - 1);
+ *v = (buf[index] >> offset) & (((uint32_t) 1 << remaining_bits) - 1);
nbits -= remaining_bits;
- *value |= (bs->buffer[index + 1] & (((Word) 1 << nbits) - 1)) << remaining_bits;
+ *v |= (buf[index + 1] & (((uint32_t) 1 << nbits) - 1)) << remaining_bits;
}
}
-
- return true;
}
-/*
- * High-level Gorilla codec implementation
-*/
-
-template<typename Word>
-struct bit_code_t {
- bit_stream_t<Word> bs;
- Word entries;
- Word prev_number;
- Word prev_xor;
- Word prev_xor_lzc;
-};
-
-template<typename Word>
-static void bit_code_init(bit_code_t<Word> *bc, Word *buffer, Word capacity) {
- bc->bs = bit_stream_new(buffer, capacity);
-
- bc->entries = 0;
- bc->prev_number = 0;
- bc->prev_xor = 0;
- bc->prev_xor_lzc = 0;
-
- // reserved two words:
- // Buffer[0] -> number of entries written
- // Buffer[1] -> number of bits written
+gorilla_writer_t gorilla_writer_init(gorilla_buffer_t *gbuf, size_t n)
+{
+ gorilla_writer_t gw = gorilla_writer_t {
+ .head_buffer = gbuf,
+ .last_buffer = NULL,
+ .prev_number = 0,
+ .prev_xor_lzc = 0,
+ .capacity = 0
+ };
- bc->bs.position += 2 * bit_size<Word>();
+ gorilla_writer_add_buffer(&gw, gbuf, n);
+ return gw;
}
-template<typename Word>
-static bool bit_code_read(bit_code_t<Word> *bc, Word *number) {
- bit_stream_t<Word> *bs = &bc->bs;
-
- bc->entries++;
+void gorilla_writer_add_buffer(gorilla_writer_t *gw, gorilla_buffer_t *gbuf, size_t n)
+{
+ gbuf->header.next = NULL;
+ gbuf->header.entries = 0;
+ gbuf->header.nbits = 0;
- // read the first number
- if (bc->entries == 1) {
- bool ok = bit_stream_read(bs, number, bit_size<Word>());
- bc->prev_number = *number;
- return ok;
- }
+ uint32_t capacity = (n * bit_size<uint32_t>()) - (sizeof(gorilla_header_t) * CHAR_BIT);
- // process same-number bit
- Word is_same_number;
- if (!bit_stream_read(bs, &is_same_number, 1)) {
- return false;
- }
+ gw->prev_number = 0;
+ gw->prev_xor_lzc = 0;
+ gw->capacity = capacity;
- if (is_same_number) {
- *number = bc->prev_number;
- return true;
- }
+ if (gw->last_buffer)
+ gw->last_buffer->header.next = gbuf;
- // proceess same-xor-lzc bit
- Word xor_lzc = bc->prev_xor_lzc;
-
- Word same_xor_lzc;
- if (!bit_stream_read(bs, &same_xor_lzc, 1)) {
- return false;
- }
+ __atomic_store_n(&gw->last_buffer, gbuf, __ATOMIC_RELAXED);
+}
- if (!same_xor_lzc) {
- if (!bit_stream_read(bs, &xor_lzc, (bit_size<Word>() == 32) ? 5 : 6)) {
- return false;
- }
- }
+uint32_t gorilla_writer_entries(const gorilla_writer_t *gw) {
+ uint32_t entries = 0;
- // process the non-lzc suffix
- Word xor_value = 0;
- if (!bit_stream_read(bs, &xor_value, bit_size<Word>() - xor_lzc)) {
- return false;
- }
+ const gorilla_buffer_t *curr_gbuf = __atomic_load_n(&gw->head_buffer, __ATOMIC_SEQ_CST);
+ do {
+ const gorilla_buffer_t *next_gbuf = __atomic_load_n(&curr_gbuf->header.next, __ATOMIC_SEQ_CST);
- *number = (bc->prev_number ^ xor_value);
+ entries += __atomic_load_n(&curr_gbuf->header.entries, __ATOMIC_SEQ_CST);
- bc->prev_number = *number;
- bc->prev_xor_lzc = xor_lzc;
- bc->prev_xor = xor_value;
+ curr_gbuf = next_gbuf;
+ } while (curr_gbuf);
- return true;
+ return entries;
}
-template<typename Word>
-static bool bit_code_write(bit_code_t<Word> *bc, const Word number) {
- bit_stream_t<Word> *bs = &bc->bs;
- Word position = bs->position;
-
- bc->entries++;
+bool gorilla_writer_write(gorilla_writer_t *gw, uint32_t number)
+{
+ gorilla_header_t *hdr = &gw->last_buffer->header;
+ uint32_t *data = gw->last_buffer->data;
// this is the first number we are writing
- if (bc->entries == 1) {
- bc->prev_number = number;
- return bit_stream_write(bs, number, bit_size<Word>());
- }
-
- // write true/false based on whether we got the same number or not.
- if (number == bc->prev_number) {
- return bit_stream_write(bs, static_cast<Word>(1), 1);
- } else {
- if (bit_stream_write(bs, static_cast<Word>(0), 1) == false) {
+ if (hdr->entries == 0) {
+ if (hdr->nbits + bit_size<uint32_t>() >= gw->capacity)
return false;
- }
- }
-
- // otherwise:
- // - compute the non-zero xor
- // - find its leading-zero count
-
- Word xor_value = bc->prev_number ^ number;
- // FIXME: Use SFINAE
- Word xor_lzc = (bit_size<Word>() == 32) ? __builtin_clz(xor_value) : __builtin_clzll(xor_value);
- Word is_xor_lzc_same = (xor_lzc == bc->prev_xor_lzc) ? 1 : 0;
-
- if (is_xor_lzc_same) {
- // xor-lzc is same
- if (bit_stream_write(bs, static_cast<Word>(1), 1) == false) {
- goto RET_FALSE;
- }
- } else {
- // xor-lzc is different
- if (bit_stream_write(bs, static_cast<Word>(0), 1) == false) {
- goto RET_FALSE;
- }
-
- if (bit_stream_write(bs, xor_lzc, (bit_size<Word>() == 32) ? 5 : 6) == false) {
- goto RET_FALSE;
- }
- }
+ bit_buffer_write(data, hdr->nbits, number, bit_size<uint32_t>());
- // write the bits of the XOR value without the LZC prefix
- if (bit_stream_write(bs, xor_value, bit_size<Word>() - xor_lzc) == false) {
- goto RET_FALSE;
+ __atomic_fetch_add(&hdr->nbits, bit_size<uint32_t>(), __ATOMIC_RELAXED);
+ __atomic_fetch_add(&hdr->entries, 1, __ATOMIC_RELAXED);
+ gw->prev_number = number;
+ return true;
}
- bc->prev_number = number;
- bc->prev_xor_lzc = xor_lzc;
- return true;
-
-RET_FALSE:
- bc->bs.position = position;
- return false;
-}
+ // write true/false based on whether we got the same number or not.
+ if (number == gw->prev_number) {
+ if (hdr->nbits + 1 >= gw->capacity)
+ return false;
-// only valid for writers
-template<typename Word>
-static bool bit_code_flush(bit_code_t<Word> *bc) {
- bit_stream_t<Word> *bs = &bc->bs;
+ bit_buffer_write(data, hdr->nbits, static_cast<uint32_t>(1), 1);
+ __atomic_fetch_add(&hdr->nbits, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&hdr->entries, 1, __ATOMIC_RELAXED);
+ return true;
+ }
- Word num_entries_written = bc->entries;
- Word num_bits_written = bs->position;
+ if (hdr->nbits + 1 >= gw->capacity)
+ return false;
+ bit_buffer_write(data, hdr->nbits, static_cast<uint32_t>(0), 1);
+ __atomic_fetch_add(&hdr->nbits, 1, __ATOMIC_RELAXED);
- // we want to write these at the beginning
- bs->position = 0;
+ uint32_t xor_value = gw->prev_number ^ number;
+ uint32_t xor_lzc = (bit_size<uint32_t>() == 32) ? __builtin_clz(xor_value) : __builtin_clzll(xor_value);
+ uint32_t is_xor_lzc_same = (xor_lzc == gw->prev_xor_lzc) ? 1 : 0;
- if (!bit_stream_write(bs, num_entries_written, bit_size<Word>())) {
+ if (hdr->nbits + 1 >= gw->capacity)
return false;
+ bit_buffer_write(data, hdr->nbits, is_xor_lzc_same, 1);
+ __atomic_fetch_add(&hdr->nbits, 1, __ATOMIC_RELAXED);
+
+ if (!is_xor_lzc_same) {
+ if (hdr->nbits + 1 >= gw->capacity)
+ return false;
+ bit_buffer_write(data, hdr->nbits, xor_lzc, (bit_size<uint32_t>() == 32) ? 5 : 6);
+ __atomic_fetch_add(&hdr->nbits, (bit_size<uint32_t>() == 32) ? 5 : 6, __ATOMIC_RELAXED);
}
- if (!bit_stream_write(bs, num_bits_written, bit_size<Word>())) {
+ // write the bits of the XOR'd value without the LZC prefix
+ if (hdr->nbits + (bit_size<uint32_t>() - xor_lzc) >= gw->capacity)
return false;
- }
+ bit_buffer_write(data, hdr->nbits, xor_value, bit_size<uint32_t>() - xor_lzc);
+ __atomic_fetch_add(&hdr->nbits, bit_size<uint32_t>() - xor_lzc, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&hdr->entries, 1, __ATOMIC_RELAXED);
- bs->position = num_bits_written;
+ gw->prev_number = number;
+ gw->prev_xor_lzc = xor_lzc;
return true;
}
-// only valid for readers
-template<typename Word>
-static bool bit_code_info(bit_code_t<Word> *bc, Word *num_entries_written,
- Word *num_bits_written) {
- bit_stream_t<Word> *bs = &bc->bs;
+gorilla_buffer_t *gorilla_writer_drop_head_buffer(gorilla_writer_t *gw) {
+ if (!gw->head_buffer)
+ return NULL;
- assert(bs->position == 2 * bit_size<Word>());
- if (bs->capacity < (2 * bit_size<Word>())) {
- return false;
- }
-
- if (num_entries_written) {
- *num_entries_written = bs->buffer[0];
- }
- if (num_bits_written) {
- *num_bits_written = bs->buffer[1];
- }
-
- return true;
+ gorilla_buffer_t *curr_head = gw->head_buffer;
+ gorilla_buffer_t *next_head = gw->head_buffer->header.next;
+ __atomic_store_n(&gw->head_buffer, next_head, __ATOMIC_RELAXED);
+ return curr_head;
}
-template<typename Word>
-static size_t gorilla_encode(Word *dst, Word dst_len, const Word *src, Word src_len) {
- bit_code_t<Word> bcw;
+uint32_t gorilla_writer_nbytes(const gorilla_writer_t *gw)
+{
+ uint32_t nbits = 0;
- bit_code_init(&bcw, dst, dst_len);
+ const gorilla_buffer_t *curr_gbuf = __atomic_load_n(&gw->head_buffer, __ATOMIC_SEQ_CST);
+ do {
+ const gorilla_buffer_t *next_gbuf = __atomic_load_n(&curr_gbuf->header.next, __ATOMIC_SEQ_CST);
- for (size_t i = 0; i != src_len; i++) {
- if (!bit_code_write(&bcw, src[i]))
- return 0;
- }
+ nbits += __atomic_load_n(&curr_gbuf->header.nbits, __ATOMIC_SEQ_CST);
- if (!bit_code_flush(&bcw))
- return 0;
+ curr_gbuf = next_gbuf;
+ } while (curr_gbuf);
- return src_len;
+ return (nbits + (CHAR_BIT - 1)) / CHAR_BIT;
}
-template<typename Word>
-static size_t gorilla_decode(Word *dst, Word dst_len, const Word *src, Word src_len) {
- bit_code_t<Word> bcr;
+bool gorilla_writer_serialize(const gorilla_writer_t *gw, uint8_t *dst, uint32_t dst_size) {
+ const gorilla_buffer_t *curr_gbuf = gw->head_buffer;
- bit_code_init(&bcr, (Word *) src, src_len);
+ do {
+ const gorilla_buffer_t *next_gbuf = curr_gbuf->header.next;
- Word num_entries;
- if (!bit_code_info(&bcr, &num_entries, (Word *) NULL)) {
- return 0;
- }
- if (num_entries > dst_len) {
- return 0;
- }
-
- for (size_t i = 0; i != num_entries; i++) {
- if (!bit_code_read(&bcr, &dst[i]))
- return 0;
- }
+ size_t bytes = GORILLA_BUFFER_SIZE;
+ if (bytes > dst_size)
+ return false;
- return num_entries;
-}
+ memcpy(dst, curr_gbuf, bytes);
+ dst += bytes;
+ dst_size -= bytes;
-/*
- * Low-level public API
-*/
-
-// 32-bit API
+ curr_gbuf = next_gbuf;
+ } while (curr_gbuf);
-void bit_code_writer_u32_init(bit_code_writer_u32_t *bcw, uint32_t *buffer, uint32_t capacity) {
- bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcw;
- bit_code_init(bc, buffer, capacity);
+ return true;
}
-bool bit_code_writer_u32_write(bit_code_writer_u32_t *bcw, const uint32_t number) {
- bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcw;
- return bit_code_write(bc, number);
-}
+uint32_t gorilla_buffer_patch(gorilla_buffer_t *gbuf) {
+ gorilla_buffer_t *curr_gbuf = gbuf;
+ uint32_t n = curr_gbuf->header.entries;
-bool bit_code_writer_u32_flush(bit_code_writer_u32_t *bcw) {
- bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcw;
- return bit_code_flush(bc);
-}
+ while (curr_gbuf->header.next) {
+ uint32_t *buf = reinterpret_cast<uint32_t *>(gbuf);
+ gbuf = reinterpret_cast<gorilla_buffer_t *>(&buf[GORILLA_BUFFER_SLOTS]);
-void bit_code_reader_u32_init(bit_code_reader_u32_t *bcr, uint32_t *buffer, uint32_t capacity) {
- bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcr;
- bit_code_init(bc, buffer, capacity);
-}
+ assert(((uintptr_t) (gbuf) % sizeof(uintptr_t)) == 0 &&
+ "Gorilla buffer not aligned to uintptr_t");
-bool bit_code_reader_u32_read(bit_code_reader_u32_t *bcr, uint32_t *number) {
- bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcr;
- return bit_code_read(bc, number);
-}
+ curr_gbuf->header.next = gbuf;
+ curr_gbuf = curr_gbuf->header.next;
-bool bit_code_reader_u32_info(bit_code_reader_u32_t *bcr, uint32_t *num_entries_written,
- uint32_t *num_bits_written) {
- bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcr;
- return bit_code_info(bc, num_entries_written, num_bits_written);
+ n += curr_gbuf->header.entries;
+ }
+
+ return n;
}
-// 64-bit API
+gorilla_reader_t gorilla_writer_get_reader(const gorilla_writer_t *gw)
+{
+ const gorilla_buffer_t *buffer = __atomic_load_n(&gw->head_buffer, __ATOMIC_SEQ_CST);
-void bit_code_writer_u64_init(bit_code_writer_u64_t *bcw, uint64_t *buffer, uint64_t capacity) {
- bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcw;
- bit_code_init(bc, buffer, capacity);
-}
+ uint32_t entries = __atomic_load_n(&buffer->header.entries, __ATOMIC_SEQ_CST);
+ uint32_t capacity = __atomic_load_n(&buffer->header.nbits, __ATOMIC_SEQ_CST);
-bool bit_code_writer_u64_write(bit_code_writer_u64_t *bcw, const uint64_t number) {
- bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcw;
- return bit_code_write(bc, number);
+ return gorilla_reader_t {
+ .buffer = buffer,
+ .entries = entries,
+ .index = 0,
+ .capacity = capacity,
+ .position = 0,
+ .prev_number = 0,
+ .prev_xor_lzc = 0,
+ .prev_xor = 0,
+ };
}
-bool bit_code_writer_u64_flush(bit_code_writer_u64_t *bcw) {
- bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcw;
- return bit_code_flush(bc);
-}
+gorilla_reader_t gorilla_reader_init(gorilla_buffer_t *gbuf)
+{
+ uint32_t entries = __atomic_load_n(&gbuf->header.entries, __ATOMIC_SEQ_CST);
+ uint32_t capacity = __atomic_load_n(&gbuf->header.nbits, __ATOMIC_SEQ_CST);
+
+ return gorilla_reader_t {
+ .buffer = gbuf,
+ .entries = entries,
+ .index = 0,
+ .capacity = capacity,
+ .position = 0,
+ .prev_number = 0,
+ .prev_xor_lzc = 0,
+ .prev_xor = 0,
+ };
+}
+
+bool gorilla_reader_read(gorilla_reader_t *gr, uint32_t *number)
+{
+ const uint32_t *data = gr->buffer->data;
+
+ if (gr->index + 1 > gr->entries) {
+ // We don't have any more entries to return. However, the writer
+ // might have updated the buffer's entries. We need to check once
+ // more in case more elements were added.
+ gr->entries = __atomic_load_n(&gr->buffer->header.entries, __ATOMIC_SEQ_CST);
+ gr->capacity = __atomic_load_n(&gr->buffer->header.nbits, __ATOMIC_SEQ_CST);
+
+ // if the reader's current buffer has not been updated, we need to
+ // check if it has a pointer to a next buffer.
+ if (gr->index + 1 > gr->entries) {
+ gorilla_buffer_t *next_buffer = __atomic_load_n(&gr->buffer->header.next, __ATOMIC_SEQ_CST);
+
+ if (!next_buffer) {
+ // fprintf(stderr, "Consumed reader with %zu entries from buffer %p\n (No more buffers to read from)", gr->length, gr->buffer);
+ return false;
+ }
+
+ // fprintf(stderr, "Consumed reader with %zu entries from buffer %p\n", gr->length, gr->buffer);
+ *gr = gorilla_reader_init(next_buffer);
+ return gorilla_reader_read(gr, number);
+ }
+ }
-void bit_code_reader_u64_init(bit_code_reader_u64_t *bcr, uint64_t *buffer, uint64_t capacity) {
- bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcr;
- bit_code_init(bc, buffer, capacity);
-}
+ // read the first number
+ if (gr->index == 0) {
+ bit_buffer_read(data, gr->position, number, bit_size<uint32_t>());
-bool bit_code_reader_u64_read(bit_code_reader_u64_t *bcr, uint64_t *number) {
- bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcr;
- return bit_code_read(bc, number);
-}
+ gr->index++;
+ gr->position += bit_size<uint32_t>();
+ gr->prev_number = *number;
+ return true;
+ }
-bool bit_code_reader_u64_info(bit_code_reader_u64_t *bcr, uint64_t *num_entries_written,
- uint64_t *num_bits_written) {
- bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcr;
- return bit_code_info(bc, num_entries_written, num_bits_written);
-}
+ // process same-number bit
+ uint32_t is_same_number;
+ bit_buffer_read(data, gr->position, &is_same_number, 1);
+ gr->position++;
-/*
- * High-level public API
-*/
+ if (is_same_number) {
+ *number = gr->prev_number;
+ gr->index++;
+ return true;
+ }
-// 32-bit API
+ // process same-xor-lzc bit
+ uint32_t xor_lzc = gr->prev_xor_lzc;
-size_t gorilla_encode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len) {
- return gorilla_encode(dst, (uint32_t) dst_len, src, (uint32_t) src_len);
-}
+ uint32_t same_xor_lzc;
+ bit_buffer_read(data, gr->position, &same_xor_lzc, 1);
+ gr->position++;
-size_t gorilla_decode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len) {
- return gorilla_decode(dst, (uint32_t) dst_len, src, (uint32_t) src_len);
-}
+ if (!same_xor_lzc) {
+ bit_buffer_read(data, gr->position, &xor_lzc, (bit_size<uint32_t>() == 32) ? 5 : 6);
+ gr->position += (bit_size<uint32_t>() == 32) ? 5 : 6;
+ }
-// 64-bit API
+ // process the non-lzc suffix
+ uint32_t xor_value = 0;
+ bit_buffer_read(data, gr->position, &xor_value, bit_size<uint32_t>() - xor_lzc);
+ gr->position += bit_size<uint32_t>() - xor_lzc;
-size_t gorilla_encode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len) {
- return gorilla_encode(dst, (uint64_t) dst_len, src, (uint64_t) src_len);
-}
+ *number = (gr->prev_number ^ xor_value);
+
+ gr->index++;
+ gr->prev_number = *number;
+ gr->prev_xor_lzc = xor_lzc;
+ gr->prev_xor = xor_value;
-size_t gorilla_decode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len) {
- return gorilla_decode(dst, (uint64_t) dst_len, src, (uint64_t) src_len);
+ return true;
}
/*
@@ -451,54 +382,69 @@ static std::vector<Word> random_vector(const uint8_t *data, size_t size) {
return V;
}
-template<typename Word>
-static void check_equal_buffers(Word *lhs, Word lhs_size, Word *rhs, Word rhs_size) {
- assert((lhs_size == rhs_size) && "Buffers have different size.");
+class Storage {
+public:
+ gorilla_buffer_t *alloc_buffer(size_t words) {
+ uint32_t *new_buffer = new uint32_t[words]();
+ assert(((((uintptr_t) new_buffer) % 8u) == 0) && "Unaligned buffer...");
+ Buffers.push_back(new_buffer);
+ return reinterpret_cast<gorilla_buffer_t *>(new_buffer);
+ }
- for (size_t i = 0; i != lhs_size; i++) {
- assert((lhs[i] == rhs[i]) && "Buffers differ");
+ void free_buffers() {
+ for (uint32_t *buffer : Buffers) {
+ delete[] buffer;
+ }
}
-}
+
+private:
+ std::vector<uint32_t *> Buffers;
+};
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
- // 32-bit tests
- {
- if (Size < 4)
- return 0;
-
- std::vector<uint32_t> RandomData = random_vector<uint32_t>(Data, Size);
- std::vector<uint32_t> EncodedData(10 * RandomData.capacity(), 0);
- std::vector<uint32_t> DecodedData(10 * RandomData.capacity(), 0);
-
- size_t num_entries_written = gorilla_encode_u32(EncodedData.data(), EncodedData.size(),
- RandomData.data(), RandomData.size());
- size_t num_entries_read = gorilla_decode_u32(DecodedData.data(), DecodedData.size(),
- EncodedData.data(), EncodedData.size());
-
- assert(num_entries_written == num_entries_read);
- check_equal_buffers(RandomData.data(), (uint32_t) RandomData.size(),
- DecodedData.data(), (uint32_t) RandomData.size());
+ if (Size < 4)
+ return 0;
+
+ std::vector<uint32_t> RandomData = random_vector<uint32_t>(Data, Size);
+
+ Storage S;
+ size_t words_per_buffer = 8;
+
+ /*
+ * write data
+ */
+ gorilla_buffer_t *first_buffer = S.alloc_buffer(words_per_buffer);
+ gorilla_writer_t gw = gorilla_writer_init(first_buffer, words_per_buffer);
+
+ for (size_t i = 0; i != RandomData.size(); i++) {
+ bool ok = gorilla_writer_write(&gw, RandomData[i]);
+ if (ok)
+ continue;
+
+ // add new buffer
+ gorilla_buffer_t *buffer = S.alloc_buffer(words_per_buffer);
+ gorilla_writer_add_buffer(&gw, buffer, words_per_buffer);
+
+ ok = gorilla_writer_write(&gw, RandomData[i]);
+ assert(ok && "Could not write data to new buffer!!!");
}
- // 64-bit tests
- {
- if (Size < 8)
- return 0;
- std::vector<uint64_t> RandomData = random_vector<uint64_t>(Data, Size);
- std::vector<uint64_t> EncodedData(10 * RandomData.capacity(), 0);
- std::vector<uint64_t> DecodedData(10 * RandomData.capacity(), 0);
+ /*
+ * read data
+ */
+ gorilla_reader_t gr = gorilla_writer_get_reader(&gw);
- size_t num_entries_written = gorilla_encode_u64(EncodedData.data(), EncodedData.size(),
- RandomData.data(), RandomData.size());
- size_t num_entries_read = gorilla_decode_u64(DecodedData.data(), DecodedData.size(),
- EncodedData.data(), EncodedData.size());
+ for (size_t i = 0; i != RandomData.size(); i++) {
+ uint32_t number = 0;
+ bool ok = gorilla_reader_read(&gr, &number);
+ assert(ok && "Failed to read number from gorilla buffer");
- assert(num_entries_written == num_entries_read);
- check_equal_buffers(RandomData.data(), (uint64_t) RandomData.size(),
- DecodedData.data(), (uint64_t) RandomData.size());
+ assert((number == RandomData[i])
+ && "Read wrong number from gorilla buffer");
}
+ S.free_buffers();
return 0;
}
@@ -523,17 +469,20 @@ static void BM_EncodeU32Numbers(benchmark::State& state) {
std::vector<uint32_t> EncodedData(10 * RandomData.capacity(), 0);
for (auto _ : state) {
- benchmark::DoNotOptimize(
- gorilla_encode_u32(EncodedData.data(), EncodedData.size(),
- RandomData.data(), RandomData.size())
- );
+ gorilla_writer_t gw = gorilla_writer_init(
+ reinterpret_cast<gorilla_buffer_t *>(EncodedData.data()),
+ EncodedData.size());
+
+ for (size_t i = 0; i != RandomData.size(); i++)
+ benchmark::DoNotOptimize(gorilla_writer_write(&gw, RandomData[i]));
+
benchmark::ClobberMemory();
}
state.SetItemsProcessed(NumItems * state.iterations());
state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint32_t));
}
-BENCHMARK(BM_EncodeU32Numbers);
+BENCHMARK(BM_EncodeU32Numbers)->ThreadRange(1, 16)->UseRealTime();
static void BM_DecodeU32Numbers(benchmark::State& state) {
std::random_device rd;
@@ -547,74 +496,27 @@ static void BM_DecodeU32Numbers(benchmark::State& state) {
std::vector<uint32_t> EncodedData(10 * RandomData.capacity(), 0);
std::vector<uint32_t> DecodedData(10 * RandomData.capacity(), 0);
- gorilla_encode_u32(EncodedData.data(), EncodedData.size(),
- RandomData.data(), RandomData.size());
-
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- gorilla_decode_u32(DecodedData.data(), DecodedData.size(),
- EncodedData.data(), EncodedData.size())
- );
- benchmark::ClobberMemory();
- }
-
- state.SetItemsProcessed(NumItems * state.iterations());
- state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint32_t));
-}
-// Register the function as a benchmark
-BENCHMARK(BM_DecodeU32Numbers);
-
-static void BM_EncodeU64Numbers(benchmark::State& state) {
- std::random_device rd;
- std::mt19937 mt(rd());
- std::uniform_int_distribution<uint64_t> dist(0x0, 0x0000FFFF);
+ gorilla_writer_t gw = gorilla_writer_init(
+ reinterpret_cast<gorilla_buffer_t *>(EncodedData.data()),
+ EncodedData.size());
- std::vector<uint64_t> RandomData;
- for (size_t idx = 0; idx != 1024; idx++) {
- RandomData.push_back(dist(mt));
- }
- std::vector<uint64_t> EncodedData(10 * RandomData.capacity(), 0);
+ for (size_t i = 0; i != RandomData.size(); i++)
+ gorilla_writer_write(&gw, RandomData[i]);
for (auto _ : state) {
- benchmark::DoNotOptimize(
- gorilla_encode_u64(EncodedData.data(), EncodedData.size(),
- RandomData.data(), RandomData.size())
- );
- benchmark::ClobberMemory();
- }
+ gorilla_reader_t gr = gorilla_reader_init(reinterpret_cast<gorilla_buffer_t *>(EncodedData.data()));
- state.SetItemsProcessed(NumItems * state.iterations());
- state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint64_t));
-}
-BENCHMARK(BM_EncodeU64Numbers);
-
-static void BM_DecodeU64Numbers(benchmark::State& state) {
- std::random_device rd;
- std::mt19937 mt(rd());
- std::uniform_int_distribution<uint64_t> dist(0x0, 0xFFFFFFFF);
-
- std::vector<uint64_t> RandomData;
- for (size_t idx = 0; idx != 1024; idx++) {
- RandomData.push_back(dist(mt));
- }
- std::vector<uint64_t> EncodedData(10 * RandomData.capacity(), 0);
- std::vector<uint64_t> DecodedData(10 * RandomData.capacity(), 0);
-
- gorilla_encode_u64(EncodedData.data(), EncodedData.size(),
- RandomData.data(), RandomData.size());
+ for (size_t i = 0; i != RandomData.size(); i++) {
+ uint32_t number = 0;
+ benchmark::DoNotOptimize(gorilla_reader_read(&gr, &number));
+ }
- for (auto _ : state) {
- benchmark::DoNotOptimize(
- gorilla_decode_u64(DecodedData.data(), DecodedData.size(),
- EncodedData.data(), EncodedData.size())
- );
benchmark::ClobberMemory();
}
state.SetItemsProcessed(NumItems * state.iterations());
- state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint64_t));
+ state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint32_t));
}
-// Register the function as a benchmark
-BENCHMARK(BM_DecodeU64Numbers);
+BENCHMARK(BM_DecodeU32Numbers)->ThreadRange(1, 16)->UseRealTime();
#endif /* ENABLE_BENCHMARK */
diff --git a/libnetdata/gorilla/gorilla.h b/libnetdata/gorilla/gorilla.h
index 12bec42c..d57c07cf 100644
--- a/libnetdata/gorilla/gorilla.h
+++ b/libnetdata/gorilla/gorilla.h
@@ -11,47 +11,64 @@
extern "C" {
#endif
-/*
- * Low-level public API
-*/
+struct gorilla_buffer;
-// 32-bit API
+typedef struct {
+ struct gorilla_buffer *next;
+ uint32_t entries;
+ uint32_t nbits;
+} gorilla_header_t;
-typedef struct bit_code_writer_u32 bit_code_writer_u32_t;
-typedef struct bit_code_reader_u32 bit_code_reader_u32_t;
+typedef struct gorilla_buffer {
+ gorilla_header_t header;
+ uint32_t data[];
+} gorilla_buffer_t;
-void bit_code_writer_u32_init(bit_code_writer_u32_t *bcw, uint32_t *buffer, uint32_t capacity);
-bool bit_code_writer_u32_write(bit_code_writer_u32_t *bcw, const uint32_t number);
-bool bit_code_writer_u32_flush(bit_code_writer_u32_t *bcw);
+typedef struct {
+ gorilla_buffer_t *head_buffer;
+ gorilla_buffer_t *last_buffer;
-void bit_code_reader_u32_init(bit_code_reader_u32_t *bcr, uint32_t *buffer, uint32_t capacity);
-bool bit_code_reader_u32_read(bit_code_reader_u32_t *bcr, uint32_t *number);
-bool bit_code_reader_u32_info(bit_code_reader_u32_t *bcr, uint32_t *num_entries_written,
- uint64_t *num_bits_written);
+ uint32_t prev_number;
+ uint32_t prev_xor_lzc;
-// 64-bit API
+ // in bits
+ uint32_t capacity;
+} gorilla_writer_t;
-typedef struct bit_code_writer_u64 bit_code_writer_u64_t;
-typedef struct bit_code_reader_u64 bit_code_reader_u64_t;
+typedef struct {
+ const gorilla_buffer_t *buffer;
-void bit_code_writer_u64_init(bit_code_writer_u64_t *bcw, uint64_t *buffer, uint64_t capacity);
-bool bit_code_writer_u64_write(bit_code_writer_u64_t *bcw, const uint64_t number);
-bool bit_code_writer_u64_flush(bit_code_writer_u64_t *bcw);
+ // number of values
+ size_t entries;
+ size_t index;
-void bit_code_reader_u64_init(bit_code_reader_u64_t *bcr, uint64_t *buffer, uint64_t capacity);
-bool bit_code_reader_u64_read(bit_code_reader_u64_t *bcr, uint64_t *number);
-bool bit_code_reader_u64_info(bit_code_reader_u64_t *bcr, uint64_t *num_entries_written,
- uint64_t *num_bits_written);
+ // in bits
+ size_t capacity; // FIXME: this not needed on the reader's side
+ size_t position;
-/*
- * High-level public API
-*/
+ uint32_t prev_number;
+ uint32_t prev_xor_lzc;
+ uint32_t prev_xor;
+} gorilla_reader_t;
-size_t gorilla_encode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len);
-size_t gorilla_decode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len);
+gorilla_writer_t gorilla_writer_init(gorilla_buffer_t *gbuf, size_t n);
+void gorilla_writer_add_buffer(gorilla_writer_t *gw, gorilla_buffer_t *gbuf, size_t n);
+bool gorilla_writer_write(gorilla_writer_t *gw, uint32_t number);
+uint32_t gorilla_writer_entries(const gorilla_writer_t *gw);
-size_t gorilla_encode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len);
-size_t gorilla_decode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len);
+gorilla_reader_t gorilla_writer_get_reader(const gorilla_writer_t *gw);
+
+gorilla_buffer_t *gorilla_writer_drop_head_buffer(gorilla_writer_t *gw);
+
+uint32_t gorilla_writer_nbytes(const gorilla_writer_t *gw);
+bool gorilla_writer_serialize(const gorilla_writer_t *gw, uint8_t *dst, uint32_t dst_size);
+
+uint32_t gorilla_buffer_patch(gorilla_buffer_t *buf);
+gorilla_reader_t gorilla_reader_init(gorilla_buffer_t *buf);
+bool gorilla_reader_read(gorilla_reader_t *gr, uint32_t *number);
+
+#define GORILLA_BUFFER_SLOTS 128
+#define GORILLA_BUFFER_SIZE (GORILLA_BUFFER_SLOTS * sizeof(uint32_t))
#ifdef __cplusplus
}
diff --git a/libnetdata/health/health.c b/libnetdata/health/health.c
index f2dc46e3..2b3b8f7d 100644
--- a/libnetdata/health/health.c
+++ b/libnetdata/health/health.c
@@ -29,9 +29,13 @@ void health_silencers_add(SILENCER *silencer) {
// Add the created instance to the linked list in silencers
silencer->next = silencers->silencers;
silencers->silencers = silencer;
- netdata_log_debug(D_HEALTH, "HEALTH command API: Added silencer %s:%s:%s:%s", silencer->alarms,
- silencer->charts, silencer->contexts, silencer->hosts
- );
+ netdata_log_debug(
+ D_HEALTH,
+ "HEALTH command API: Added silencer %s:%s:%s:%s",
+ silencer->alarms,
+ silencer->charts,
+ silencer->contexts,
+ silencer->hosts);
}
/**
@@ -71,10 +75,6 @@ SILENCER *health_silencers_addparam(SILENCER *silencer, char *key, char *value)
(hash == hash_host && !strcasecmp(key, HEALTH_HOST_KEY))
) {
silencer = create_silencer();
- if(!silencer) {
- netdata_log_error("Cannot add a new silencer to Netdata");
- return NULL;
- }
}
}
@@ -160,10 +160,10 @@ int health_silencers_json_read_callback(JSON_ENTRY *e)
* @return It returns 0 on success and -1 otherwise
*/
int health_initialize_global_silencers() {
- silencers = mallocz(sizeof(SILENCERS));
- silencers->all_alarms=0;
- silencers->stype=STYPE_NONE;
- silencers->silencers=NULL;
+ silencers = mallocz(sizeof(SILENCERS));
+ silencers->all_alarms = 0;
+ silencers->stype = STYPE_NONE;
+ silencers->silencers = NULL;
return 0;
}
diff --git a/libnetdata/http/http_defs.h b/libnetdata/http/http_defs.h
index 71bd442c..635f703e 100644
--- a/libnetdata/http/http_defs.h
+++ b/libnetdata/http/http_defs.h
@@ -3,6 +3,13 @@
#ifndef NETDATA_HTTP_DEFS_H
#define NETDATA_HTTP_DEFS_H
+#define HTTP_1_1 " HTTP/1.1"
+#define HTTP_HDR_END "\r\n\r\n"
+#define HTTP_ENDL "\r\n"
+
+// HTTP_CODES 1XX
+#define HTTP_RESP_SWITCH_PROTO 101
+
// HTTP_CODES 2XX Success
#define HTTP_RESP_OK 200
diff --git a/libnetdata/inlined.h b/libnetdata/inlined.h
index 9c07d97b..535b791e 100644
--- a/libnetdata/inlined.h
+++ b/libnetdata/inlined.h
@@ -426,13 +426,24 @@ static inline void sanitize_json_string(char *dst, const char *src, size_t dst_s
}
static inline bool sanitize_command_argument_string(char *dst, const char *src, size_t dst_size) {
+ if(dst_size)
+ *dst = '\0';
+
// skip leading dashes
- while (src[0] == '-')
+ while (*src == '-')
src++;
- // escape single quotes
- while (src[0] != '\0') {
- if (src[0] == '\'') {
+ while (*src != '\0') {
+ if (dst_size < 1)
+ return false;
+
+ if (iscntrl(*src) || *src == '$') {
+ // remove control characters and characters that are expanded by bash
+ *dst++ = '_';
+ dst_size--;
+ }
+ else if (*src == '\'' || *src == '`') {
+ // escape single quotes
if (dst_size < 4)
return false;
@@ -440,14 +451,10 @@ static inline bool sanitize_command_argument_string(char *dst, const char *src,
dst += 4;
dst_size -= 4;
- } else {
- if (dst_size < 1)
- return false;
-
- dst[0] = src[0];
-
- dst += 1;
- dst_size -= 1;
+ }
+ else {
+ *dst++ = *src;
+ dst_size--;
}
src++;
@@ -456,6 +463,7 @@ static inline bool sanitize_command_argument_string(char *dst, const char *src,
// make sure we have space to terminate the string
if (dst_size == 0)
return false;
+
*dst = '\0';
return true;
@@ -531,10 +539,6 @@ static inline int read_single_base64_or_hex_number_file(const char *filename, un
}
}
-static inline int uuid_memcmp(const uuid_t *uu1, const uuid_t *uu2) {
- return memcmp(uu1, uu2, sizeof(uuid_t));
-}
-
static inline char *strsep_skip_consecutive_separators(char **ptr, char *s) {
char *p = (char *)"";
while (p && !p[0] && *ptr) p = strsep(ptr, s);
diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c
index 159e8995..37319a88 100644
--- a/libnetdata/libnetdata.c
+++ b/libnetdata/libnetdata.c
@@ -42,7 +42,7 @@ void aral_judy_init(void) {
for(size_t Words = 0; Words <= MAX_JUDY_SIZE_TO_ARAL; Words++)
if(judy_sizes_config[Words]) {
char buf[30+1];
- snprintfz(buf, 30, "judy-%zu", Words * sizeof(Word_t));
+ snprintfz(buf, sizeof(buf) - 1, "judy-%zu", Words * sizeof(Word_t));
judy_sizes_aral[Words] = aral_create(
buf,
Words * sizeof(Word_t),
@@ -1269,17 +1269,19 @@ char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len) {
return s;
}
+// vsnprintfz() returns the number of bytes actually written - after possible truncation
int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args) {
if(unlikely(!n)) return 0;
int size = vsnprintf(dst, n, fmt, args);
dst[n - 1] = '\0';
- if (unlikely((size_t) size > n)) size = (int)n;
+ if (unlikely((size_t) size >= n)) size = (int)(n - 1);
return size;
}
+// snprintfz() returns the number of bytes actually written - after possible truncation
int snprintfz(char *dst, size_t n, const char *fmt, ...) {
va_list args;
@@ -1375,7 +1377,7 @@ static int is_virtual_filesystem(const char *path, char **reason) {
return 0;
}
-int verify_netdata_host_prefix() {
+int verify_netdata_host_prefix(bool log_msg) {
if(!netdata_configured_host_prefix)
netdata_configured_host_prefix = "";
@@ -1408,13 +1410,16 @@ int verify_netdata_host_prefix() {
if(is_virtual_filesystem(path, &reason) == -1)
goto failed;
- if(netdata_configured_host_prefix && *netdata_configured_host_prefix)
- netdata_log_info("Using host prefix directory '%s'", netdata_configured_host_prefix);
+ if (netdata_configured_host_prefix && *netdata_configured_host_prefix) {
+ if (log_msg)
+ netdata_log_info("Using host prefix directory '%s'", netdata_configured_host_prefix);
+ }
return 0;
failed:
- netdata_log_error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason);
+ if (log_msg)
+ netdata_log_error("Ignoring host prefix '%s': path '%s' %s", netdata_configured_host_prefix, path, reason);
netdata_configured_host_prefix = "";
return -1;
}
@@ -1694,51 +1699,28 @@ char *find_and_replace(const char *src, const char *find, const char *replace, c
return value;
}
-inline int pluginsd_isspace(char c) {
- switch(c) {
- case ' ':
- case '\t':
- case '\r':
- case '\n':
- case '=':
- return 1;
- default:
- return 0;
- }
-}
+BUFFER *run_command_and_get_output_to_buffer(const char *command, int max_line_length) {
+ BUFFER *wb = buffer_create(0, NULL);
-inline int config_isspace(char c) {
- switch (c) {
- case ' ':
- case '\t':
- case '\r':
- case '\n':
- case ',':
- return 1;
+ pid_t pid;
+ FILE *fp = netdata_popen(command, &pid, NULL);
- default:
- return 0;
+ if(fp) {
+ char buffer[max_line_length + 1];
+ while (fgets(buffer, max_line_length, fp)) {
+ buffer[max_line_length] = '\0';
+ buffer_strcat(wb, buffer);
+ }
}
-}
-
-inline int group_by_label_isspace(char c) {
- if(c == ',' || c == '|')
- return 1;
-
- return 0;
-}
-
-bool isspace_map_pluginsd[256] = {};
-bool isspace_map_config[256] = {};
-bool isspace_map_group_by_label[256] = {};
-
-__attribute__((constructor)) void initialize_is_space_arrays(void) {
- for(int c = 0; c < 256 ; c++) {
- isspace_map_pluginsd[c] = pluginsd_isspace((char) c);
- isspace_map_config[c] = config_isspace((char) c);
- isspace_map_group_by_label[c] = group_by_label_isspace((char) c);
+ else {
+ buffer_free(wb);
+ netdata_log_error("Failed to execute command '%s'.", command);
+ return NULL;
}
+
+ netdata_pclose(NULL, fp, pid);
+ return wb;
}
bool run_command_and_copy_output_to_stdout(const char *command, int max_line_length) {
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
index c337b3b5..d2ecfa15 100644
--- a/libnetdata/libnetdata.h
+++ b/libnetdata/libnetdata.h
@@ -11,8 +11,8 @@ extern "C" {
#include <config.h>
#endif
-#ifdef ENABLE_LZ4
-#define ENABLE_RRDPUSH_COMPRESSION 1
+#if defined(ENABLE_BROTLIENC) && defined(ENABLE_BROTLIDEC)
+#define ENABLE_BROTLI 1
#endif
#ifdef ENABLE_OPENSSL
@@ -201,6 +201,8 @@ extern "C" {
// ----------------------------------------------------------------------------
// netdata common definitions
+#define _cleanup_(x) __attribute__((__cleanup__(x)))
+
#ifdef __GNUC__
#define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#endif // __GNUC__
@@ -559,7 +561,7 @@ extern int enable_ksm;
char *fgets_trim_len(char *buf, size_t buf_size, FILE *fp, size_t *len);
-int verify_netdata_host_prefix();
+int verify_netdata_host_prefix(bool log_msg);
int recursively_delete_dir(const char *path, const char *reason);
@@ -681,108 +683,13 @@ static inline BITMAPX *bitmapX_create(uint32_t bits) {
#define bitmap1024_get_bit(ptr, idx) bitmapX_get_bit((BITMAPX *)ptr, idx)
#define bitmap1024_set_bit(ptr, idx, value) bitmapX_set_bit((BITMAPX *)ptr, idx, value)
-
-#define COMPRESSION_MAX_MSG_SIZE 0x4000
-#define PLUGINSD_LINE_MAX (COMPRESSION_MAX_MSG_SIZE - 1024)
-int pluginsd_isspace(char c);
-int config_isspace(char c);
-int group_by_label_isspace(char c);
-
-extern bool isspace_map_pluginsd[256];
-extern bool isspace_map_config[256];
-extern bool isspace_map_group_by_label[256];
-
-static inline size_t quoted_strings_splitter(char *str, char **words, size_t max_words, bool *isspace_map) {
- char *s = str, quote = 0;
- size_t i = 0;
-
- // skip all white space
- while (unlikely(isspace_map[(uint8_t)*s]))
- s++;
-
- if(unlikely(!*s)) {
- words[i] = NULL;
- return 0;
- }
-
- // check for quote
- if (unlikely(*s == '\'' || *s == '"')) {
- quote = *s; // remember the quote
- s++; // skip the quote
- }
-
- // store the first word
- words[i++] = s;
-
- // while we have something
- while (likely(*s)) {
- // if it is an escape
- if (unlikely(*s == '\\' && s[1])) {
- s += 2;
- continue;
- }
-
- // if it is a quote
- else if (unlikely(*s == quote)) {
- quote = 0;
- *s = ' ';
- continue;
- }
-
- // if it is a space
- else if (unlikely(quote == 0 && isspace_map[(uint8_t)*s])) {
- // terminate the word
- *s++ = '\0';
-
- // skip all white space
- while (likely(isspace_map[(uint8_t)*s]))
- s++;
-
- // check for a quote
- if (unlikely(*s == '\'' || *s == '"')) {
- quote = *s; // remember the quote
- s++; // skip the quote
- }
-
- // if we reached the end, stop
- if (unlikely(!*s))
- break;
-
- // store the next word
- if (likely(i < max_words))
- words[i++] = s;
- else
- break;
- }
-
- // anything else
- else
- s++;
- }
-
- if (likely(i < max_words))
- words[i] = NULL;
-
- return i;
-}
-
-#define quoted_strings_splitter_query_group_by_label(str, words, max_words) \
- quoted_strings_splitter(str, words, max_words, isspace_map_group_by_label)
-
-#define quoted_strings_splitter_config(str, words, max_words) \
- quoted_strings_splitter(str, words, max_words, isspace_map_config)
-
-#define quoted_strings_splitter_pluginsd(str, words, max_words) \
- quoted_strings_splitter(str, words, max_words, isspace_map_pluginsd)
-
-static inline char *get_word(char **words, size_t num_words, size_t index) {
- if (unlikely(index >= num_words))
- return NULL;
-
- return words[index];
-}
+#define COMPRESSION_MAX_CHUNK 0x4000
+#define COMPRESSION_MAX_OVERHEAD 128
+#define COMPRESSION_MAX_MSG_SIZE (COMPRESSION_MAX_CHUNK - COMPRESSION_MAX_OVERHEAD - 1)
+#define PLUGINSD_LINE_MAX (COMPRESSION_MAX_MSG_SIZE - 768)
bool run_command_and_copy_output_to_stdout(const char *command, int max_line_length);
+struct web_buffer *run_command_and_get_output_to_buffer(const char *command, int max_line_length);
typedef enum {
OPEN_FD_ACTION_CLOSE,
@@ -802,6 +709,8 @@ extern char *netdata_configured_host_prefix;
#define XXH_INLINE_ALL
#include "xxhash.h"
+#include "uuid/uuid.h"
+
#include "libjudy/src/Judy.h"
#include "july/july.h"
#include "os.h"
@@ -811,7 +720,11 @@ extern char *netdata_configured_host_prefix;
#include "circular_buffer/circular_buffer.h"
#include "avl/avl.h"
#include "inlined.h"
+#include "line_splitter/line_splitter.h"
#include "clocks/clocks.h"
+#include "datetime/iso8601.h"
+#include "datetime/rfc3339.h"
+#include "datetime/rfc7231.h"
#include "completion/completion.h"
#include "popen/popen.h"
#include "simple_pattern/simple_pattern.h"
@@ -820,7 +733,9 @@ extern char *netdata_configured_host_prefix;
#endif
#include "socket/socket.h"
#include "config/appconfig.h"
+#include "log/journal.h"
#include "log/log.h"
+#include "buffered_reader/buffered_reader.h"
#include "procfile/procfile.h"
#include "string/string.h"
#include "dictionary/dictionary.h"
diff --git a/libnetdata/line_splitter/Makefile.am b/libnetdata/line_splitter/Makefile.am
new file mode 100644
index 00000000..161784b8
--- /dev/null
+++ b/libnetdata/line_splitter/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/line_splitter/README.md b/libnetdata/line_splitter/README.md
new file mode 100644
index 00000000..f811bb4b
--- /dev/null
+++ b/libnetdata/line_splitter/README.md
@@ -0,0 +1,14 @@
+<!--
+title: "Line Splitter"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/line_splitter/README.md
+sidebar_label: "Line Splitter"
+learn_status: "Published"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/libnetdata"
+-->
+
+# Line Splitter
+
+The netdata line splitter tokenizes lines of text into words, honoring quotes
+and backslash escapes, using pre-computed per-character separator maps.
+
diff --git a/libnetdata/line_splitter/line_splitter.c b/libnetdata/line_splitter/line_splitter.c
new file mode 100644
index 00000000..a459d334
--- /dev/null
+++ b/libnetdata/line_splitter/line_splitter.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+
+// Rebuild a human-readable rendering of an already-split line into 'wb':
+// each word is wrapped in single quotes and words are separated by spaces.
+// 'ptr' is a struct line_splitter (typed void* so it fits callback signatures).
+// Returns true when at least one word was appended to the buffer.
+bool line_splitter_reconstruct_line(BUFFER *wb, void *ptr) {
+    struct line_splitter *spl = ptr;
+    if(!spl) return false;
+
+    size_t added = 0;
+    for(size_t i = 0; i < spl->num_words ;i++) {
+        if(i) buffer_fast_strcat(wb, " ", 1);
+
+        buffer_fast_strcat(wb, "'", 1);
+        const char *s = get_word(spl->words, spl->num_words, i);
+        // get_word() may return NULL for missing entries; print empty quotes then
+        buffer_strcat(wb, s?s:"");
+        buffer_fast_strcat(wb, "'", 1);
+        added++;
+    }
+
+    return added > 0;
+}
+
+// Returns 1 when 'c' separates words on a pluginsd protocol line
+// (whitespace or '='), 0 otherwise.
+inline int pluginsd_isspace(char c) {
+    return (c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '=') ? 1 : 0;
+}
+
+// Returns 1 when 'c' separates words in configuration values
+// (whitespace or ','), 0 otherwise.
+inline int config_isspace(char c) {
+    return (c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == ',') ? 1 : 0;
+}
+
+// Returns 1 when 'c' separates labels in a group-by-label list
+// (',' or '|'), 0 otherwise.
+inline int group_by_label_isspace(char c) {
+    switch(c) {
+        case ',':
+        case '|':
+            return 1;
+
+        default:
+            return 0;
+    }
+}
+
+// 256-entry lookup tables, one slot per possible byte value, so the splitter
+// hot loops can classify a character with a single array access instead of a
+// function call.
+bool isspace_map_pluginsd[256] = {};
+bool isspace_map_config[256] = {};
+bool isspace_map_group_by_label[256] = {};
+
+// Runs automatically before main(): pre-compute the separator maps above
+// from the corresponding *_isspace() predicates.
+__attribute__((constructor)) void initialize_is_space_arrays(void) {
+    for(int c = 0; c < 256 ; c++) {
+        isspace_map_pluginsd[c] = pluginsd_isspace((char) c);
+        isspace_map_config[c] = config_isspace((char) c);
+        isspace_map_group_by_label[c] = group_by_label_isspace((char) c);
+    }
+}
diff --git a/libnetdata/line_splitter/line_splitter.h b/libnetdata/line_splitter/line_splitter.h
new file mode 100644
index 00000000..b5a59ad3
--- /dev/null
+++ b/libnetdata/line_splitter/line_splitter.h
@@ -0,0 +1,120 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_LINE_SPLITTER_H
+#define NETDATA_LINE_SPLITTER_H
+
+#define PLUGINSD_MAX_WORDS 30
+
+struct line_splitter {
+ size_t count; // counts number of lines
+ char *words[PLUGINSD_MAX_WORDS]; // an array of pointers for the words in this line
+ size_t num_words; // the number of pointers used in this line
+};
+
+bool line_splitter_reconstruct_line(BUFFER *wb, void *ptr);
+
+// Forget the words of the current line; the running 'count' of lines is kept.
+static inline void line_splitter_reset(struct line_splitter *line) {
+    line->num_words = 0;
+}
+
+int pluginsd_isspace(char c);
+int config_isspace(char c);
+int group_by_label_isspace(char c);
+
+extern bool isspace_map_pluginsd[256];
+extern bool isspace_map_config[256];
+extern bool isspace_map_group_by_label[256];
+
+// Split 'str' IN PLACE into up to 'max_words' NUL-terminated words, storing
+// pointers into 'str' in the 'words' array and returning how many were found.
+// 'isspace_map' is a 256-entry table marking separator characters.
+// Single or double quotes group separators into one word; a backslash makes
+// the next character literal. NOTE: separators in 'str' are overwritten with
+// NULs, so the input must be writable and is consumed by this call.
+static inline size_t quoted_strings_splitter(char *str, char **words, size_t max_words, bool *isspace_map) {
+    char *s = str, quote = 0;
+    size_t i = 0;
+
+    // skip all white space
+    while (unlikely(isspace_map[(uint8_t)*s]))
+        s++;
+
+    if(unlikely(!*s)) {
+        words[i] = NULL;
+        return 0;
+    }
+
+    // check for quote
+    if (unlikely(*s == '\'' || *s == '"')) {
+        quote = *s; // remember the quote
+        s++; // skip the quote
+    }
+
+    // store the first word
+    words[i++] = s;
+
+    // while we have something
+    while (likely(*s)) {
+        // if it is an escape
+        if (unlikely(*s == '\\' && s[1])) {
+            s += 2;
+            continue;
+        }
+
+        // if it is a quote
+        else if (unlikely(*s == quote)) {
+            quote = 0;
+            *s = ' ';
+            continue;
+        }
+
+        // if it is a space
+        else if (unlikely(quote == 0 && isspace_map[(uint8_t)*s])) {
+            // terminate the word
+            *s++ = '\0';
+
+            // skip all white space
+            while (likely(isspace_map[(uint8_t)*s]))
+                s++;
+
+            // check for a quote
+            if (unlikely(*s == '\'' || *s == '"')) {
+                quote = *s; // remember the quote
+                s++; // skip the quote
+            }
+
+            // if we reached the end, stop
+            if (unlikely(!*s))
+                break;
+
+            // store the next word
+            if (likely(i < max_words))
+                words[i++] = s;
+            else
+                break;
+        }
+
+        // anything else
+        else
+            s++;
+    }
+
+    // NULL-terminate the words array when there is room for the sentinel
+    if (likely(i < max_words))
+        words[i] = NULL;
+
+    return i;
+}
+
+#define quoted_strings_splitter_query_group_by_label(str, words, max_words) \
+ quoted_strings_splitter(str, words, max_words, isspace_map_group_by_label)
+
+#define quoted_strings_splitter_config(str, words, max_words) \
+ quoted_strings_splitter(str, words, max_words, isspace_map_config)
+
+#define quoted_strings_splitter_pluginsd(str, words, max_words) \
+ quoted_strings_splitter(str, words, max_words, isspace_map_pluginsd)
+
+// Return the index-th word of a split line, or NULL when 'index' is out of range.
+static inline char *get_word(char **words, size_t num_words, size_t index) {
+    return likely(index < num_words) ? words[index] : NULL;
+}
+
+#endif //NETDATA_LINE_SPLITTER_H
diff --git a/libnetdata/log/Makefile.am b/libnetdata/log/Makefile.am
index 161784b8..a02b8ebd 100644
--- a/libnetdata/log/Makefile.am
+++ b/libnetdata/log/Makefile.am
@@ -5,4 +5,5 @@ MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
dist_noinst_DATA = \
README.md \
+ systemd-cat-native.md \
$(NULL)
diff --git a/libnetdata/log/README.md b/libnetdata/log/README.md
index f811bb4b..d9ed6437 100644
--- a/libnetdata/log/README.md
+++ b/libnetdata/log/README.md
@@ -7,8 +7,198 @@ learn_topic_type: "Tasks"
learn_rel_path: "Developers/libnetdata"
-->
-# Log
+# Netdata Logging
-The netdata log library supports debug, info, error and fatal error logging.
-By default we have an access log, an error log and a collectors log.
+This document describes how Netdata generates its own logs, not how Netdata manages and queries logs databases.
+
+## Log sources
+
+Netdata supports the following log sources:
+
+1. **daemon**, logs generated by Netdata daemon.
+2. **collector**, logs generated by Netdata collectors, including internal and external ones.
+3. **access**, API requests received by Netdata
+4. **health**, all alert transitions and notifications
+
+## Log outputs
+
+For each log source, Netdata supports the following output methods:
+
+- **off**, to disable this log source
+- **journal**, to send the logs to systemd-journal.
+- **syslog**, to send the logs to syslog.
+- **system**, to send the output to `stderr` or `stdout` depending on the log source.
+- **stdout**, to write the logs to Netdata's `stdout`.
+- **stderr**, to write the logs to Netdata's `stderr`.
+- **filename**, to send the logs to a file.
+
+For `daemon` and `collector` the default is `journal` when systemd-journal is available.
+To decide if systemd-journal is available, Netdata checks:
+
+1. `stderr` is connected to systemd-journald
+2. `/run/systemd/journal/socket` exists
+3. `/host/run/systemd/journal/socket` exists (`/host` is configurable in containers)
+
+If any of the above is detected, Netdata will select `journal` for `daemon` and `collector` sources.
+
+All other sources default to a file.
+
+## Log formats
+
+| Format | Description |
+|---------|--------------------------------------------------------------------------------------------------------|
+| journal | journald-specific log format. Automatically selected when logging to systemd-journal. |
+| logfmt | logs data as a series of key/value pairs. The default when logging to any output other than `journal`. |
+| json | logs data in JSON format. |
+
+## Log levels
+
+Each time Netdata logs, it assigns a priority to the log. It can be one of these (in order of importance):
+
+| Level | Description |
+|-----------|----------------------------------------------------------------------------------------|
+| emergency | a fatal condition, Netdata will most likely exit immediately after. |
+| alert | a very important issue that may affect how Netdata operates. |
+| critical  | a very important issue the user should know about, which Netdata thinks it can survive. |
+| error | an error condition indicating that Netdata is trying to do something, but it fails. |
+| warning | something unexpected has happened that may or may not affect the operation of Netdata. |
+| notice | something that does not affect the operation of Netdata, but the user should notice. |
+| info | the default log level about information the user should know. |
+| debug | these are more verbose logs that can be ignored. |
+
+## Logs Configuration
+
+In `netdata.conf`, there are the following settings:
+
+```
+[logs]
+ # logs to trigger flood protection = 1000
+ # logs flood protection period = 60
+ # facility = daemon
+ # level = info
+ # daemon = journal
+ # collector = journal
+ # access = /var/log/netdata/access.log
+ # health = /var/log/netdata/health.log
+```
+
+- `logs to trigger flood protection` and `logs flood protection period` enable logs flood protection for `daemon` and `collector` sources. It can also be configured per log source.
+- `facility` is used only when Netdata logs to syslog.
+- `level` defines the minimum [log level](#log-levels) of logs that will be logged. This setting is applied only to `daemon` and `collector` sources. It can also be configured per source.
+
+### Configuring log sources
+
+Each of the sources (`daemon`, `collector`, `access`, `health`), accepts the following:
+
+```
+source = {FORMAT},level={LEVEL},protection={LOGS}/{PERIOD}@{OUTPUT}
+```
+
+Where:
+
+- `{FORMAT}`, is one of the [log formats](#log-formats),
+- `{LEVEL}`, is the minimum [log level](#log-levels) to be logged,
+- `{LOGS}` is the number of `logs to trigger flood protection` configured per output,
+- `{PERIOD}` is the equivalent of `logs flood protection period` configured per output,
+- `{OUTPUT}` is one of the [log outputs](#log-outputs).
+
+All parameters can be omitted, except `{OUTPUT}`. If `{OUTPUT}` is the only given parameter, `@` can be omitted.
+
+### Logs rotation
+
+Netdata comes with `logrotate` configuration to rotate its log files periodically.
+
+The default is usually found in `/etc/logrotate.d/netdata`.
+
+Sending a `SIGHUP` to Netdata, will instruct it to re-open all its log files.
+
+## Log Fields
+
+Netdata exposes the following fields to its logs:
+
+| journal | logfmt | json | Description |
+|:--------------------------------------:|:------------------------------:|:------------------------------:|:---------------------------------------------------------------------------------------------------------:|
+| `_SOURCE_REALTIME_TIMESTAMP` | `time` | `time` | the timestamp of the event |
+| `SYSLOG_IDENTIFIER` | `comm` | `comm` | the program logging the event |
+| `ND_LOG_SOURCE` | `source` | `source` | one of the [log sources](#log-sources) |
+| `PRIORITY`<br/>numeric | `level`<br/>text | `level`<br/>numeric | one of the [log levels](#log-levels) |
+| `ERRNO` | `errno` | `errno` | the numeric value of `errno` |
+| `INVOCATION_ID` | - | - | a unique UUID of the Netdata session, reset on every Netdata restart, inherited by systemd when available |
+| `CODE_LINE`                            | -                              | -                              | the line number of the source code logging this event                                                      |
+| `CODE_FILE` | - | - | the filename of the source code logging this event |
+| `CODE_FUNCTION` | - | - | the function name of the source code logging this event |
+| `TID` | `tid` | `tid` | the thread id of the thread logging this event |
+| `THREAD_TAG` | `thread` | `thread` | the name of the thread logging this event |
+| `MESSAGE_ID` | `msg_id` | `msg_id` | see [message IDs](#message-ids) |
+| `ND_MODULE` | `module` | `module` | the Netdata module logging this event |
+| `ND_NIDL_NODE` | `node` | `node` | the hostname of the node the event is related to |
+| `ND_NIDL_INSTANCE` | `instance` | `instance` | the instance of the node the event is related to |
+| `ND_NIDL_CONTEXT` | `context` | `context` | the context the event is related to (this is usually the chart name, as shown on netdata dashboards |
+| `ND_NIDL_DIMENSION` | `dimension` | `dimension` | the dimension the event is related to |
+| `ND_SRC_TRANSPORT` | `src_transport` | `src_transport` | when the event happened during a request, this is the request transport |
+| `ND_SRC_IP` | `src_ip` | `src_ip` | when the event happened during an inbound request, this is the IP the request came from |
+| `ND_SRC_PORT` | `src_port` | `src_port` | when the event happened during an inbound request, this is the port the request came from |
+| `ND_SRC_CAPABILITIES` | `src_capabilities` | `src_capabilities` | when the request came from a child, this is the communication capabilities of the child |
+| `ND_DST_TRANSPORT` | `dst_transport` | `dst_transport` | when the event happened during an outbound request, this is the outbound request transport |
+| `ND_DST_IP` | `dst_ip` | `dst_ip` | when the event happened during an outbound request, this is the IP the request destination |
+| `ND_DST_PORT` | `dst_port` | `dst_port` | when the event happened during an outbound request, this is the port the request destination |
+| `ND_DST_CAPABILITIES` | `dst_capabilities` | `dst_capabilities` | when the request goes to a parent, this is the communication capabilities of the parent |
+| `ND_REQUEST_METHOD` | `req_method` | `req_method` | when the event happened during an inbound request, this is the method the request was received |
+| `ND_RESPONSE_CODE`                     | `code`                         | `code`                         | when responding to a request, this is the response code                                                    |
+| `ND_CONNECTION_ID` | `conn` | `conn` | when there is a connection id for an inbound connection, this is the connection id |
+| `ND_TRANSACTION_ID` | `transaction` | `transaction` | the transaction id (UUID) of all API requests |
+| `ND_RESPONSE_SENT_BYTES` | `sent_bytes` | `sent_bytes` | the bytes we sent to API responses |
+| `ND_RESPONSE_SIZE_BYTES` | `size_bytes` | `size_bytes` | the uncompressed bytes of the API responses |
+| `ND_RESPONSE_PREP_TIME_USEC` | `prep_ut` | `prep_ut` | the time needed to prepare a response |
+| `ND_RESPONSE_SENT_TIME_USEC` | `sent_ut` | `sent_ut` | the time needed to send a response |
+| `ND_RESPONSE_TOTAL_TIME_USEC` | `total_ut` | `total_ut` | the total time needed to complete a response |
+| `ND_ALERT_ID` | `alert_id` | `alert_id` | the alert id this event is related to |
+| `ND_ALERT_EVENT_ID` | `alert_event_id` | `alert_event_id` | a sequential number of the alert transition (per host) |
+| `ND_ALERT_UNIQUE_ID` | `alert_unique_id` | `alert_unique_id` | a sequential number of the alert transition (per alert) |
+| `ND_ALERT_TRANSITION_ID` | `alert_transition_id` | `alert_transition_id` | the unique UUID of this alert transition |
+| `ND_ALERT_CONFIG` | `alert_config` | `alert_config` | the alert configuration hash (UUID) |
+| `ND_ALERT_NAME` | `alert` | `alert` | the alert name |
+| `ND_ALERT_CLASS` | `alert_class` | `alert_class` | the alert classification |
+| `ND_ALERT_COMPONENT` | `alert_component` | `alert_component` | the alert component |
+| `ND_ALERT_TYPE` | `alert_type` | `alert_type` | the alert type |
+| `ND_ALERT_EXEC` | `alert_exec` | `alert_exec` | the alert notification program |
+| `ND_ALERT_RECIPIENT` | `alert_recipient` | `alert_recipient` | the alert recipient(s) |
+| `ND_ALERT_VALUE` | `alert_value` | `alert_value` | the current alert value |
+| `ND_ALERT_VALUE_OLD` | `alert_value_old` | `alert_value_old` | the previous alert value |
+| `ND_ALERT_STATUS` | `alert_status` | `alert_status` | the current alert status |
+| `ND_ALERT_STATUS_OLD`                  | `alert_status_old`             | `alert_status_old`             | the previous alert status                                                                                  |
+| `ND_ALERT_UNITS` | `alert_units` | `alert_units` | the units of the alert |
+| `ND_ALERT_SUMMARY` | `alert_summary` | `alert_summary` | the summary text of the alert |
+| `ND_ALERT_INFO` | `alert_info` | `alert_info` | the info text of the alert |
+| `ND_ALERT_DURATION` | `alert_duration` | `alert_duration` | the duration the alert was in its previous state |
+| `ND_ALERT_NOTIFICATION_TIMESTAMP_USEC` | `alert_notification_timestamp` | `alert_notification_timestamp` | the timestamp the notification delivery is scheduled |
+| `ND_REQUEST` | `request` | `request` | the full request during which the event happened |
+| `MESSAGE` | `msg` | `msg` | the event message |
+
+
+### Message IDs
+
+Netdata assigns specific message IDs to certain events:
+
+- `ed4cdb8f1beb4ad3b57cb3cae2d162fa` when a Netdata child connects to this Netdata
+- `6e2e3839067648968b646045dbf28d66` when this Netdata connects to a Netdata parent
+- `9ce0cb58ab8b44df82c4bf1ad9ee22de` when alerts change state
+- `6db0018e83e34320ae2a659d78019fb7` when notifications are sent
+
+You can view these events using the Netdata systemd-journal.plugin at the `MESSAGE_ID` filter,
+or using `journalctl` like this:
+
+```bash
+# query children connection
+journalctl MESSAGE_ID=ed4cdb8f1beb4ad3b57cb3cae2d162fa
+
+# query parent connection
+journalctl MESSAGE_ID=6e2e3839067648968b646045dbf28d66
+
+# query alert transitions
+journalctl MESSAGE_ID=9ce0cb58ab8b44df82c4bf1ad9ee22de
+
+# query alert notifications
+journalctl MESSAGE_ID=6db0018e83e34320ae2a659d78019fb7
+```
diff --git a/libnetdata/log/journal.c b/libnetdata/log/journal.c
new file mode 100644
index 00000000..21978cf5
--- /dev/null
+++ b/libnetdata/log/journal.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "journal.h"
+
+// Returns true when 'path' names an existing filesystem object that is a
+// socket (S_ISSOCK). A NULL or empty path, a stat() failure, or any other
+// file type all yield false.
+bool is_path_unix_socket(const char *path) {
+    // Check if the path is valid
+    if(!path || !*path)
+        return false;
+
+    struct stat statbuf;
+
+    // Use stat to check if the file exists and is a socket
+    if (stat(path, &statbuf) == -1)
+        // The file does not exist or cannot be accessed
+        return false;
+
+    // Check if the file is a socket
+    return S_ISSOCK(statbuf.st_mode);
+}
+
+// Returns true when stderr is the stream provided by systemd-journald.
+// The JOURNAL_STREAM environment variable carries "<device>:<inode>" of that
+// stream; we compare it against fstat() of our own stderr.
+bool is_stderr_connected_to_journal(void) {
+    const char *journal_stream = getenv("JOURNAL_STREAM");
+    if (!journal_stream)
+        return false; // JOURNAL_STREAM is not set
+
+    struct stat stderr_stat;
+    if (fstat(STDERR_FILENO, &stderr_stat) < 0)
+        return false; // Error in getting stderr info
+
+    // Parse device and inode from JOURNAL_STREAM
+    char *endptr;
+    long journal_dev = strtol(journal_stream, &endptr, 10);
+    if (*endptr != ':')
+        return false; // Format error in JOURNAL_STREAM
+
+    long journal_ino = strtol(endptr + 1, NULL, 10);
+
+    // stderr is journald's stream only if both device and inode match
+    return (stderr_stat.st_dev == (dev_t)journal_dev) && (stderr_stat.st_ino == (ino_t)journal_ino);
+}
+
+// Open a connected AF_UNIX datagram socket to the journald socket at 'path'
+// (defaults to JOURNAL_DIRECT_SOCKET when 'path' is NULL or empty).
+// Returns the file descriptor, or -1 when the path is not a unix socket or
+// any syscall fails. The caller owns the returned fd and must close() it.
+int journal_direct_fd(const char *path) {
+    if(!path || !*path)
+        path = JOURNAL_DIRECT_SOCKET;
+
+    if(!is_path_unix_socket(path))
+        return -1;
+
+    int fd = socket(AF_UNIX, SOCK_DGRAM, 0);
+    if (fd < 0) return -1;
+
+    struct sockaddr_un addr;
+    memset(&addr, 0, sizeof(struct sockaddr_un));
+    addr.sun_family = AF_UNIX;
+    // sun_path is pre-zeroed by memset, so the -1 bound keeps it NUL-terminated
+    strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);
+
+    // Connect the socket (optional, but can simplify send operations)
+    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+        close(fd);
+        return -1;
+    }
+
+    return fd;
+}
+
+// Fallback delivery path for messages too large for a plain datagram:
+// write the message into a sealed memfd and pass the file descriptor to
+// journald over 'fd' via an SCM_RIGHTS ancillary message.
+// Compiled out (always returns false) on platforms lacking memfd/sealing.
+static inline bool journal_send_with_memfd(int fd, const char *msg, size_t msg_len) {
+#if defined(__NR_memfd_create) && defined(MFD_ALLOW_SEALING) && defined(F_ADD_SEALS) && defined(F_SEAL_SHRINK) && defined(F_SEAL_GROW) && defined(F_SEAL_WRITE)
+    // Create a memory file descriptor
+    int memfd = (int)syscall(__NR_memfd_create, "journald", MFD_ALLOW_SEALING);
+    if (memfd < 0) return false;
+
+    // Write data to the memfd
+    if (write(memfd, msg, msg_len) != (ssize_t)msg_len) {
+        close(memfd);
+        return false;
+    }
+
+    // Seal the memfd to make it immutable
+    if (fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK | F_SEAL_GROW | F_SEAL_WRITE) < 0) {
+        close(memfd);
+        return false;
+    }
+
+    // Build a sendmsg() call whose only payload is the memfd, carried as
+    // an SCM_RIGHTS control message (the iovec itself is empty).
+    struct iovec iov = {0};
+    struct msghdr msghdr = {0};
+    struct cmsghdr *cmsghdr;
+    char cmsgbuf[CMSG_SPACE(sizeof(int))];
+
+    msghdr.msg_iov = &iov;
+    msghdr.msg_iovlen = 1;
+    msghdr.msg_control = cmsgbuf;
+    msghdr.msg_controllen = sizeof(cmsgbuf);
+
+    cmsghdr = CMSG_FIRSTHDR(&msghdr);
+    cmsghdr->cmsg_level = SOL_SOCKET;
+    cmsghdr->cmsg_type = SCM_RIGHTS;
+    cmsghdr->cmsg_len = CMSG_LEN(sizeof(int));
+    memcpy(CMSG_DATA(cmsghdr), &memfd, sizeof(int));
+
+    ssize_t r = sendmsg(fd, &msghdr, 0);
+
+    // journald keeps its own reference to the fd; ours can be closed now
+    close(memfd);
+    return r >= 0;
+#else
+    return false;
+#endif
+}
+
+// Send a pre-formatted journal message over the connected datagram socket
+// 'fd'. When the message exceeds the datagram size limit (EMSGSIZE), it is
+// retried through the sealed-memfd path. Returns true on successful delivery.
+bool journal_direct_send(int fd, const char *msg, size_t msg_len) {
+    // Send the datagram
+    if (send(fd, msg, msg_len, 0) < 0) {
+        if(errno != EMSGSIZE)
+            return false;
+
+        // datagram is too large, fallback to memfd
+        if(!journal_send_with_memfd(fd, msg, msg_len))
+            return false;
+    }
+
+    return true;
+}
+
+// Build into dst (at most dst_len bytes) the journald socket path, optionally
+// under a host prefix (for containers) and/or inside a journal namespace:
+//   with namespace: {prefix}/run/systemd/journal.{namespace}/socket
+//   without:        {prefix}/run/systemd/journal/socket
+void journal_construct_path(char *dst, size_t dst_len, const char *host_prefix, const char *namespace_str) {
+    if(!host_prefix)
+        host_prefix = "";
+
+    if(namespace_str)
+        snprintfz(dst, dst_len, "%s/run/systemd/journal.%s/socket",
+                  host_prefix, namespace_str);
+    else
+        snprintfz(dst, dst_len, "%s" JOURNAL_DIRECT_SOCKET,
+                  host_prefix);
+}
diff --git a/libnetdata/log/journal.h b/libnetdata/log/journal.h
new file mode 100644
index 00000000..df8ece18
--- /dev/null
+++ b/libnetdata/log/journal.h
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_LOG_JOURNAL_H
+#define NETDATA_LOG_JOURNAL_H
+
+// Default systemd-journald datagram socket path (relative to the host prefix).
+#define JOURNAL_DIRECT_SOCKET "/run/systemd/journal/socket"
+
+// Build the journald socket path (optionally host-prefixed and/or namespaced) into dst.
+void journal_construct_path(char *dst, size_t dst_len, const char *host_prefix, const char *namespace_str);
+
+// Get an fd for direct journald writes at the given socket path; callers
+// treat a negative return as failure. (Implementation not in this header.)
+int journal_direct_fd(const char *path);
+// Send one journal record over fd; falls back to memfd fd-passing on EMSGSIZE.
+bool journal_direct_send(int fd, const char *msg, size_t msg_len);
+
+// True when path exists and is a unix socket.
+bool is_path_unix_socket(const char *path);
+// True when this process' stderr is connected to systemd-journald.
+bool is_stderr_connected_to_journal(void);
+
+#endif //NETDATA_LOG_JOURNAL_H
diff --git a/libnetdata/log/log.c b/libnetdata/log/log.c
index 02bb776c..c805716c 100644
--- a/libnetdata/log/log.c
+++ b/libnetdata/log/log.c
@@ -1,1038 +1,2305 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-#include <daemon/main.h>
+#define SD_JOURNAL_SUPPRESS_LOCATION
+
#include "../libnetdata.h"
+#include <daemon/main.h>
+
+#ifdef __FreeBSD__
+#include <sys/endian.h>
+#endif
+
+#ifdef __APPLE__
+#include <machine/endian.h>
+#endif
#ifdef HAVE_BACKTRACE
#include <execinfo.h>
#endif
-int web_server_is_multithreaded = 1;
+#ifdef HAVE_SYSTEMD
+#include <systemd/sd-journal.h>
+#endif
+
+#include <syslog.h>
const char *program_name = "";
+
uint64_t debug_flags = 0;
-int access_log_syslog = 0;
-int error_log_syslog = 0;
-int collector_log_syslog = 0;
-int output_log_syslog = 0; // debug log
-int health_log_syslog = 0;
+#ifdef ENABLE_ACLK
+int aclklog_enabled = 0;
+#endif
+
+// ----------------------------------------------------------------------------
+
+struct nd_log_source;
+static bool nd_log_limit_reached(struct nd_log_source *source);
-int stdaccess_fd = -1;
-FILE *stdaccess = NULL;
+// ----------------------------------------------------------------------------
+// logging method
+
+typedef enum __attribute__((__packed__)) {
+ NDLM_DISABLED = 0,
+ NDLM_DEVNULL,
+ NDLM_DEFAULT,
+ NDLM_JOURNAL,
+ NDLM_SYSLOG,
+ NDLM_STDOUT,
+ NDLM_STDERR,
+ NDLM_FILE,
+} ND_LOG_METHOD;
+
+static struct {
+ ND_LOG_METHOD method;
+ const char *name;
+} nd_log_methods[] = {
+ { .method = NDLM_DISABLED, .name = "none" },
+ { .method = NDLM_DEVNULL, .name = "/dev/null" },
+ { .method = NDLM_DEFAULT, .name = "default" },
+ { .method = NDLM_JOURNAL, .name = "journal" },
+ { .method = NDLM_SYSLOG, .name = "syslog" },
+ { .method = NDLM_STDOUT, .name = "stdout" },
+ { .method = NDLM_STDERR, .name = "stderr" },
+ { .method = NDLM_FILE, .name = "file" },
+};
+
+static ND_LOG_METHOD nd_log_method2id(const char *method) {
+ if(!method || !*method)
+ return NDLM_DEFAULT;
+
+ size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(strcmp(nd_log_methods[i].name, method) == 0)
+ return nd_log_methods[i].method;
+ }
-int stdhealth_fd = -1;
-FILE *stdhealth = NULL;
+ return NDLM_FILE;
+}
-int stdcollector_fd = -1;
-FILE *stderror = NULL;
+static const char *nd_log_id2method(ND_LOG_METHOD method) {
+ size_t entries = sizeof(nd_log_methods) / sizeof(nd_log_methods[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(method == nd_log_methods[i].method)
+ return nd_log_methods[i].name;
+ }
-const char *stdaccess_filename = NULL;
-const char *stderr_filename = NULL;
-const char *stdout_filename = NULL;
-const char *facility_log = NULL;
-const char *stdhealth_filename = NULL;
-const char *stdcollector_filename = NULL;
+ return "unknown";
+}
-netdata_log_level_t global_log_severity_level = NETDATA_LOG_LEVEL_INFO;
+#define IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(ndlo) ((ndlo) == NDLM_JOURNAL || (ndlo) == NDLM_SYSLOG || (ndlo) == NDLM_STDERR)
-#ifdef ENABLE_ACLK
-const char *aclklog_filename = NULL;
-int aclklog_fd = -1;
-FILE *aclklog = NULL;
-int aclklog_syslog = 1;
-int aclklog_enabled = 0;
+const char *nd_log_method_for_external_plugins(const char *s) {
+ if(s && *s) {
+ ND_LOG_METHOD method = nd_log_method2id(s);
+ if(IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method))
+ return nd_log_id2method(method);
+ }
+
+ return nd_log_id2method(NDLM_STDERR);
+}
+
+// ----------------------------------------------------------------------------
+// workaround strerror_r()
+
+#if defined(STRERROR_R_CHAR_P)
+// GLIBC version of strerror_r
+static const char *strerror_result(const char *a, const char *b) { (void)b; return a; }
+#elif defined(HAVE_STRERROR_R)
+// POSIX version of strerror_r
+static const char *strerror_result(int a, const char *b) { (void)a; return b; }
+#elif defined(HAVE_C__GENERIC)
+
+// what a trick!
+// http://stackoverflow.com/questions/479207/function-overloading-in-c
+static const char *strerror_result_int(int a, const char *b) { (void)a; return b; }
+static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; }
+
+#define strerror_result(a, b) _Generic((a), \
+ int: strerror_result_int, \
+ char *: strerror_result_string \
+ )(a, b)
+
+#else
+#error "cannot detect the format of function strerror_r()"
#endif
+static const char *errno2str(int errnum, char *buf, size_t size) {
+ return strerror_result(strerror_r(errnum, buf, size), buf);
+}
+
// ----------------------------------------------------------------------------
-// Log facility(https://tools.ietf.org/html/rfc5424)
+// facilities
//
-// The facilities accepted in the Netdata are in according with the following
-// header files for their respective operating system:
-// sys/syslog.h (Linux )
+// sys/syslog.h (Linux)
// sys/sys/syslog.h (FreeBSD)
// bsd/sys/syslog.h (darwin-xnu)
-#define LOG_AUTH_KEY "auth"
-#define LOG_AUTHPRIV_KEY "authpriv"
-#ifdef __FreeBSD__
-# define LOG_CONSOLE_KEY "console"
-#endif
-#define LOG_CRON_KEY "cron"
-#define LOG_DAEMON_KEY "daemon"
-#define LOG_FTP_KEY "ftp"
-#ifdef __APPLE__
-# define LOG_INSTALL_KEY "install"
-#endif
-#define LOG_KERN_KEY "kern"
-#define LOG_LPR_KEY "lpr"
-#define LOG_MAIL_KEY "mail"
-//#define LOG_INTERNAL_MARK_KEY "mark"
-#ifdef __APPLE__
-# define LOG_NETINFO_KEY "netinfo"
-# define LOG_RAS_KEY "ras"
-# define LOG_REMOTEAUTH_KEY "remoteauth"
-#endif
-#define LOG_NEWS_KEY "news"
-#ifdef __FreeBSD__
-# define LOG_NTP_KEY "ntp"
-#endif
-#define LOG_SECURITY_KEY "security"
-#define LOG_SYSLOG_KEY "syslog"
-#define LOG_USER_KEY "user"
-#define LOG_UUCP_KEY "uucp"
-#ifdef __APPLE__
-# define LOG_LAUNCHD_KEY "launchd"
-#endif
-#define LOG_LOCAL0_KEY "local0"
-#define LOG_LOCAL1_KEY "local1"
-#define LOG_LOCAL2_KEY "local2"
-#define LOG_LOCAL3_KEY "local3"
-#define LOG_LOCAL4_KEY "local4"
-#define LOG_LOCAL5_KEY "local5"
-#define LOG_LOCAL6_KEY "local6"
-#define LOG_LOCAL7_KEY "local7"
-
-static int log_facility_id(const char *facility_name)
-{
- static int
- hash_auth = 0,
- hash_authpriv = 0,
-#ifdef __FreeBSD__
- hash_console = 0,
-#endif
- hash_cron = 0,
- hash_daemon = 0,
- hash_ftp = 0,
-#ifdef __APPLE__
- hash_install = 0,
-#endif
- hash_kern = 0,
- hash_lpr = 0,
- hash_mail = 0,
-// hash_mark = 0,
-#ifdef __APPLE__
- hash_netinfo = 0,
- hash_ras = 0,
- hash_remoteauth = 0,
-#endif
- hash_news = 0,
-#ifdef __FreeBSD__
- hash_ntp = 0,
-#endif
- hash_security = 0,
- hash_syslog = 0,
- hash_user = 0,
- hash_uucp = 0,
-#ifdef __APPLE__
- hash_launchd = 0,
-#endif
- hash_local0 = 0,
- hash_local1 = 0,
- hash_local2 = 0,
- hash_local3 = 0,
- hash_local4 = 0,
- hash_local5 = 0,
- hash_local6 = 0,
- hash_local7 = 0;
-
- if(unlikely(!hash_auth))
- {
- hash_auth = simple_hash(LOG_AUTH_KEY);
- hash_authpriv = simple_hash(LOG_AUTHPRIV_KEY);
-#ifdef __FreeBSD__
- hash_console = simple_hash(LOG_CONSOLE_KEY);
-#endif
- hash_cron = simple_hash(LOG_CRON_KEY);
- hash_daemon = simple_hash(LOG_DAEMON_KEY);
- hash_ftp = simple_hash(LOG_FTP_KEY);
-#ifdef __APPLE__
- hash_install = simple_hash(LOG_INSTALL_KEY);
-#endif
- hash_kern = simple_hash(LOG_KERN_KEY);
- hash_lpr = simple_hash(LOG_LPR_KEY);
- hash_mail = simple_hash(LOG_MAIL_KEY);
-// hash_mark = simple_uhash();
-#ifdef __APPLE__
- hash_netinfo = simple_hash(LOG_NETINFO_KEY);
- hash_ras = simple_hash(LOG_RAS_KEY);
- hash_remoteauth = simple_hash(LOG_REMOTEAUTH_KEY);
-#endif
- hash_news = simple_hash(LOG_NEWS_KEY);
+static struct {
+ int facility;
+ const char *name;
+} nd_log_facilities[] = {
+ { LOG_AUTH, "auth" },
+ { LOG_AUTHPRIV, "authpriv" },
+ { LOG_CRON, "cron" },
+ { LOG_DAEMON, "daemon" },
+ { LOG_FTP, "ftp" },
+ { LOG_KERN, "kern" },
+ { LOG_LPR, "lpr" },
+ { LOG_MAIL, "mail" },
+ { LOG_NEWS, "news" },
+ { LOG_SYSLOG, "syslog" },
+ { LOG_USER, "user" },
+ { LOG_UUCP, "uucp" },
+ { LOG_LOCAL0, "local0" },
+ { LOG_LOCAL1, "local1" },
+ { LOG_LOCAL2, "local2" },
+ { LOG_LOCAL3, "local3" },
+ { LOG_LOCAL4, "local4" },
+ { LOG_LOCAL5, "local5" },
+ { LOG_LOCAL6, "local6" },
+ { LOG_LOCAL7, "local7" },
+
#ifdef __FreeBSD__
- hash_ntp = simple_hash(LOG_NTP_KEY);
+ { LOG_CONSOLE, "console" },
+ { LOG_NTP, "ntp" },
+
+ // FreeBSD does not consider 'security' as deprecated.
+ { LOG_SECURITY, "security" },
+#else
+ // For all other O/S 'security' is mapped to 'auth'.
+ { LOG_AUTH, "security" },
#endif
- hash_security = simple_hash(LOG_SECURITY_KEY);
- hash_syslog = simple_hash(LOG_SYSLOG_KEY);
- hash_user = simple_hash(LOG_USER_KEY);
- hash_uucp = simple_hash(LOG_UUCP_KEY);
+
#ifdef __APPLE__
- hash_launchd = simple_hash(LOG_LAUNCHD_KEY);
+ { LOG_INSTALL, "install" },
+ { LOG_NETINFO, "netinfo" },
+ { LOG_RAS, "ras" },
+ { LOG_REMOTEAUTH, "remoteauth" },
+ { LOG_LAUNCHD, "launchd" },
+
#endif
- hash_local0 = simple_hash(LOG_LOCAL0_KEY);
- hash_local1 = simple_hash(LOG_LOCAL1_KEY);
- hash_local2 = simple_hash(LOG_LOCAL2_KEY);
- hash_local3 = simple_hash(LOG_LOCAL3_KEY);
- hash_local4 = simple_hash(LOG_LOCAL4_KEY);
- hash_local5 = simple_hash(LOG_LOCAL5_KEY);
- hash_local6 = simple_hash(LOG_LOCAL6_KEY);
- hash_local7 = simple_hash(LOG_LOCAL7_KEY);
+};
+
+static int nd_log_facility2id(const char *facility) {
+ size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(strcmp(nd_log_facilities[i].name, facility) == 0)
+ return nd_log_facilities[i].facility;
}
- int hash = simple_hash(facility_name);
- if ( hash == hash_auth )
- {
- return LOG_AUTH;
+ return LOG_DAEMON;
+}
+
+static const char *nd_log_id2facility(int facility) {
+ size_t entries = sizeof(nd_log_facilities) / sizeof(nd_log_facilities[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(nd_log_facilities[i].facility == facility)
+ return nd_log_facilities[i].name;
}
- else if ( hash == hash_authpriv )
- {
- return LOG_AUTHPRIV;
+
+ return "daemon";
+}
+
+// ----------------------------------------------------------------------------
+// priorities
+
+static struct {
+ ND_LOG_FIELD_PRIORITY priority;
+ const char *name;
+} nd_log_priorities[] = {
+ { .priority = NDLP_EMERG, .name = "emergency" },
+ { .priority = NDLP_EMERG, .name = "emerg" },
+ { .priority = NDLP_ALERT, .name = "alert" },
+ { .priority = NDLP_CRIT, .name = "critical" },
+ { .priority = NDLP_CRIT, .name = "crit" },
+ { .priority = NDLP_ERR, .name = "error" },
+ { .priority = NDLP_ERR, .name = "err" },
+ { .priority = NDLP_WARNING, .name = "warning" },
+ { .priority = NDLP_WARNING, .name = "warn" },
+ { .priority = NDLP_NOTICE, .name = "notice" },
+ { .priority = NDLP_INFO, .name = NDLP_INFO_STR },
+ { .priority = NDLP_DEBUG, .name = "debug" },
+};
+
+int nd_log_priority2id(const char *priority) {
+ size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(strcmp(nd_log_priorities[i].name, priority) == 0)
+ return nd_log_priorities[i].priority;
}
-#ifdef __FreeBSD__
- else if ( hash == hash_console )
- {
- return LOG_CONSOLE;
+
+ return NDLP_INFO;
+}
+
+const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority) {
+ size_t entries = sizeof(nd_log_priorities) / sizeof(nd_log_priorities[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(priority == nd_log_priorities[i].priority)
+ return nd_log_priorities[i].name;
}
-#endif
- else if ( hash == hash_cron )
- {
- return LOG_CRON;
+
+ return NDLP_INFO_STR;
+}
+
+// ----------------------------------------------------------------------------
+// log sources
+
+const char *nd_log_sources[] = {
+ [NDLS_UNSET] = "UNSET",
+ [NDLS_ACCESS] = "access",
+ [NDLS_ACLK] = "aclk",
+ [NDLS_COLLECTORS] = "collector",
+ [NDLS_DAEMON] = "daemon",
+ [NDLS_HEALTH] = "health",
+ [NDLS_DEBUG] = "debug",
+};
+
+size_t nd_log_source2id(const char *source, ND_LOG_SOURCES def) {
+ size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(strcmp(nd_log_sources[i], source) == 0)
+ return i;
}
- else if ( hash == hash_daemon )
- {
- return LOG_DAEMON;
+
+ return def;
+}
+
+
+static const char *nd_log_id2source(ND_LOG_SOURCES source) {
+ size_t entries = sizeof(nd_log_sources) / sizeof(nd_log_sources[0]);
+ if(source < entries)
+ return nd_log_sources[source];
+
+ return nd_log_sources[NDLS_COLLECTORS];
+}
+
+// ----------------------------------------------------------------------------
+// log output formats
+
+typedef enum __attribute__((__packed__)) {
+ NDLF_JOURNAL,
+ NDLF_LOGFMT,
+ NDLF_JSON,
+} ND_LOG_FORMAT;
+
+static struct {
+ ND_LOG_FORMAT format;
+ const char *name;
+} nd_log_formats[] = {
+ { .format = NDLF_JOURNAL, .name = "journal" },
+ { .format = NDLF_LOGFMT, .name = "logfmt" },
+ { .format = NDLF_JSON, .name = "json" },
+};
+
+static ND_LOG_FORMAT nd_log_format2id(const char *format) {
+ if(!format || !*format)
+ return NDLF_LOGFMT;
+
+ size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(strcmp(nd_log_formats[i].name, format) == 0)
+ return nd_log_formats[i].format;
}
- else if ( hash == hash_ftp )
- {
- return LOG_FTP;
+
+ return NDLF_LOGFMT;
+}
+
+static const char *nd_log_id2format(ND_LOG_FORMAT format) {
+ size_t entries = sizeof(nd_log_formats) / sizeof(nd_log_formats[0]);
+ for(size_t i = 0; i < entries ;i++) {
+ if(format == nd_log_formats[i].format)
+ return nd_log_formats[i].name;
}
-#ifdef __APPLE__
- else if ( hash == hash_install )
- {
- return LOG_INSTALL;
+
+ return "logfmt";
+}
+
+// ----------------------------------------------------------------------------
+// format dates
+
+void log_date(char *buffer, size_t len, time_t now) {
+ if(unlikely(!buffer || !len))
+ return;
+
+ time_t t = now;
+ struct tm *tmp, tmbuf;
+
+ tmp = localtime_r(&t, &tmbuf);
+
+ if (unlikely(!tmp)) {
+ buffer[0] = '\0';
+ return;
}
-#endif
- else if ( hash == hash_kern )
- {
- return LOG_KERN;
+
+ if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0))
+ buffer[0] = '\0';
+
+ buffer[len - 1] = '\0';
+}
+
+// ----------------------------------------------------------------------------
+
+// Flood-protection (rate limiting) state for one log source.
+// A value of 0 for the period/allowance fields means unlimited
+// (see ND_LOG_LIMITS_UNLIMITED below).
+struct nd_log_limit {
+ usec_t started_monotonic_ut; // presumably the start of the current throttle window — confirm in nd_log_limit_reached()
+ uint32_t counter; // messages counted in the current window — confirm
+ uint32_t prevented; // messages suppressed in the current window — confirm
+
+ uint32_t throttle_period; // seconds per throttle window (0 = unlimited)
+ uint32_t logs_per_period; // allowed logs per window (0 = unlimited)
+ uint32_t logs_per_period_backup; // configured allowance, kept so the live value can be restored (see nd_log_set_flood_protection)
+};
+
+#define ND_LOG_LIMITS_DEFAULT (struct nd_log_limit){ .logs_per_period = ND_LOG_DEFAULT_THROTTLE_LOGS, .logs_per_period_backup = ND_LOG_DEFAULT_THROTTLE_LOGS, .throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD, }
+#define ND_LOG_LIMITS_UNLIMITED (struct nd_log_limit){ .logs_per_period = 0, .logs_per_period_backup = 0, .throttle_period = 0, }
+
+// Configuration and open-output state for one ND_LOG_SOURCES stream:
+// where it goes (method/filename/fd/fp), how it is formatted, the minimum
+// priority it accepts, and its rate-limiting state.
+struct nd_log_source {
+ SPINLOCK spinlock; // presumably serializes writers of this source — confirm at call sites
+ ND_LOG_METHOD method;
+ ND_LOG_FORMAT format;
+ const char *filename; // target path when method == NDLM_FILE (or /dev/null)
+ int fd;
+ FILE *fp;
+
+ ND_LOG_FIELD_PRIORITY min_priority;
+ const char *pending_msg;
+ struct nd_log_limit limits;
+};
+
+// Per-thread override of the log source (0 = no override).
+static __thread ND_LOG_SOURCES overwrite_thread_source = 0;
+
+// Route all log calls made by the calling thread to the given source.
+void nd_log_set_thread_source(ND_LOG_SOURCES source) {
+ overwrite_thread_source = source;
+}
+
+static struct {
+ uuid_t invocation_id;
+
+ ND_LOG_SOURCES overwrite_process_source;
+
+ struct nd_log_source sources[_NDLS_MAX];
+
+ struct {
+ bool initialized;
+ } journal;
+
+ struct {
+ bool initialized;
+ int fd;
+ char filename[FILENAME_MAX + 1];
+ } journal_direct;
+
+ struct {
+ bool initialized;
+ int facility;
+ } syslog;
+
+ struct {
+ SPINLOCK spinlock;
+ bool initialized;
+ } std_output;
+
+ struct {
+ SPINLOCK spinlock;
+ bool initialized;
+ } std_error;
+
+} nd_log = {
+ .overwrite_process_source = 0,
+ .journal = {
+ .initialized = false,
+ },
+ .journal_direct = {
+ .initialized = false,
+ .fd = -1,
+ },
+ .syslog = {
+ .initialized = false,
+ .facility = LOG_DAEMON,
+ },
+ .std_output = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .initialized = false,
+ },
+ .std_error = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .initialized = false,
+ },
+ .sources = {
+ [NDLS_UNSET] = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .method = NDLM_DISABLED,
+ .format = NDLF_JOURNAL,
+ .filename = NULL,
+ .fd = -1,
+ .fp = NULL,
+ .min_priority = NDLP_EMERG,
+ .limits = ND_LOG_LIMITS_UNLIMITED,
+ },
+ [NDLS_ACCESS] = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .method = NDLM_DEFAULT,
+ .format = NDLF_LOGFMT,
+ .filename = LOG_DIR "/access.log",
+ .fd = -1,
+ .fp = NULL,
+ .min_priority = NDLP_DEBUG,
+ .limits = ND_LOG_LIMITS_UNLIMITED,
+ },
+ [NDLS_ACLK] = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .method = NDLM_FILE,
+ .format = NDLF_LOGFMT,
+ .filename = LOG_DIR "/aclk.log",
+ .fd = -1,
+ .fp = NULL,
+ .min_priority = NDLP_DEBUG,
+ .limits = ND_LOG_LIMITS_UNLIMITED,
+ },
+ [NDLS_COLLECTORS] = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .method = NDLM_DEFAULT,
+ .format = NDLF_LOGFMT,
+ .filename = LOG_DIR "/collectors.log",
+ .fd = STDERR_FILENO,
+ .fp = NULL,
+ .min_priority = NDLP_INFO,
+ .limits = ND_LOG_LIMITS_DEFAULT,
+ },
+ [NDLS_DEBUG] = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .method = NDLM_DISABLED,
+ .format = NDLF_LOGFMT,
+ .filename = LOG_DIR "/debug.log",
+ .fd = STDOUT_FILENO,
+ .fp = NULL,
+ .min_priority = NDLP_DEBUG,
+ .limits = ND_LOG_LIMITS_UNLIMITED,
+ },
+ [NDLS_DAEMON] = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .method = NDLM_DEFAULT,
+ .filename = LOG_DIR "/daemon.log",
+ .format = NDLF_LOGFMT,
+ .fd = -1,
+ .fp = NULL,
+ .min_priority = NDLP_INFO,
+ .limits = ND_LOG_LIMITS_DEFAULT,
+ },
+ [NDLS_HEALTH] = {
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER,
+ .method = NDLM_DEFAULT,
+ .format = NDLF_LOGFMT,
+ .filename = LOG_DIR "/health.log",
+ .fd = -1,
+ .fp = NULL,
+ .min_priority = NDLP_DEBUG,
+ .limits = ND_LOG_LIMITS_UNLIMITED,
+ },
+ },
+};
+
+// Runs before main() (constructor attribute). Resolves this process'
+// invocation id: prefer NETDATA_INVOCATION_ID from the environment, then
+// systemd's INVOCATION_ID, otherwise generate a random UUID. The result is
+// re-exported as NETDATA_INVOCATION_ID so child processes inherit it.
+// NOTE(review): getenv() may return NULL here — relies on uuid_parse_flexi()
+// tolerating NULL input; confirm.
+__attribute__((constructor)) void initialize_invocation_id(void) {
+ // check for a NETDATA_INVOCATION_ID
+ if(uuid_parse_flexi(getenv("NETDATA_INVOCATION_ID"), nd_log.invocation_id) != 0) {
+ // not found, check for systemd set INVOCATION_ID
+ if(uuid_parse_flexi(getenv("INVOCATION_ID"), nd_log.invocation_id) != 0) {
+ // not found, generate a new one
+ uuid_generate_random(nd_log.invocation_id);
+ }
+ }
+
+ char uuid[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(nd_log.invocation_id, uuid);
+ setenv("NETDATA_INVOCATION_ID", uuid, 1);
+}
+
+// Return the fd the health log is written to: the health source's own file
+// descriptor when it logs to a file and the fd is open, otherwise stderr.
+int nd_log_health_fd(void) {
+ if(nd_log.sources[NDLS_HEALTH].method == NDLM_FILE && nd_log.sources[NDLS_HEALTH].fd != -1)
+ return nd_log.sources[NDLS_HEALTH].fd;
+
+ return STDERR_FILENO;
+}
+
+void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting) {
+ char buf[FILENAME_MAX + 100];
+ if(setting && *setting)
+ strncpyz(buf, setting, sizeof(buf) - 1);
+ else
+ buf[0] = '\0';
+
+ struct nd_log_source *ls = &nd_log.sources[source];
+ char *output = strrchr(buf, '@');
+
+ if(!output)
+ // all of it is the output
+ output = buf;
+ else {
+ // we found an '@', the next char is the output
+ *output = '\0';
+ output++;
+
+ // parse the other params
+ char *remaining = buf;
+ while(remaining) {
+ char *value = strsep_skip_consecutive_separators(&remaining, ",");
+ if (!value || !*value) continue;
+
+ char *name = strsep_skip_consecutive_separators(&value, "=");
+ if (!name || !*name) continue;
+
+ if(strcmp(name, "logfmt") == 0)
+ ls->format = NDLF_LOGFMT;
+ else if(strcmp(name, "json") == 0)
+ ls->format = NDLF_JSON;
+ else if(strcmp(name, "journal") == 0)
+ ls->format = NDLF_JOURNAL;
+ else if(strcmp(name, "level") == 0 && value && *value)
+ ls->min_priority = nd_log_priority2id(value);
+ else if(strcmp(name, "protection") == 0 && value && *value) {
+ if(strcmp(value, "off") == 0 || strcmp(value, "none") == 0) {
+ ls->limits = ND_LOG_LIMITS_UNLIMITED;
+ ls->limits.counter = 0;
+ ls->limits.prevented = 0;
+ }
+ else {
+ ls->limits = ND_LOG_LIMITS_DEFAULT;
+
+ char *slash = strchr(value, '/');
+ if(slash) {
+ *slash = '\0';
+ slash++;
+ ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value);
+ ls->limits.throttle_period = str2u(slash);
+ }
+ else {
+ ls->limits.logs_per_period = ls->limits.logs_per_period_backup = str2u(value);
+ ls->limits.throttle_period = ND_LOG_DEFAULT_THROTTLE_PERIOD;
+ }
+ }
+ }
+ else
+ nd_log(NDLS_DAEMON, NDLP_ERR, "Error while parsing configuration of log source '%s'. "
+ "In config '%s', '%s' is not understood.",
+ nd_log_id2source(source), setting, name);
+ }
}
- else if ( hash == hash_lpr )
- {
- return LOG_LPR;
+
+ if(!output || !*output || strcmp(output, "none") == 0 || strcmp(output, "off") == 0) {
+ ls->method = NDLM_DISABLED;
+ ls->filename = "/dev/null";
}
- else if ( hash == hash_mail )
- {
- return LOG_MAIL;
+ else if(strcmp(output, "journal") == 0) {
+ ls->method = NDLM_JOURNAL;
+ ls->filename = NULL;
}
- /*
- else if ( hash == hash_mark )
- {
- //this is internal for all OS
- return INTERNAL_MARK;
+ else if(strcmp(output, "syslog") == 0) {
+ ls->method = NDLM_SYSLOG;
+ ls->filename = NULL;
}
- */
-#ifdef __APPLE__
- else if ( hash == hash_netinfo )
- {
- return LOG_NETINFO;
+ else if(strcmp(output, "/dev/null") == 0) {
+ ls->method = NDLM_DEVNULL;
+ ls->filename = "/dev/null";
}
- else if ( hash == hash_ras )
- {
- return LOG_RAS;
+ else if(strcmp(output, "system") == 0) {
+ if(ls->fd == STDERR_FILENO) {
+ ls->method = NDLM_STDERR;
+ ls->filename = NULL;
+ ls->fd = STDERR_FILENO;
+ }
+ else {
+ ls->method = NDLM_STDOUT;
+ ls->filename = NULL;
+ ls->fd = STDOUT_FILENO;
+ }
}
- else if ( hash == hash_remoteauth )
- {
- return LOG_REMOTEAUTH;
+ else if(strcmp(output, "stderr") == 0) {
+ ls->method = NDLM_STDERR;
+ ls->filename = NULL;
+ ls->fd = STDERR_FILENO;
}
-#endif
- else if ( hash == hash_news )
- {
- return LOG_NEWS;
+ else if(strcmp(output, "stdout") == 0) {
+ ls->method = NDLM_STDOUT;
+ ls->filename = NULL;
+ ls->fd = STDOUT_FILENO;
}
-#ifdef __FreeBSD__
- else if ( hash == hash_ntp )
- {
- return LOG_NTP;
+ else {
+ ls->method = NDLM_FILE;
+ ls->filename = strdupz(output);
}
+
+#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE)
+ ls->min_priority = NDLP_DEBUG;
#endif
- else if ( hash == hash_security )
- {
- //FreeBSD is the unique that does not consider
- //this facility deprecated. We are keeping
- //it for other OS while they are kept in their headers.
-#ifdef __FreeBSD__
- return LOG_SECURITY;
-#else
- return LOG_AUTH;
-#endif
- }
- else if ( hash == hash_syslog )
- {
- return LOG_SYSLOG;
+
+ if(source == NDLS_COLLECTORS) {
+ // set the method for the collector processes we will spawn
+
+ ND_LOG_METHOD method;
+ ND_LOG_FORMAT format = ls->format;
+ ND_LOG_FIELD_PRIORITY priority = ls->min_priority;
+
+ if(ls->method == NDLM_SYSLOG || ls->method == NDLM_JOURNAL)
+ method = ls->method;
+ else
+ method = NDLM_STDERR;
+
+ setenv("NETDATA_LOG_METHOD", nd_log_id2method(method), 1);
+ setenv("NETDATA_LOG_FORMAT", nd_log_id2format(format), 1);
+ setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1);
}
- else if ( hash == hash_user )
- {
- return LOG_USER;
+}
+
+void nd_log_set_priority_level(const char *setting) {
+ if(!setting || !*setting)
+ setting = "info";
+
+ ND_LOG_FIELD_PRIORITY priority = nd_log_priority2id(setting);
+
+#if defined(NETDATA_INTERNAL_CHECKS) || defined(NETDATA_DEV_MODE)
+ priority = NDLP_DEBUG;
+#endif
+
+ for (size_t i = 0; i < _NDLS_MAX; i++) {
+ if (i != NDLS_DEBUG)
+ nd_log.sources[i].min_priority = priority;
}
- else if ( hash == hash_uucp )
- {
- return LOG_UUCP;
+
+ // the right one
+ setenv("NETDATA_LOG_LEVEL", nd_log_id2priority(priority), 1);
+}
+
+// Set the syslog facility from its name. NULL/empty falls back to "daemon";
+// unrecognized names also resolve to LOG_DAEMON (see nd_log_facility2id()).
+// The canonical facility name is exported for spawned plugin processes.
+void nd_log_set_facility(const char *facility) {
+ if(!facility || !*facility)
+ facility = "daemon";
+
+ nd_log.syslog.facility = nd_log_facility2id(facility);
+ setenv("NETDATA_SYSLOG_FACILITY", nd_log_id2facility(nd_log.syslog.facility), 1);
+}
+
+// Configure flood protection for the daemon and collectors log sources:
+// at most `logs` messages per `period` seconds. Both the live allowance and
+// its backup are set, and the values are exported through the environment
+// so spawned plugin processes inherit the same limits.
+void nd_log_set_flood_protection(size_t logs, time_t period) {
+ // BUGFIX: previously the daemon source's logs_per_period was assigned its
+ // stale backup value (the `= logs` was missing from the chain), so only
+ // the collectors source ever picked up the new setting.
+ nd_log.sources[NDLS_DAEMON].limits.logs_per_period =
+ nd_log.sources[NDLS_DAEMON].limits.logs_per_period_backup = logs;
+ nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period =
+ nd_log.sources[NDLS_COLLECTORS].limits.logs_per_period_backup = logs;
+
+ nd_log.sources[NDLS_DAEMON].limits.throttle_period =
+ nd_log.sources[NDLS_COLLECTORS].limits.throttle_period = period;
+
+ char buf[100];
+ snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )period);
+ setenv("NETDATA_ERRORS_THROTTLE_PERIOD", buf, 1);
+ snprintfz(buf, sizeof(buf), "%" PRIu64, (uint64_t )logs);
+ setenv("NETDATA_ERRORS_PER_PERIOD", buf, 1);
+}
+
+// Mark systemd journal logging as available when the build has HAVE_SYSTEMD
+// (no runtime work needed — sd_journal_* handles its own connection).
+// Returns whether journal logging via libsystemd can be used.
+static bool nd_log_journal_systemd_init(void) {
+#ifdef HAVE_SYSTEMD
+ nd_log.journal.initialized = true;
+#else
+ nd_log.journal.initialized = false;
+#endif
+
+ return nd_log.journal.initialized;
+}
+
+// Export the direct-journal socket path to the environment — but only when
+// the collectors source logs to the journal — so spawned collector plugins
+// write to the same journald socket.
+static void nd_log_journal_direct_set_env(void) {
+ if(nd_log.sources[NDLS_COLLECTORS].method == NDLM_JOURNAL)
+ setenv("NETDATA_SYSTEMD_JOURNAL_PATH", nd_log.journal_direct.filename, 1);
+}
+
+static bool nd_log_journal_direct_init(const char *path) {
+ if(nd_log.journal_direct.initialized) {
+ nd_log_journal_direct_set_env();
+ return true;
}
- else if ( hash == hash_local0 )
- {
- return LOG_LOCAL0;
+
+ int fd;
+ char filename[FILENAME_MAX + 1];
+ if(!is_path_unix_socket(path)) {
+
+ journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, "netdata");
+ if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) {
+
+ journal_construct_path(filename, sizeof(filename), netdata_configured_host_prefix, NULL);
+ if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) {
+
+ journal_construct_path(filename, sizeof(filename), NULL, "netdata");
+ if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1) {
+
+ journal_construct_path(filename, sizeof(filename), NULL, NULL);
+ if (!is_path_unix_socket(filename) || (fd = journal_direct_fd(filename)) == -1)
+ return false;
+ }
+ }
+ }
}
- else if ( hash == hash_local1 )
- {
- return LOG_LOCAL1;
+ else {
+ snprintfz(filename, sizeof(filename), "%s", path);
+ fd = journal_direct_fd(filename);
}
- else if ( hash == hash_local2 )
- {
- return LOG_LOCAL2;
+
+ if(fd < 0)
+ return false;
+
+ nd_log.journal_direct.fd = fd;
+ nd_log.journal_direct.initialized = true;
+
+ strncpyz(nd_log.journal_direct.filename, filename, sizeof(nd_log.journal_direct.filename) - 1);
+ nd_log_journal_direct_set_env();
+
+ return true;
+}
+
+// One-shot openlog() for syslog output, using the configured facility.
+// NOTE(review): the initialized check is not synchronized — presumably only
+// called during single-threaded setup or under the source spinlock; confirm.
+static void nd_log_syslog_init() {
+ if(nd_log.syslog.initialized)
+ return;
+
+ openlog(program_name, LOG_PID, nd_log.syslog.facility);
+ nd_log.syslog.initialized = true;
+}
+
+void nd_log_initialize_for_external_plugins(const char *name) {
+ // if we don't run under Netdata, log to stderr,
+ // otherwise, use the logging method Netdata wants us to use.
+ setenv("NETDATA_LOG_METHOD", "stderr", 0);
+ setenv("NETDATA_LOG_FORMAT", "logfmt", 0);
+
+ nd_log.overwrite_process_source = NDLS_COLLECTORS;
+ program_name = name;
+
+ for(size_t i = 0; i < _NDLS_MAX ;i++) {
+ nd_log.sources[i].method = STDERR_FILENO;
+ nd_log.sources[i].fd = -1;
+ nd_log.sources[i].fp = NULL;
}
- else if ( hash == hash_local3 )
- {
- return LOG_LOCAL3;
+
+ nd_log_set_priority_level(getenv("NETDATA_LOG_LEVEL"));
+ nd_log_set_facility(getenv("NETDATA_SYSLOG_FACILITY"));
+
+ time_t period = 1200;
+ size_t logs = 200;
+ const char *s = getenv("NETDATA_ERRORS_THROTTLE_PERIOD");
+ if(s && *s >= '0' && *s <= '9') {
+ period = str2l(s);
+ if(period < 0) period = 0;
}
- else if ( hash == hash_local4 )
- {
- return LOG_LOCAL4;
+
+ s = getenv("NETDATA_ERRORS_PER_PERIOD");
+ if(s && *s >= '0' && *s <= '9')
+ logs = str2u(s);
+
+ nd_log_set_flood_protection(logs, period);
+
+ if(!netdata_configured_host_prefix) {
+ s = getenv("NETDATA_HOST_PREFIX");
+ if(s && *s)
+ netdata_configured_host_prefix = (char *)s;
}
- else if ( hash == hash_local5 )
- {
- return LOG_LOCAL5;
+
+ ND_LOG_METHOD method = nd_log_method2id(getenv("NETDATA_LOG_METHOD"));
+ ND_LOG_FORMAT format = nd_log_format2id(getenv("NETDATA_LOG_FORMAT"));
+
+ if(!IS_VALID_LOG_METHOD_FOR_EXTERNAL_PLUGINS(method)) {
+ if(is_stderr_connected_to_journal()) {
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using journal.");
+ method = NDLM_JOURNAL;
+ }
+ else {
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "NETDATA_LOG_METHOD is not set. Using stderr.");
+ method = NDLM_STDERR;
+ }
}
- else if ( hash == hash_local6 )
- {
- return LOG_LOCAL6;
+
+ switch(method) {
+ case NDLM_JOURNAL:
+ if(!nd_log_journal_direct_init(getenv("NETDATA_SYSTEMD_JOURNAL_PATH")) ||
+ !nd_log_journal_direct_init(NULL) || !nd_log_journal_systemd_init()) {
+ nd_log(NDLS_COLLECTORS, NDLP_WARNING, "Failed to initialize journal. Using stderr.");
+ method = NDLM_STDERR;
+ }
+ break;
+
+ case NDLM_SYSLOG:
+ nd_log_syslog_init();
+ break;
+
+ default:
+ method = NDLM_STDERR;
+ break;
}
- else if ( hash == hash_local7 )
- {
- return LOG_LOCAL7;
+
+ for(size_t i = 0; i < _NDLS_MAX ;i++) {
+ nd_log.sources[i].method = method;
+ nd_log.sources[i].format = format;
+ nd_log.sources[i].fd = -1;
+ nd_log.sources[i].fp = NULL;
}
-#ifdef __APPLE__
- else if ( hash == hash_launchd )
- {
- return LOG_LAUNCHD;
+
+// nd_log(NDLS_COLLECTORS, NDLP_NOTICE, "FINAL_LOG_METHOD: %s", nd_log_id2method(method));
+}
+
+static bool nd_log_replace_existing_fd(struct nd_log_source *e, int new_fd) {
+ if(new_fd == -1 || e->fd == -1 ||
+ (e->fd == STDOUT_FILENO && nd_log.std_output.initialized) ||
+ (e->fd == STDERR_FILENO && nd_log.std_error.initialized))
+ return false;
+
+ if(new_fd != e->fd) {
+ int t = dup2(new_fd, e->fd);
+
+ bool ret = true;
+ if (t == -1) {
+ netdata_log_error("Cannot dup2() new fd %d to old fd %d for '%s'", new_fd, e->fd, e->filename);
+ ret = false;
+ }
+ else
+ close(new_fd);
+
+ if(e->fd == STDOUT_FILENO)
+ nd_log.std_output.initialized = true;
+ else if(e->fd == STDERR_FILENO)
+ nd_log.std_error.initialized = true;
+
+ return ret;
}
-#endif
- return LOG_DAEMON;
+ return false;
}
-//we do not need to use this now, but I already created this function to be
-//used case necessary.
-/*
-char *log_facility_name(int code)
-{
- char *defvalue = { "daemon" };
- switch(code)
- {
- case LOG_AUTH:
- {
- return "auth";
- }
- case LOG_AUTHPRIV:
- {
- return "authpriv";
- }
-#ifdef __FreeBSD__
- case LOG_CONSOLE:
- {
- return "console";
- }
-#endif
- case LOG_CRON:
- {
- return "cron";
- }
- case LOG_DAEMON:
- {
- return defvalue;
- }
- case LOG_FTP:
- {
- return "ftp";
- }
-#ifdef __APPLE__
- case LOG_INSTALL:
- {
- return "install";
- }
-#endif
- case LOG_KERN:
- {
- return "kern";
- }
- case LOG_LPR:
- {
- return "lpr";
- }
- case LOG_MAIL:
- {
- return "mail";
- }
-#ifdef __APPLE__
- case LOG_NETINFO:
- {
- return "netinfo" ;
- }
- case LOG_RAS:
- {
- return "ras";
+static void nd_log_open(struct nd_log_source *e, ND_LOG_SOURCES source) {
+ if(e->method == NDLM_DEFAULT)
+ nd_log_set_user_settings(source, e->filename);
+
+ if((e->method == NDLM_FILE && !e->filename) ||
+ (e->method == NDLM_DEVNULL && e->fd == -1))
+ e->method = NDLM_DISABLED;
+
+ if(e->fp)
+ fflush(e->fp);
+
+ switch(e->method) {
+ case NDLM_SYSLOG:
+ nd_log_syslog_init();
+ break;
+
+ case NDLM_JOURNAL:
+ nd_log_journal_direct_init(NULL);
+ nd_log_journal_systemd_init();
+ break;
+
+ case NDLM_STDOUT:
+ e->fp = stdout;
+ e->fd = STDOUT_FILENO;
+ break;
+
+ case NDLM_DISABLED:
+ break;
+
+ case NDLM_DEFAULT:
+ case NDLM_STDERR:
+ e->method = NDLM_STDERR;
+ e->fp = stderr;
+ e->fd = STDERR_FILENO;
+ break;
+
+ case NDLM_DEVNULL:
+ case NDLM_FILE: {
+ int fd = open(e->filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
+ if(fd == -1) {
+ if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO) {
+ e->fd = STDERR_FILENO;
+ e->method = NDLM_STDERR;
+ netdata_log_error("Cannot open log file '%s'. Falling back to stderr.", e->filename);
+ }
+ else
+ netdata_log_error("Cannot open log file '%s'. Leaving fd %d as-is.", e->filename, e->fd);
}
- case LOG_REMOTEAUTH:
- {
- return "remoteauth";
+ else {
+ if (!nd_log_replace_existing_fd(e, fd)) {
+ if(e->fd == STDOUT_FILENO || e->fd == STDERR_FILENO) {
+ if(e->fd == STDOUT_FILENO)
+ e->method = NDLM_STDOUT;
+ else if(e->fd == STDERR_FILENO)
+ e->method = NDLM_STDERR;
+
+ // we have dup2() fd, so we can close the one we opened
+ if(fd != STDOUT_FILENO && fd != STDERR_FILENO)
+ close(fd);
+ }
+ else
+ e->fd = fd;
+ }
}
-#endif
- case LOG_NEWS:
- {
- return "news";
- }
-#ifdef __FreeBSD__
- case LOG_NTP:
- {
- return "ntp" ;
- }
- case LOG_SECURITY:
- {
- return "security";
- }
-#endif
- case LOG_SYSLOG:
- {
- return "syslog";
- }
- case LOG_USER:
- {
- return "user";
- }
- case LOG_UUCP:
- {
- return "uucp";
- }
- case LOG_LOCAL0:
- {
- return "local0";
- }
- case LOG_LOCAL1:
- {
- return "local1";
- }
- case LOG_LOCAL2:
- {
- return "local2";
- }
- case LOG_LOCAL3:
- {
- return "local3";
- }
- case LOG_LOCAL4:
- {
- return "local4" ;
- }
- case LOG_LOCAL5:
- {
- return "local5";
- }
- case LOG_LOCAL6:
- {
- return "local6";
- }
- case LOG_LOCAL7:
- {
- return "local7" ;
+
+ // at this point we have e->fd set properly
+
+ if(e->fd == STDOUT_FILENO)
+ e->fp = stdout;
+ else if(e->fd == STDERR_FILENO)
+ e->fp = stderr;
+
+ if(!e->fp) {
+ e->fp = fdopen(e->fd, "a");
+ if (!e->fp) {
+ netdata_log_error("Cannot fdopen() fd %d ('%s')", e->fd, e->filename);
+
+ if(e->fd != STDOUT_FILENO && e->fd != STDERR_FILENO)
+ close(e->fd);
+
+ e->fp = stderr;
+ e->fd = STDERR_FILENO;
+ }
}
-#ifdef __APPLE__
- case LOG_LAUNCHD:
- {
- return "launchd";
+ else {
+ if (setvbuf(e->fp, NULL, _IOLBF, 0) != 0)
+ netdata_log_error("Cannot set line buffering on fd %d ('%s')", e->fd, e->filename);
}
-#endif
+ }
+ break;
}
-
- return defvalue;
}
-*/
-
-// ----------------------------------------------------------------------------
-void syslog_init() {
- static int i = 0;
+static void nd_log_stdin_init(int fd, const char *filename) {
+ int f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
+ if(f == -1)
+ return;
- if(!i) {
- openlog(program_name, LOG_PID,log_facility_id(facility_log));
- i = 1;
+ if(f != fd) {
+ dup2(f, fd);
+ close(f);
}
}
-void log_date(char *buffer, size_t len, time_t now) {
- if(unlikely(!buffer || !len))
- return;
+void nd_log_initialize(void) {
+ nd_log_stdin_init(STDIN_FILENO, "/dev/null");
- time_t t = now;
- struct tm *tmp, tmbuf;
+ for(size_t i = 0 ; i < _NDLS_MAX ; i++)
+ nd_log_open(&nd_log.sources[i], i);
+}
- tmp = localtime_r(&t, &tmbuf);
+void nd_log_reopen_log_files(void) {
+ netdata_log_info("Reopening all log files.");
- if (tmp == NULL) {
- buffer[0] = '\0';
+ nd_log.std_output.initialized = false;
+ nd_log.std_error.initialized = false;
+ nd_log_initialize();
+
+ netdata_log_info("Log files re-opened.");
+}
+
+void chown_open_file(int fd, uid_t uid, gid_t gid) {
+ if(fd == -1) return;
+
+ struct stat buf;
+
+ if(fstat(fd, &buf) == -1) {
+ netdata_log_error("Cannot fstat() fd %d", fd);
return;
}
- if (unlikely(strftime(buffer, len, "%Y-%m-%d %H:%M:%S", tmp) == 0))
- buffer[0] = '\0';
-
- buffer[len - 1] = '\0';
+ if((buf.st_uid != uid || buf.st_gid != gid) && S_ISREG(buf.st_mode)) {
+ if(fchown(fd, uid, gid) == -1)
+ netdata_log_error("Cannot fchown() fd %d.", fd);
+ }
}
-static netdata_mutex_t log_mutex = NETDATA_MUTEX_INITIALIZER;
-static inline void log_lock() {
- netdata_mutex_lock(&log_mutex);
-}
-static inline void log_unlock() {
- netdata_mutex_unlock(&log_mutex);
+void nd_log_chown_log_files(uid_t uid, gid_t gid) {
+ for(size_t i = 0 ; i < _NDLS_MAX ; i++) {
+ if(nd_log.sources[i].fd != -1 && nd_log.sources[i].fd != STDIN_FILENO)
+ chown_open_file(nd_log.sources[i].fd, uid, gid);
+ }
}
-static FILE *open_log_file(int fd, FILE *fp, const char *filename, int *enabled_syslog, int is_stdaccess, int *fd_ptr) {
- int f, devnull = 0;
+// ----------------------------------------------------------------------------
+// annotators
+struct log_field;
+static void errno_annotator(BUFFER *wb, const char *key, struct log_field *lf);
+static void priority_annotator(BUFFER *wb, const char *key, struct log_field *lf);
+static void timestamp_usec_annotator(BUFFER *wb, const char *key, struct log_field *lf);
+
+// ----------------------------------------------------------------------------
- if(!filename || !*filename || !strcmp(filename, "none") || !strcmp(filename, "/dev/null")) {
- filename = "/dev/null";
- devnull = 1;
+typedef void (*annotator_t)(BUFFER *wb, const char *key, struct log_field *lf);
+
+struct log_field {
+ const char *journal;
+ const char *logfmt;
+ annotator_t logfmt_annotator;
+ struct log_stack_entry entry;
+};
+
+#define THREAD_LOG_STACK_MAX 50
+
+static __thread struct log_stack_entry *thread_log_stack_base[THREAD_LOG_STACK_MAX];
+static __thread size_t thread_log_stack_next = 0;
+
+static __thread struct log_field thread_log_fields[_NDF_MAX] = {
+ // THE ORDER DEFINES THE ORDER FIELDS WILL APPEAR IN logfmt
+
+ [NDF_STOP] = { // processing will not stop on this - so it is ok to be first
+ .journal = NULL,
+ .logfmt = NULL,
+ .logfmt_annotator = NULL,
+ },
+ [NDF_TIMESTAMP_REALTIME_USEC] = {
+ .journal = NULL,
+ .logfmt = "time",
+ .logfmt_annotator = timestamp_usec_annotator,
+ },
+ [NDF_SYSLOG_IDENTIFIER] = {
+ .journal = "SYSLOG_IDENTIFIER", // standard journald field
+ .logfmt = "comm",
+ },
+ [NDF_LOG_SOURCE] = {
+ .journal = "ND_LOG_SOURCE",
+ .logfmt = "source",
+ },
+ [NDF_PRIORITY] = {
+ .journal = "PRIORITY", // standard journald field
+ .logfmt = "level",
+ .logfmt_annotator = priority_annotator,
+ },
+ [NDF_ERRNO] = {
+ .journal = "ERRNO", // standard journald field
+ .logfmt = "errno",
+ .logfmt_annotator = errno_annotator,
+ },
+ [NDF_INVOCATION_ID] = {
+ .journal = "INVOCATION_ID", // standard journald field
+ .logfmt = NULL,
+ },
+ [NDF_LINE] = {
+ .journal = "CODE_LINE", // standard journald field
+ .logfmt = NULL,
+ },
+ [NDF_FILE] = {
+ .journal = "CODE_FILE", // standard journald field
+ .logfmt = NULL,
+ },
+ [NDF_FUNC] = {
+ .journal = "CODE_FUNC", // standard journald field
+ .logfmt = NULL,
+ },
+ [NDF_TID] = {
+ .journal = "TID", // standard journald field
+ .logfmt = "tid",
+ },
+ [NDF_THREAD_TAG] = {
+ .journal = "THREAD_TAG",
+ .logfmt = "thread",
+ },
+ [NDF_MESSAGE_ID] = {
+ .journal = "MESSAGE_ID",
+ .logfmt = "msg_id",
+ },
+ [NDF_MODULE] = {
+ .journal = "ND_MODULE",
+ .logfmt = "module",
+ },
+ [NDF_NIDL_NODE] = {
+ .journal = "ND_NIDL_NODE",
+ .logfmt = "node",
+ },
+ [NDF_NIDL_INSTANCE] = {
+ .journal = "ND_NIDL_INSTANCE",
+ .logfmt = "instance",
+ },
+ [NDF_NIDL_CONTEXT] = {
+ .journal = "ND_NIDL_CONTEXT",
+ .logfmt = "context",
+ },
+ [NDF_NIDL_DIMENSION] = {
+ .journal = "ND_NIDL_DIMENSION",
+ .logfmt = "dimension",
+ },
+ [NDF_SRC_TRANSPORT] = {
+ .journal = "ND_SRC_TRANSPORT",
+ .logfmt = "src_transport",
+ },
+ [NDF_SRC_IP] = {
+ .journal = "ND_SRC_IP",
+ .logfmt = "src_ip",
+ },
+ [NDF_SRC_PORT] = {
+ .journal = "ND_SRC_PORT",
+ .logfmt = "src_port",
+ },
+ [NDF_SRC_CAPABILITIES] = {
+ .journal = "ND_SRC_CAPABILITIES",
+ .logfmt = "src_capabilities",
+ },
+ [NDF_DST_TRANSPORT] = {
+ .journal = "ND_DST_TRANSPORT",
+ .logfmt = "dst_transport",
+ },
+ [NDF_DST_IP] = {
+ .journal = "ND_DST_IP",
+ .logfmt = "dst_ip",
+ },
+ [NDF_DST_PORT] = {
+ .journal = "ND_DST_PORT",
+ .logfmt = "dst_port",
+ },
+ [NDF_DST_CAPABILITIES] = {
+ .journal = "ND_DST_CAPABILITIES",
+ .logfmt = "dst_capabilities",
+ },
+ [NDF_REQUEST_METHOD] = {
+ .journal = "ND_REQUEST_METHOD",
+ .logfmt = "req_method",
+ },
+ [NDF_RESPONSE_CODE] = {
+ .journal = "ND_RESPONSE_CODE",
+ .logfmt = "code",
+ },
+ [NDF_CONNECTION_ID] = {
+ .journal = "ND_CONNECTION_ID",
+ .logfmt = "conn",
+ },
+ [NDF_TRANSACTION_ID] = {
+ .journal = "ND_TRANSACTION_ID",
+ .logfmt = "transaction",
+ },
+ [NDF_RESPONSE_SENT_BYTES] = {
+ .journal = "ND_RESPONSE_SENT_BYTES",
+ .logfmt = "sent_bytes",
+ },
+ [NDF_RESPONSE_SIZE_BYTES] = {
+ .journal = "ND_RESPONSE_SIZE_BYTES",
+ .logfmt = "size_bytes",
+ },
+ [NDF_RESPONSE_PREPARATION_TIME_USEC] = {
+ .journal = "ND_RESPONSE_PREP_TIME_USEC",
+ .logfmt = "prep_ut",
+ },
+ [NDF_RESPONSE_SENT_TIME_USEC] = {
+ .journal = "ND_RESPONSE_SENT_TIME_USEC",
+ .logfmt = "sent_ut",
+ },
+ [NDF_RESPONSE_TOTAL_TIME_USEC] = {
+ .journal = "ND_RESPONSE_TOTAL_TIME_USEC",
+ .logfmt = "total_ut",
+ },
+ [NDF_ALERT_ID] = {
+ .journal = "ND_ALERT_ID",
+ .logfmt = "alert_id",
+ },
+ [NDF_ALERT_UNIQUE_ID] = {
+ .journal = "ND_ALERT_UNIQUE_ID",
+ .logfmt = "alert_unique_id",
+ },
+ [NDF_ALERT_TRANSITION_ID] = {
+ .journal = "ND_ALERT_TRANSITION_ID",
+ .logfmt = "alert_transition_id",
+ },
+ [NDF_ALERT_EVENT_ID] = {
+ .journal = "ND_ALERT_EVENT_ID",
+ .logfmt = "alert_event_id",
+ },
+ [NDF_ALERT_CONFIG_HASH] = {
+ .journal = "ND_ALERT_CONFIG",
+ .logfmt = "alert_config",
+ },
+ [NDF_ALERT_NAME] = {
+ .journal = "ND_ALERT_NAME",
+ .logfmt = "alert",
+ },
+ [NDF_ALERT_CLASS] = {
+ .journal = "ND_ALERT_CLASS",
+ .logfmt = "alert_class",
+ },
+ [NDF_ALERT_COMPONENT] = {
+ .journal = "ND_ALERT_COMPONENT",
+ .logfmt = "alert_component",
+ },
+ [NDF_ALERT_TYPE] = {
+ .journal = "ND_ALERT_TYPE",
+ .logfmt = "alert_type",
+ },
+ [NDF_ALERT_EXEC] = {
+ .journal = "ND_ALERT_EXEC",
+ .logfmt = "alert_exec",
+ },
+ [NDF_ALERT_RECIPIENT] = {
+ .journal = "ND_ALERT_RECIPIENT",
+ .logfmt = "alert_recipient",
+ },
+ [NDF_ALERT_VALUE] = {
+ .journal = "ND_ALERT_VALUE",
+ .logfmt = "alert_value",
+ },
+ [NDF_ALERT_VALUE_OLD] = {
+ .journal = "ND_ALERT_VALUE_OLD",
+ .logfmt = "alert_value_old",
+ },
+ [NDF_ALERT_STATUS] = {
+ .journal = "ND_ALERT_STATUS",
+ .logfmt = "alert_status",
+ },
+ [NDF_ALERT_STATUS_OLD] = {
+ .journal = "ND_ALERT_STATUS_OLD",
+ .logfmt = "alert_value_old",
+ },
+ [NDF_ALERT_UNITS] = {
+ .journal = "ND_ALERT_UNITS",
+ .logfmt = "alert_units",
+ },
+ [NDF_ALERT_SUMMARY] = {
+ .journal = "ND_ALERT_SUMMARY",
+ .logfmt = "alert_summary",
+ },
+ [NDF_ALERT_INFO] = {
+ .journal = "ND_ALERT_INFO",
+ .logfmt = "alert_info",
+ },
+ [NDF_ALERT_DURATION] = {
+ .journal = "ND_ALERT_DURATION",
+ .logfmt = "alert_duration",
+ },
+ [NDF_ALERT_NOTIFICATION_REALTIME_USEC] = {
+ .journal = "ND_ALERT_NOTIFICATION_TIMESTAMP_USEC",
+ .logfmt = "alert_notification_timestamp",
+ .logfmt_annotator = timestamp_usec_annotator,
+ },
+
+ // put new items here
+ // leave the request URL and the message last
+
+ [NDF_REQUEST] = {
+ .journal = "ND_REQUEST",
+ .logfmt = "request",
+ },
+ [NDF_MESSAGE] = {
+ .journal = "MESSAGE",
+ .logfmt = "msg",
+ },
+};
+
+#define THREAD_FIELDS_MAX (sizeof(thread_log_fields) / sizeof(thread_log_fields[0]))
+
+ND_LOG_FIELD_ID nd_log_field_id_by_name(const char *field, size_t len) {
+ for(size_t i = 0; i < THREAD_FIELDS_MAX ;i++) {
+ if(thread_log_fields[i].journal && strlen(thread_log_fields[i].journal) == len && strncmp(field, thread_log_fields[i].journal, len) == 0)
+ return i;
}
- if(!strcmp(filename, "syslog")) {
- filename = "/dev/null";
- devnull = 1;
+ return NDF_STOP;
+}
- syslog_init();
- if(enabled_syslog) *enabled_syslog = 1;
- }
- else if(enabled_syslog) *enabled_syslog = 0;
+void log_stack_pop(void *ptr) {
+ if(!ptr) return;
- // don't do anything if the user is willing
- // to have the standard one
- if(!strcmp(filename, "system")) {
- if(fd != -1 && !is_stdaccess) {
- if(fd_ptr) *fd_ptr = fd;
- return fp;
- }
+ struct log_stack_entry *lgs = *(struct log_stack_entry (*)[])ptr;
- filename = "stderr";
+ if(unlikely(!thread_log_stack_next || lgs != thread_log_stack_base[thread_log_stack_next - 1])) {
+ fatal("You cannot pop in the middle of the stack, or an item not in the stack");
+ return;
}
- if(!strcmp(filename, "stdout"))
- f = STDOUT_FILENO;
+ thread_log_stack_next--;
+}
- else if(!strcmp(filename, "stderr"))
- f = STDERR_FILENO;
+void log_stack_push(struct log_stack_entry *lgs) {
+ if(!lgs || thread_log_stack_next >= THREAD_LOG_STACK_MAX) return;
+ thread_log_stack_base[thread_log_stack_next++] = lgs;
+}
- else {
- f = open(filename, O_WRONLY | O_APPEND | O_CREAT, 0664);
- if(f == -1) {
- netdata_log_error("Cannot open file '%s'. Leaving %d to its default.", filename, fd);
- if(fd_ptr) *fd_ptr = fd;
- return fp;
+// ----------------------------------------------------------------------------
+// json formatter
+
+static void nd_logger_json(BUFFER *wb, struct log_field *fields, size_t fields_max) {
+
+ // --- FIELD_PARSER_VERSIONS ---
+ //
+ // IMPORTANT:
+ // THERE ARE 6 VERSIONS OF THIS CODE
+ //
+ // 1. journal (direct socket API),
+ // 2. journal (libsystemd API),
+ // 3. logfmt,
+ // 4. json,
+ // 5. convert to uint64
+ // 6. convert to int64
+ //
+ // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES
+
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY);
+ CLEAN_BUFFER *tmp = NULL;
+
+ for (size_t i = 0; i < fields_max; i++) {
+ if (!fields[i].entry.set || !fields[i].logfmt)
+ continue;
+
+ const char *key = fields[i].logfmt;
+
+ const char *s = NULL;
+ switch(fields[i].entry.type) {
+ case NDFT_TXT:
+ s = fields[i].entry.txt;
+ break;
+ case NDFT_STR:
+ s = string2str(fields[i].entry.str);
+ break;
+ case NDFT_BFR:
+ s = buffer_tostring(fields[i].entry.bfr);
+ break;
+ case NDFT_U64:
+ buffer_json_member_add_uint64(wb, key, fields[i].entry.u64);
+ break;
+ case NDFT_I64:
+ buffer_json_member_add_int64(wb, key, fields[i].entry.i64);
+ break;
+ case NDFT_DBL:
+ buffer_json_member_add_double(wb, key, fields[i].entry.dbl);
+ break;
+ case NDFT_UUID:{
+ char u[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
+ buffer_json_member_add_string(wb, key, u);
+ }
+ break;
+ case NDFT_CALLBACK: {
+ if(!tmp)
+ tmp = buffer_create(1024, NULL);
+ else
+ buffer_flush(tmp);
+ if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data))
+ s = buffer_tostring(tmp);
+ else
+ s = NULL;
+ }
+ break;
+ default:
+ s = "UNHANDLED";
+ break;
}
+
+ if(s && *s)
+ buffer_json_member_add_string(wb, key, s);
}
- // if there is a level-2 file pointer
- // flush it before switching the level-1 fds
- if(fp)
- fflush(fp);
+ buffer_json_finalize(wb);
+}
- if(devnull && is_stdaccess) {
- fd = -1;
- fp = NULL;
+// ----------------------------------------------------------------------------
+// logfmt formatter
+
+
+static int64_t log_field_to_int64(struct log_field *lf) {
+
+ // --- FIELD_PARSER_VERSIONS ---
+ //
+ // IMPORTANT:
+ // THERE ARE 6 VERSIONS OF THIS CODE
+ //
+ // 1. journal (direct socket API),
+ // 2. journal (libsystemd API),
+ // 3. logfmt,
+ // 4. json,
+ // 5. convert to uint64
+ // 6. convert to int64
+ //
+ // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES
+
+ CLEAN_BUFFER *tmp = NULL;
+ const char *s = NULL;
+
+ switch(lf->entry.type) {
+ case NDFT_UUID:
+ case NDFT_UNSET:
+ return 0;
+
+ case NDFT_TXT:
+ s = lf->entry.txt;
+ break;
+
+ case NDFT_STR:
+ s = string2str(lf->entry.str);
+ break;
+
+ case NDFT_BFR:
+ s = buffer_tostring(lf->entry.bfr);
+ break;
+
+ case NDFT_CALLBACK:
+ if(!tmp)
+ tmp = buffer_create(0, NULL);
+ else
+ buffer_flush(tmp);
+
+ if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data))
+ s = buffer_tostring(tmp);
+ else
+ s = NULL;
+ break;
+
+ case NDFT_U64:
+ return lf->entry.u64;
+
+ case NDFT_I64:
+ return lf->entry.i64;
+
+ case NDFT_DBL:
+ return lf->entry.dbl;
}
- if(fd != f && fd != -1) {
- // it automatically closes
- int t = dup2(f, fd);
- if (t == -1) {
- netdata_log_error("Cannot dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
- close(f);
- if(fd_ptr) *fd_ptr = fd;
- return fp;
- }
- // netdata_log_info("dup2() new fd %d to old fd %d for '%s'", f, fd, filename);
- close(f);
- }
- else fd = f;
+ if(s && *s)
+ return str2ll(s, NULL);
- if(!fp) {
- fp = fdopen(fd, "a");
- if (!fp)
- netdata_log_error("Cannot fdopen() fd %d ('%s')", fd, filename);
- else {
- if (setvbuf(fp, NULL, _IOLBF, 0) != 0)
- netdata_log_error("Cannot set line buffering on fd %d ('%s')", fd, filename);
- }
+ return 0;
+}
+
+static uint64_t log_field_to_uint64(struct log_field *lf) {
+
+ // --- FIELD_PARSER_VERSIONS ---
+ //
+ // IMPORTANT:
+ // THERE ARE 6 VERSIONS OF THIS CODE
+ //
+ // 1. journal (direct socket API),
+ // 2. journal (libsystemd API),
+ // 3. logfmt,
+ // 4. json,
+ // 5. convert to uint64
+ // 6. convert to int64
+ //
+ // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES
+
+ CLEAN_BUFFER *tmp = NULL;
+ const char *s = NULL;
+
+ switch(lf->entry.type) {
+ case NDFT_UUID:
+ case NDFT_UNSET:
+ return 0;
+
+ case NDFT_TXT:
+ s = lf->entry.txt;
+ break;
+
+ case NDFT_STR:
+ s = string2str(lf->entry.str);
+ break;
+
+ case NDFT_BFR:
+ s = buffer_tostring(lf->entry.bfr);
+ break;
+
+ case NDFT_CALLBACK:
+ if(!tmp)
+ tmp = buffer_create(0, NULL);
+ else
+ buffer_flush(tmp);
+
+ if(lf->entry.cb.formatter(tmp, lf->entry.cb.formatter_data))
+ s = buffer_tostring(tmp);
+ else
+ s = NULL;
+ break;
+
+ case NDFT_U64:
+ return lf->entry.u64;
+
+ case NDFT_I64:
+ return lf->entry.i64;
+
+ case NDFT_DBL:
+ return lf->entry.dbl;
}
- if(fd_ptr) *fd_ptr = fd;
- return fp;
+ if(s && *s)
+ return str2uint64_t(s, NULL);
+
+ return 0;
}
-void reopen_all_log_files() {
- if(stdout_filename)
- open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
+static void timestamp_usec_annotator(BUFFER *wb, const char *key, struct log_field *lf) {
+ usec_t ut = log_field_to_uint64(lf);
- if(stdcollector_filename)
- open_log_file(STDERR_FILENO, stderr, stdcollector_filename, &collector_log_syslog, 0, NULL);
+ if(!ut)
+ return;
- if(stderr_filename) {
- // Netdata starts using stderr and if it has success to open file it redirects
- FILE *fp = open_log_file(stdcollector_fd, stderror, stderr_filename,
- &error_log_syslog, 1, &stdcollector_fd);
- if (fp)
- stderror = fp;
- }
+ char datetime[RFC3339_MAX_LENGTH];
+ rfc3339_datetime_ut(datetime, sizeof(datetime), ut, 3, false);
-#ifdef ENABLE_ACLK
- if (aclklog_enabled)
- aclklog = open_log_file(aclklog_fd, aclklog, aclklog_filename, NULL, 0, &aclklog_fd);
-#endif
+ if(buffer_strlen(wb))
+ buffer_fast_strcat(wb, " ", 1);
- if(stdaccess_filename)
- stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ buffer_json_strcat(wb, datetime);
+}
+
+static void errno_annotator(BUFFER *wb, const char *key, struct log_field *lf) {
+ int64_t errnum = log_field_to_int64(lf);
+
+ if(errnum == 0)
+ return;
- if(stdhealth_filename)
- stdhealth = open_log_file(stdhealth_fd, stdhealth, stdhealth_filename, &health_log_syslog, 1, &stdhealth_fd);
+ char buf[1024];
+ const char *s = errno2str(errnum, buf, sizeof(buf));
+
+ if(buffer_strlen(wb))
+ buffer_fast_strcat(wb, " ", 1);
+
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=\"", 2);
+ buffer_print_int64(wb, errnum);
+ buffer_fast_strcat(wb, ", ", 2);
+ buffer_json_strcat(wb, s);
+ buffer_fast_strcat(wb, "\"", 1);
}
-void open_all_log_files() {
- // disable stdin
- open_log_file(STDIN_FILENO, stdin, "/dev/null", NULL, 0, NULL);
+static void priority_annotator(BUFFER *wb, const char *key, struct log_field *lf) {
+ uint64_t pri = log_field_to_uint64(lf);
- open_log_file(STDOUT_FILENO, stdout, stdout_filename, &output_log_syslog, 0, NULL);
- open_log_file(STDERR_FILENO, stderr, stdcollector_filename, &collector_log_syslog, 0, NULL);
+ if(buffer_strlen(wb))
+ buffer_fast_strcat(wb, " ", 1);
- // Netdata starts using stderr and if it has success to open file it redirects
- FILE *fp = open_log_file(stdcollector_fd, NULL, stderr_filename, &error_log_syslog, 1, &stdcollector_fd);
- if (fp)
- stderror = fp;
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ buffer_strcat(wb, nd_log_id2priority(pri));
+}
-#ifdef ENABLE_ACLK
- if(aclklog_enabled)
- aclklog = open_log_file(aclklog_fd, aclklog, aclklog_filename, NULL, 0, &aclklog_fd);
-#endif
+static bool needs_quotes_for_logfmt(const char *s) {
+ static bool safe_for_logfmt[256] = {
+ [' '] = true, ['!'] = true, ['"'] = false, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true,
+ ['\''] = true, ['('] = true, [')'] = true, ['*'] = true, ['+'] = true, [','] = true, ['-'] = true,
+ ['.'] = true, ['/'] = true, ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true,
+ ['5'] = true, ['6'] = true, ['7'] = true, ['8'] = true, ['9'] = true, [':'] = true, [';'] = true,
+ ['<'] = true, ['='] = true, ['>'] = true, ['?'] = true, ['@'] = true, ['A'] = true, ['B'] = true,
+ ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true, ['I'] = true,
+ ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true,
+ ['Q'] = true, ['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true,
+ ['X'] = true, ['Y'] = true, ['Z'] = true, ['['] = true, ['\\'] = false, [']'] = true, ['^'] = true,
+ ['_'] = true, ['`'] = true, ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true,
+ ['f'] = true, ['g'] = true, ['h'] = true, ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true,
+ ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true, ['q'] = true, ['r'] = true, ['s'] = true,
+ ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true, ['y'] = true, ['z'] = true,
+ ['{'] = true, ['|'] = true, ['}'] = true, ['~'] = true, [0x7f] = true,
+ };
+
+ if(!*s)
+ return true;
+
+ while(*s) {
+ if(*s == '=' || isspace(*s) || !safe_for_logfmt[(uint8_t)*s])
+ return true;
+
+ s++;
+ }
+
+ return false;
+}
+
+static void string_to_logfmt(BUFFER *wb, const char *s) {
+ bool spaces = needs_quotes_for_logfmt(s);
- stdaccess = open_log_file(stdaccess_fd, stdaccess, stdaccess_filename, &access_log_syslog, 1, &stdaccess_fd);
+ if(spaces)
+ buffer_fast_strcat(wb, "\"", 1);
- stdhealth = open_log_file(stdhealth_fd, stdhealth, stdhealth_filename, &health_log_syslog, 1, &stdhealth_fd);
+ buffer_json_strcat(wb, s);
+
+ if(spaces)
+ buffer_fast_strcat(wb, "\"", 1);
}
-// ----------------------------------------------------------------------------
-// error log throttling
+static void nd_logger_logfmt(BUFFER *wb, struct log_field *fields, size_t fields_max) {
+
+ // --- FIELD_PARSER_VERSIONS ---
+ //
+ // IMPORTANT:
+ // THERE ARE 6 VERSIONS OF THIS CODE
+ //
+ // 1. journal (direct socket API),
+ // 2. journal (libsystemd API),
+ // 3. logfmt,
+ // 4. json,
+ // 5. convert to uint64
+ // 6. convert to int64
+ //
+ // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES
+
+ CLEAN_BUFFER *tmp = NULL;
-time_t error_log_throttle_period = 1200;
-unsigned long error_log_errors_per_period = 200;
-unsigned long error_log_errors_per_period_backup = 0;
+ for (size_t i = 0; i < fields_max; i++) {
+ if (!fields[i].entry.set || !fields[i].logfmt)
+ continue;
-int error_log_limit(int reset) {
- static time_t start = 0;
- static unsigned long counter = 0, prevented = 0;
+ const char *key = fields[i].logfmt;
- FILE *fp = stderror ? stderror : stderr;
+ if(fields[i].logfmt_annotator)
+ fields[i].logfmt_annotator(wb, key, &fields[i]);
+ else {
+ if(buffer_strlen(wb))
+ buffer_fast_strcat(wb, " ", 1);
+
+ switch(fields[i].entry.type) {
+ case NDFT_TXT:
+ if(*fields[i].entry.txt) {
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ string_to_logfmt(wb, fields[i].entry.txt);
+ }
+ break;
+ case NDFT_STR:
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ string_to_logfmt(wb, string2str(fields[i].entry.str));
+ break;
+ case NDFT_BFR:
+ if(buffer_strlen(fields[i].entry.bfr)) {
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ string_to_logfmt(wb, buffer_tostring(fields[i].entry.bfr));
+ }
+ break;
+ case NDFT_U64:
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ buffer_print_uint64(wb, fields[i].entry.u64);
+ break;
+ case NDFT_I64:
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ buffer_print_int64(wb, fields[i].entry.i64);
+ break;
+ case NDFT_DBL:
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ buffer_print_netdata_double(wb, fields[i].entry.dbl);
+ break;
+ case NDFT_UUID: {
+ char u[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ buffer_fast_strcat(wb, u, sizeof(u) - 1);
+ }
+ break;
+ case NDFT_CALLBACK: {
+ if(!tmp)
+ tmp = buffer_create(1024, NULL);
+ else
+ buffer_flush(tmp);
+ if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data)) {
+ buffer_strcat(wb, key);
+ buffer_fast_strcat(wb, "=", 1);
+ string_to_logfmt(wb, buffer_tostring(tmp));
+ }
+ }
+ break;
+ default:
+ buffer_strcat(wb, "UNHANDLED");
+ break;
+ }
+ }
+ }
+}
- // fprintf(fp, "FLOOD: counter=%lu, allowed=%lu, backup=%lu, period=%llu\n", counter, error_log_errors_per_period, error_log_errors_per_period_backup, (unsigned long long)error_log_throttle_period);
+// ----------------------------------------------------------------------------
+// journal logger
- // do not throttle if the period is 0
- if(error_log_throttle_period == 0)
- return 0;
+bool nd_log_journal_socket_available(void) {
+ if(netdata_configured_host_prefix && *netdata_configured_host_prefix) {
+ char filename[FILENAME_MAX + 1];
- // prevent all logs if the errors per period is 0
- if(error_log_errors_per_period == 0)
-#ifdef NETDATA_INTERNAL_CHECKS
- return 0;
-#else
- return 1;
-#endif
+ snprintfz(filename, sizeof(filename), "%s%s",
+ netdata_configured_host_prefix, "/run/systemd/journal/socket");
- time_t now = now_monotonic_sec();
- if(!start) start = now;
-
- if(reset) {
- if(prevented) {
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
- fprintf(
- fp,
- "%s: %s LOG FLOOD PROTECTION reset for process '%s' "
- "(prevented %lu logs in the last %"PRId64" seconds).\n",
- date,
- program_name,
- program_name,
- prevented,
- (int64_t)(now - start));
+ if(is_path_unix_socket(filename))
+ return true;
+ }
+
+ return is_path_unix_socket("/run/systemd/journal/socket");
+}
+
+static bool nd_logger_journal_libsystemd(struct log_field *fields, size_t fields_max) {
+#ifdef HAVE_SYSTEMD
+
+ // --- FIELD_PARSER_VERSIONS ---
+ //
+ // IMPORTANT:
+ // THERE ARE 6 VERSIONS OF THIS CODE
+ //
+ // 1. journal (direct socket API),
+ // 2. journal (libsystemd API),
+ // 3. logfmt,
+ // 4. json,
+ // 5. convert to uint64
+ // 6. convert to int64
+ //
+ // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES
+
+ struct iovec iov[fields_max];
+ int iov_count = 0;
+
+ memset(iov, 0, sizeof(iov));
+
+ CLEAN_BUFFER *tmp = NULL;
+
+ for (size_t i = 0; i < fields_max; i++) {
+ if (!fields[i].entry.set || !fields[i].journal)
+ continue;
+
+ const char *key = fields[i].journal;
+ char *value = NULL;
+ switch (fields[i].entry.type) {
+ case NDFT_TXT:
+ if(*fields[i].entry.txt)
+ asprintf(&value, "%s=%s", key, fields[i].entry.txt);
+ break;
+ case NDFT_STR:
+ asprintf(&value, "%s=%s", key, string2str(fields[i].entry.str));
+ break;
+ case NDFT_BFR:
+ if(buffer_strlen(fields[i].entry.bfr))
+ asprintf(&value, "%s=%s", key, buffer_tostring(fields[i].entry.bfr));
+ break;
+ case NDFT_U64:
+ asprintf(&value, "%s=%" PRIu64, key, fields[i].entry.u64);
+ break;
+ case NDFT_I64:
+ asprintf(&value, "%s=%" PRId64, key, fields[i].entry.i64);
+ break;
+ case NDFT_DBL:
+ asprintf(&value, "%s=%f", key, fields[i].entry.dbl);
+ break;
+ case NDFT_UUID: {
+ char u[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
+ asprintf(&value, "%s=%s", key, u);
+ }
+ break;
+ case NDFT_CALLBACK: {
+ if(!tmp)
+ tmp = buffer_create(1024, NULL);
+ else
+ buffer_flush(tmp);
+ if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data))
+ asprintf(&value, "%s=%s", key, buffer_tostring(tmp));
+ }
+ break;
+ default:
+ asprintf(&value, "%s=%s", key, "UNHANDLED");
+ break;
}
- start = now;
- counter = 0;
- prevented = 0;
- }
-
- // detect if we log too much
- counter++;
-
- if(now - start > error_log_throttle_period) {
- if(prevented) {
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
- fprintf(
- fp,
- "%s: %s LOG FLOOD PROTECTION resuming logging from process '%s' "
- "(prevented %lu logs in the last %"PRId64" seconds).\n",
- date,
- program_name,
- program_name,
- prevented,
- (int64_t)error_log_throttle_period);
+ if (value) {
+ iov[iov_count].iov_base = value;
+ iov[iov_count].iov_len = strlen(value);
+ iov_count++;
}
+ }
- // restart the period accounting
- start = now;
- counter = 1;
- prevented = 0;
+ int r = sd_journal_sendv(iov, iov_count);
- // log this error
- return 0;
- }
-
- if(counter > error_log_errors_per_period) {
- if(!prevented) {
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
- fprintf(
- fp,
- "%s: %s LOG FLOOD PROTECTION too many logs (%lu logs in %"PRId64" seconds, threshold is set to %lu logs "
- "in %"PRId64" seconds). Preventing more logs from process '%s' for %"PRId64" seconds.\n",
- date,
- program_name,
- counter,
- (int64_t)(now - start),
- error_log_errors_per_period,
- (int64_t)error_log_throttle_period,
- program_name,
- (int64_t)(start + error_log_throttle_period - now));
+ // Clean up allocated memory
+ for (int i = 0; i < iov_count; i++) {
+ if (iov[i].iov_base != NULL) {
+ free(iov[i].iov_base);
}
+ }
- prevented++;
-
- // prevent logging this error
-#ifdef NETDATA_INTERNAL_CHECKS
- return 0;
+ return r == 0;
#else
- return 1;
+ return false;
#endif
- }
-
- return 0;
}
-void error_log_limit_reset(void) {
- log_lock();
+static bool nd_logger_journal_direct(struct log_field *fields, size_t fields_max) {
+ if(!nd_log.journal_direct.initialized)
+ return false;
+
+ // --- FIELD_PARSER_VERSIONS ---
+ //
+ // IMPORTANT:
+ // THERE ARE 6 VERSIONS OF THIS CODE
+ //
+ // 1. journal (direct socket API),
+ // 2. journal (libsystemd API),
+ // 3. logfmt,
+ // 4. json,
+ // 5. convert to uint64
+ // 6. convert to int64
+ //
+ // UPDATE ALL OF THEM FOR NEW FEATURES OR FIXES
+
+ CLEAN_BUFFER *wb = buffer_create(4096, NULL);
+ CLEAN_BUFFER *tmp = NULL;
+
+ for (size_t i = 0; i < fields_max; i++) {
+ if (!fields[i].entry.set || !fields[i].journal)
+ continue;
+
+ const char *key = fields[i].journal;
+
+ const char *s = NULL;
+ switch(fields[i].entry.type) {
+ case NDFT_TXT:
+ s = fields[i].entry.txt;
+ break;
+ case NDFT_STR:
+ s = string2str(fields[i].entry.str);
+ break;
+ case NDFT_BFR:
+ s = buffer_tostring(fields[i].entry.bfr);
+ break;
+ case NDFT_U64:
+ buffer_strcat(wb, key);
+ buffer_putc(wb, '=');
+ buffer_print_uint64(wb, fields[i].entry.u64);
+ buffer_putc(wb, '\n');
+ break;
+ case NDFT_I64:
+ buffer_strcat(wb, key);
+ buffer_putc(wb, '=');
+ buffer_print_int64(wb, fields[i].entry.i64);
+ buffer_putc(wb, '\n');
+ break;
+ case NDFT_DBL:
+ buffer_strcat(wb, key);
+ buffer_putc(wb, '=');
+ buffer_print_netdata_double(wb, fields[i].entry.dbl);
+ buffer_putc(wb, '\n');
+ break;
+ case NDFT_UUID:{
+ char u[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(*fields[i].entry.uuid, u);
+ buffer_strcat(wb, key);
+ buffer_putc(wb, '=');
+ buffer_fast_strcat(wb, u, sizeof(u) - 1);
+ buffer_putc(wb, '\n');
+ }
+ break;
+ case NDFT_CALLBACK: {
+ if(!tmp)
+ tmp = buffer_create(1024, NULL);
+ else
+ buffer_flush(tmp);
+ if(fields[i].entry.cb.formatter(tmp, fields[i].entry.cb.formatter_data))
+ s = buffer_tostring(tmp);
+ else
+ s = NULL;
+ }
+ break;
+ default:
+ s = "UNHANDLED";
+ break;
+ }
- error_log_errors_per_period = error_log_errors_per_period_backup;
- error_log_limit(1);
+ if(s && *s) {
+ buffer_strcat(wb, key);
+ if(!strchr(s, '\n')) {
+ buffer_putc(wb, '=');
+ buffer_strcat(wb, s);
+ buffer_putc(wb, '\n');
+ }
+ else {
+ buffer_putc(wb, '\n');
+ size_t size = strlen(s);
+ uint64_t le_size = htole64(size);
+ buffer_memcat(wb, &le_size, sizeof(le_size));
+ buffer_memcat(wb, s, size);
+ buffer_putc(wb, '\n');
+ }
+ }
+ }
- log_unlock();
+ return journal_direct_send(nd_log.journal_direct.fd, buffer_tostring(wb), buffer_strlen(wb));
}
-void error_log_limit_unlimited(void) {
- log_lock();
+// ----------------------------------------------------------------------------
+// syslog logger - uses logfmt
- error_log_errors_per_period = error_log_errors_per_period_backup;
- error_log_limit(1);
+static bool nd_logger_syslog(int priority, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max) {
+ CLEAN_BUFFER *wb = buffer_create(1024, NULL);
- error_log_errors_per_period = ((error_log_errors_per_period_backup * 10) < 10000) ? 10000 : (error_log_errors_per_period_backup * 10);
+ nd_logger_logfmt(wb, fields, fields_max);
+ syslog(priority, "%s", buffer_tostring(wb));
- log_unlock();
+ return true;
}
// ----------------------------------------------------------------------------
-// debug log
-
-void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
- va_list args;
+// file logger - uses logfmt
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
+static bool nd_logger_file(FILE *fp, ND_LOG_FORMAT format, struct log_field *fields, size_t fields_max) {
+ BUFFER *wb = buffer_create(1024, NULL);
- va_start( args, fmt );
- printf("%s: %s DEBUG : %s : (%04lu@%-20.20s:%-15.15s): ", date, program_name, netdata_thread_tag(), line, file, function);
- vprintf(fmt, args);
- va_end( args );
- putchar('\n');
+ if(format == NDLF_JSON)
+ nd_logger_json(wb, fields, fields_max);
+ else
+ nd_logger_logfmt(wb, fields, fields_max);
- if(output_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_ERR, fmt, args );
- va_end( args );
- }
+ int r = fprintf(fp, "%s\n", buffer_tostring(wb));
+ fflush(fp);
- fflush(stdout);
+ buffer_free(wb);
+ return r > 0;
}
// ----------------------------------------------------------------------------
-// info log
+// logger router
+
+static ND_LOG_METHOD nd_logger_select_output(ND_LOG_SOURCES source, FILE **fpp, SPINLOCK **spinlock) {
+ *spinlock = NULL;
+ ND_LOG_METHOD output = nd_log.sources[source].method;
+
+ switch(output) {
+ case NDLM_JOURNAL:
+ if(unlikely(!nd_log.journal_direct.initialized && !nd_log.journal.initialized)) {
+ output = NDLM_FILE;
+ *fpp = stderr;
+ *spinlock = &nd_log.std_error.spinlock;
+ }
+ else {
+ *fpp = NULL;
+ *spinlock = NULL;
+ }
+ break;
-void info_int( int is_collector, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... )
-{
-#if !defined(NETDATA_INTERNAL_CHECKS) && !defined(NETDATA_DEV_MODE)
- if (NETDATA_LOG_LEVEL_INFO > global_log_severity_level)
- return;
-#endif
+ case NDLM_SYSLOG:
+ if(unlikely(!nd_log.syslog.initialized)) {
+ output = NDLM_FILE;
+ *spinlock = &nd_log.std_error.spinlock;
+ *fpp = stderr;
+ }
+ else {
+ *spinlock = NULL;
+ *fpp = NULL;
+ }
+ break;
- va_list args;
- FILE *fp = (is_collector || !stderror) ? stderr : stderror;
+ case NDLM_FILE:
+ if(!nd_log.sources[source].fp) {
+ *fpp = stderr;
+ *spinlock = &nd_log.std_error.spinlock;
+ }
+ else {
+ *fpp = nd_log.sources[source].fp;
+ *spinlock = &nd_log.sources[source].spinlock;
+ }
+ break;
- log_lock();
+ case NDLM_STDOUT:
+ output = NDLM_FILE;
+ *fpp = stdout;
+ *spinlock = &nd_log.std_output.spinlock;
+ break;
- // prevent logging too much
- if (error_log_limit(0)) {
- log_unlock();
- return;
+ default:
+ case NDLM_DEFAULT:
+ case NDLM_STDERR:
+ output = NDLM_FILE;
+ *fpp = stderr;
+ *spinlock = &nd_log.std_error.spinlock;
+ break;
+
+ case NDLM_DISABLED:
+ case NDLM_DEVNULL:
+ output = NDLM_DISABLED;
+ *fpp = NULL;
+ *spinlock = NULL;
+ break;
}
- if(collector_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_INFO, fmt, args );
- va_end( args );
+ return output;
+}
+
+// ----------------------------------------------------------------------------
+// high level logger
+
+static void nd_logger_log_fields(SPINLOCK *spinlock, FILE *fp, bool limit, ND_LOG_FIELD_PRIORITY priority,
+ ND_LOG_METHOD output, struct nd_log_source *source,
+ struct log_field *fields, size_t fields_max) {
+ if(spinlock)
+ spinlock_lock(spinlock);
+
+ // check the limits
+ if(limit && nd_log_limit_reached(source))
+ goto cleanup;
+
+ if(output == NDLM_JOURNAL) {
+ if(!nd_logger_journal_direct(fields, fields_max) && !nd_logger_journal_libsystemd(fields, fields_max)) {
+ // we can't log to journal, let's log to stderr
+ if(spinlock)
+ spinlock_unlock(spinlock);
+
+ output = NDLM_FILE;
+ spinlock = &nd_log.std_error.spinlock;
+ fp = stderr;
+
+ if(spinlock)
+ spinlock_lock(spinlock);
+ }
}
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
+ if(output == NDLM_SYSLOG)
+ nd_logger_syslog(priority, source->format, fields, fields_max);
- va_start( args, fmt );
-#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(fp, "%s: %s INFO : %s : (%04lu@%-20.20s:%-15.15s): ",
- date, program_name, netdata_thread_tag(), line, file, function);
-#else
- fprintf(fp, "%s: %s INFO : %s : ", date, program_name, netdata_thread_tag());
-#endif
- vfprintf(fp, fmt, args );
- va_end( args );
+ if(output == NDLM_FILE)
+ nd_logger_file(fp, source->format, fields, fields_max);
- fputc('\n', fp);
- log_unlock();
+cleanup:
+ if(spinlock)
+ spinlock_unlock(spinlock);
}
-// ----------------------------------------------------------------------------
-// error log
+static void nd_logger_unset_all_thread_fields(void) {
+ size_t fields_max = THREAD_FIELDS_MAX;
+ for(size_t i = 0; i < fields_max ; i++)
+ thread_log_fields[i].entry.set = false;
+}
-#if defined(STRERROR_R_CHAR_P)
-// GLIBC version of strerror_r
-static const char *strerror_result(const char *a, const char *b) { (void)b; return a; }
-#elif defined(HAVE_STRERROR_R)
-// POSIX version of strerror_r
-static const char *strerror_result(int a, const char *b) { (void)a; return b; }
-#elif defined(HAVE_C__GENERIC)
+static void nd_logger_merge_log_stack_to_thread_fields(void) {
+ for(size_t c = 0; c < thread_log_stack_next ;c++) {
+ struct log_stack_entry *lgs = thread_log_stack_base[c];
-// what a trick!
-// http://stackoverflow.com/questions/479207/function-overloading-in-c
-static const char *strerror_result_int(int a, const char *b) { (void)a; return b; }
-static const char *strerror_result_string(const char *a, const char *b) { (void)b; return a; }
+ for(size_t i = 0; lgs[i].id != NDF_STOP ; i++) {
+ if(lgs[i].id >= _NDF_MAX || !lgs[i].set)
+ continue;
-#define strerror_result(a, b) _Generic((a), \
- int: strerror_result_int, \
- char *: strerror_result_string \
- )(a, b)
+ struct log_stack_entry *e = &lgs[i];
+ ND_LOG_STACK_FIELD_TYPE type = lgs[i].type;
-#else
-#error "cannot detect the format of function strerror_r()"
-#endif
+ // do not add empty / unset fields
+ if((type == NDFT_TXT && (!e->txt || !*e->txt)) ||
+ (type == NDFT_BFR && (!e->bfr || !buffer_strlen(e->bfr))) ||
+ (type == NDFT_STR && !e->str) ||
+ (type == NDFT_UUID && !e->uuid) ||
+ (type == NDFT_CALLBACK && !e->cb.formatter) ||
+ type == NDFT_UNSET)
+ continue;
-void error_limit_int(ERROR_LIMIT *erl, const char *prefix, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) {
- FILE *fp = stderror ? stderror : stderr;
+ thread_log_fields[lgs[i].id].entry = *e;
+ }
+ }
+}
- if(erl->sleep_ut)
- sleep_usec(erl->sleep_ut);
+static void nd_logger(const char *file, const char *function, const unsigned long line,
+ ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, bool limit, int saved_errno,
+ const char *fmt, va_list ap) {
- // save a copy of errno - just in case this function generates a new error
- int __errno = errno;
+ SPINLOCK *spinlock;
+ FILE *fp;
+ ND_LOG_METHOD output = nd_logger_select_output(source, &fp, &spinlock);
+ if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG)
+ return;
- va_list args;
+ // mark all fields as unset
+ nd_logger_unset_all_thread_fields();
- log_lock();
+ // flatten the log stack into the fields
+ nd_logger_merge_log_stack_to_thread_fields();
- erl->count++;
- time_t now = now_boottime_sec();
- if(now - erl->last_logged < erl->log_every) {
- log_unlock();
- return;
+ // set the common fields that are automatically set by the logging subsystem
+
+ if(likely(!thread_log_fields[NDF_INVOCATION_ID].entry.set))
+ thread_log_fields[NDF_INVOCATION_ID].entry = ND_LOG_FIELD_UUID(NDF_INVOCATION_ID, &nd_log.invocation_id);
+
+ if(likely(!thread_log_fields[NDF_LOG_SOURCE].entry.set))
+ thread_log_fields[NDF_LOG_SOURCE].entry = ND_LOG_FIELD_TXT(NDF_LOG_SOURCE, nd_log_id2source(source));
+ else {
+ ND_LOG_SOURCES src = source;
+
+ if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_TXT)
+ src = nd_log_source2id(thread_log_fields[NDF_LOG_SOURCE].entry.txt, source);
+ else if(thread_log_fields[NDF_LOG_SOURCE].entry.type == NDFT_U64)
+ src = thread_log_fields[NDF_LOG_SOURCE].entry.u64;
+
+ if(src != source && src >= 0 && src < _NDLS_MAX) {
+ source = src;
+ output = nd_logger_select_output(source, &fp, &spinlock);
+ if(output != NDLM_FILE && output != NDLM_JOURNAL && output != NDLM_SYSLOG)
+ return;
+ }
}
- // prevent logging too much
- if (error_log_limit(0)) {
- log_unlock();
- return;
+ if(likely(!thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry.set))
+ thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = ND_LOG_FIELD_TXT(NDF_SYSLOG_IDENTIFIER, program_name);
+
+ if(likely(!thread_log_fields[NDF_LINE].entry.set)) {
+ thread_log_fields[NDF_LINE].entry = ND_LOG_FIELD_U64(NDF_LINE, line);
+ thread_log_fields[NDF_FILE].entry = ND_LOG_FIELD_TXT(NDF_FILE, file);
+ thread_log_fields[NDF_FUNC].entry = ND_LOG_FIELD_TXT(NDF_FUNC, function);
}
- if(collector_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_ERR, fmt, args );
- va_end( args );
+ if(likely(!thread_log_fields[NDF_PRIORITY].entry.set)) {
+ thread_log_fields[NDF_PRIORITY].entry = ND_LOG_FIELD_U64(NDF_PRIORITY, priority);
}
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
+ if(likely(!thread_log_fields[NDF_TID].entry.set))
+ thread_log_fields[NDF_TID].entry = ND_LOG_FIELD_U64(NDF_TID, gettid());
- va_start( args, fmt );
-#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(fp, "%s: %s %-5.5s : %s : (%04lu@%-20.20s:%-15.15s): ",
- date, program_name, prefix, netdata_thread_tag(), line, file, function);
-#else
- fprintf(fp, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
-#endif
- vfprintf(fp, fmt, args );
- va_end( args );
+ char os_threadname[NETDATA_THREAD_NAME_MAX + 1];
+ if(likely(!thread_log_fields[NDF_THREAD_TAG].entry.set)) {
+ const char *thread_tag = netdata_thread_tag();
+ if(!netdata_thread_tag_exists()) {
+ // NOTE(review): removed duplicated nested netdata_thread_tag_exists() check (same predicate tested twice)
+ os_thread_get_current_name_np(os_threadname);
+
+ if ('\0' != os_threadname[0])
+ /* If it is not an empty string replace "MAIN" thread_tag */
+ thread_tag = os_threadname;
+ }
+ thread_log_fields[NDF_THREAD_TAG].entry = ND_LOG_FIELD_TXT(NDF_THREAD_TAG, thread_tag);
- if(erl->count > 1)
- fprintf(fp, " (similar messages repeated %zu times in the last %llu secs)",
- erl->count, (unsigned long long)(erl->last_logged ? now - erl->last_logged : 0));
+ // TODO: fix the ND_MODULE in logging by setting proper module name in threads
+// if(!thread_log_fields[NDF_MODULE].entry.set)
+// thread_log_fields[NDF_MODULE].entry = ND_LOG_FIELD_CB(NDF_MODULE, thread_tag_to_module, (void *)thread_tag);
+ }
- if(erl->sleep_ut)
- fprintf(fp, " (sleeping for %"PRIu64" microseconds every time this happens)", erl->sleep_ut);
+ if(likely(!thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry.set))
+ thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = ND_LOG_FIELD_U64(NDF_TIMESTAMP_REALTIME_USEC, now_realtime_usec());
- if(__errno) {
- char buf[1024];
- fprintf(fp,
- " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
- errno = 0;
+ if(saved_errno != 0 && !thread_log_fields[NDF_ERRNO].entry.set)
+ thread_log_fields[NDF_ERRNO].entry = ND_LOG_FIELD_I64(NDF_ERRNO, saved_errno);
+
+ CLEAN_BUFFER *wb = NULL;
+ if(fmt && !thread_log_fields[NDF_MESSAGE].entry.set) {
+ wb = buffer_create(1024, NULL);
+ buffer_vsprintf(wb, fmt, ap);
+ thread_log_fields[NDF_MESSAGE].entry = ND_LOG_FIELD_TXT(NDF_MESSAGE, buffer_tostring(wb));
}
- else
- fputc('\n', fp);
- erl->last_logged = now;
- erl->count = 0;
+ nd_logger_log_fields(spinlock, fp, limit, priority, output, &nd_log.sources[source],
+ thread_log_fields, THREAD_FIELDS_MAX);
- log_unlock();
-}
+ if(nd_log.sources[source].pending_msg) {
+ // log a pending message
-void error_int(int is_collector, const char *prefix, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) {
-#if !defined(NETDATA_INTERNAL_CHECKS) && !defined(NETDATA_DEV_MODE)
- if (NETDATA_LOG_LEVEL_ERROR > global_log_severity_level)
- return;
-#endif
+ nd_logger_unset_all_thread_fields();
- // save a copy of errno - just in case this function generates a new error
- int __errno = errno;
- FILE *fp = (is_collector || !stderror) ? stderr : stderror;
+ thread_log_fields[NDF_TIMESTAMP_REALTIME_USEC].entry = (struct log_stack_entry){
+ .set = true,
+ .type = NDFT_U64,
+ .u64 = now_realtime_usec(),
+ };
- va_list args;
+ thread_log_fields[NDF_LOG_SOURCE].entry = (struct log_stack_entry){
+ .set = true,
+ .type = NDFT_TXT,
+ .txt = nd_log_id2source(source),
+ };
- log_lock();
+ thread_log_fields[NDF_SYSLOG_IDENTIFIER].entry = (struct log_stack_entry){
+ .set = true,
+ .type = NDFT_TXT,
+ .txt = program_name,
+ };
- // prevent logging too much
- if (error_log_limit(0)) {
- log_unlock();
- return;
- }
+ thread_log_fields[NDF_MESSAGE].entry = (struct log_stack_entry){
+ .set = true,
+ .type = NDFT_TXT,
+ .txt = nd_log.sources[source].pending_msg,
+ };
+
+ nd_logger_log_fields(spinlock, fp, false, priority, output,
+ &nd_log.sources[source],
+ thread_log_fields, THREAD_FIELDS_MAX);
- if(collector_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_ERR, fmt, args );
- va_end( args );
+ freez((void *)nd_log.sources[source].pending_msg);
+ nd_log.sources[source].pending_msg = NULL;
}
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
+ errno = 0;
+}
- va_start( args, fmt );
-#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(fp, "%s: %s %-5.5s : %s : (%04lu@%-20.20s:%-15.15s): ",
- date, program_name, prefix, netdata_thread_tag(), line, file, function);
-#else
- fprintf(fp, "%s: %s %-5.5s : %s : ", date, program_name, prefix, netdata_thread_tag());
-#endif
- vfprintf(fp, fmt, args );
- va_end( args );
+static ND_LOG_SOURCES nd_log_validate_source(ND_LOG_SOURCES source) {
+ if(source >= _NDLS_MAX)
+ source = NDLS_DAEMON;
- if(__errno) {
- char buf[1024];
- fprintf(fp,
- " (errno %d, %s)\n", __errno, strerror_result(strerror_r(__errno, buf, 1023), buf));
- errno = 0;
- }
- else
- fputc('\n', fp);
+ if(overwrite_thread_source)
+ source = overwrite_thread_source;
- log_unlock();
-}
+ if(nd_log.overwrite_process_source)
+ source = nd_log.overwrite_process_source;
-#ifdef NETDATA_INTERNAL_CHECKS
-static void crash_netdata(void) {
- // make Netdata core dump
- abort();
+ return source;
}
-#endif
-#ifdef HAVE_BACKTRACE
-#define BT_BUF_SIZE 100
-static void print_call_stack(void) {
- FILE *fp = (!stderror) ? stderr : stderror;
+// ----------------------------------------------------------------------------
+// public API for loggers
- int nptrs;
- void *buffer[BT_BUF_SIZE];
+void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) {
+ int saved_errno = errno;
+ source = nd_log_validate_source(source);
- nptrs = backtrace(buffer, BT_BUF_SIZE);
- if(nptrs)
- backtrace_symbols_fd(buffer, nptrs, fileno(fp));
+ if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority)
+ return;
+
+ va_list args;
+ va_start(args, fmt);
+ nd_logger(file, function, line, source, priority,
+ source == NDLS_DAEMON || source == NDLS_COLLECTORS,
+ saved_errno, fmt, args);
+ va_end(args);
}
-#endif
-void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
- FILE *fp = stderror ? stderror : stderr;
+void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) {
+ int saved_errno = errno;
+ source = nd_log_validate_source(source);
- // save a copy of errno - just in case this function generates a new error
- int __errno = errno;
- va_list args;
- const char *thread_tag;
- char os_threadname[NETDATA_THREAD_NAME_MAX + 1];
+ if (source != NDLS_DEBUG && priority > nd_log.sources[source].min_priority)
+ return;
- if(collector_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_CRIT, fmt, args );
- va_end( args );
- }
+ if(erl->sleep_ut)
+ sleep_usec(erl->sleep_ut);
- thread_tag = netdata_thread_tag();
- if (!netdata_thread_tag_exists()) {
- os_thread_get_current_name_np(os_threadname);
- if ('\0' != os_threadname[0]) { /* If it is not an empty string replace "MAIN" thread_tag */
- thread_tag = os_threadname;
- }
+ spinlock_lock(&erl->spinlock);
+
+ erl->count++;
+ time_t now = now_boottime_sec();
+ if(now - erl->last_logged < erl->log_every) {
+ spinlock_unlock(&erl->spinlock);
+ return;
}
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
+ spinlock_unlock(&erl->spinlock);
- log_lock();
+ va_list args;
+ va_start(args, fmt);
+ nd_logger(file, function, line, source, priority,
+ source == NDLS_DAEMON || source == NDLS_COLLECTORS,
+ saved_errno, fmt, args);
+ va_end(args);
+ erl->last_logged = now;
+ erl->count = 0;
+}
- va_start( args, fmt );
-#ifdef NETDATA_INTERNAL_CHECKS
- fprintf(fp,
- "%s: %s FATAL : %s : (%04lu@%-20.20s:%-15.15s): ", date, program_name, thread_tag, line, file, function);
-#else
- fprintf(fp, "%s: %s FATAL : %s : ", date, program_name, thread_tag);
-#endif
- vfprintf(fp, fmt, args );
- va_end( args );
+void netdata_logger_fatal( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) {
+ int saved_errno = errno;
+ ND_LOG_SOURCES source = NDLS_DAEMON;
+ source = nd_log_validate_source(source);
- perror(" # ");
- fputc('\n', fp);
+ va_list args;
+ va_start(args, fmt);
+ nd_logger(file, function, line, source, NDLP_ALERT, true, saved_errno, fmt, args);
+ va_end(args);
- log_unlock();
+ char date[LOG_DATE_LENGTH];
+ log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
char action_data[70+1];
- snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, __errno);
+ snprintfz(action_data, 70, "%04lu@%-10.10s:%-15.15s/%d", line, file, function, saved_errno);
char action_result[60+1];
+ char os_threadname[NETDATA_THREAD_NAME_MAX + 1];
+ const char *thread_tag = netdata_thread_tag();
+ if(!netdata_thread_tag_exists()) {
+ // NOTE(review): removed duplicated nested netdata_thread_tag_exists() check (same predicate tested twice)
+ os_thread_get_current_name_np(os_threadname);
+
+ if ('\0' != os_threadname[0])
+ /* If it is not an empty string replace "MAIN" thread_tag */
+ thread_tag = os_threadname;
+ }
+ if(!thread_tag)
+ thread_tag = "UNKNOWN";
+
const char *tag_to_send = thread_tag;
// anonymize thread names
@@ -1045,129 +2312,120 @@ void fatal_int( const char *file, const char *function, const unsigned long line
send_statistics("FATAL", action_result, action_data);
#ifdef HAVE_BACKTRACE
- print_call_stack();
+ int fd = nd_log.sources[NDLS_DAEMON].fd;
+ if(fd == -1)
+ fd = STDERR_FILENO;
+
+ int nptrs;
+ void *buffer[10000];
+
+ nptrs = backtrace(buffer, sizeof(buffer) / sizeof(void *)); /* 2nd arg is an entry count, not bytes */
+ if(nptrs)
+ backtrace_symbols_fd(buffer, nptrs, fd);
#endif
#ifdef NETDATA_INTERNAL_CHECKS
- crash_netdata();
+ abort();
#endif
netdata_cleanup_and_exit(1);
}
// ----------------------------------------------------------------------------
-// access log
-
-void netdata_log_access( const char *fmt, ... ) {
- va_list args;
-
- if(access_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_INFO, fmt, args );
- va_end( args );
- }
-
- if(stdaccess) {
- static netdata_mutex_t access_mutex = NETDATA_MUTEX_INITIALIZER;
-
- if(web_server_is_multithreaded)
- netdata_mutex_lock(&access_mutex);
+// log limits
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
- fprintf(stdaccess, "%s: ", date);
+void nd_log_limits_reset(void) {
+ usec_t now_ut = now_monotonic_usec();
- va_start( args, fmt );
- vfprintf( stdaccess, fmt, args );
- va_end( args );
- fputc('\n', stdaccess);
+ spinlock_lock(&nd_log.std_output.spinlock);
+ spinlock_lock(&nd_log.std_error.spinlock);
- if(web_server_is_multithreaded)
- netdata_mutex_unlock(&access_mutex);
+ for(size_t i = 0; i < _NDLS_MAX ;i++) {
+ spinlock_lock(&nd_log.sources[i].spinlock);
+ nd_log.sources[i].limits.prevented = 0;
+ nd_log.sources[i].limits.counter = 0;
+ nd_log.sources[i].limits.started_monotonic_ut = now_ut;
+ nd_log.sources[i].limits.logs_per_period = nd_log.sources[i].limits.logs_per_period_backup;
+ spinlock_unlock(&nd_log.sources[i].spinlock);
}
-}
-
-// ----------------------------------------------------------------------------
-// health log
-void netdata_log_health( const char *fmt, ... ) {
- va_list args;
+ spinlock_unlock(&nd_log.std_output.spinlock);
+ spinlock_unlock(&nd_log.std_error.spinlock);
+}
- if(health_log_syslog) {
- va_start( args, fmt );
- vsyslog(LOG_INFO, fmt, args );
- va_end( args );
+void nd_log_limits_unlimited(void) {
+ nd_log_limits_reset();
+ for(size_t i = 0; i < _NDLS_MAX ;i++) {
+ nd_log.sources[i].limits.logs_per_period = 0;
}
+}
- if(stdhealth) {
- static netdata_mutex_t health_mutex = NETDATA_MUTEX_INITIALIZER;
+static bool nd_log_limit_reached(struct nd_log_source *source) {
+ if(source->limits.throttle_period == 0 || source->limits.logs_per_period == 0)
+ return false;
- if(web_server_is_multithreaded)
- netdata_mutex_lock(&health_mutex);
+ usec_t now_ut = now_monotonic_usec();
+ if(!source->limits.started_monotonic_ut)
+ source->limits.started_monotonic_ut = now_ut;
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
- fprintf(stdhealth, "%s: ", date);
+ source->limits.counter++;
- va_start( args, fmt );
- vfprintf( stdhealth, fmt, args );
- va_end( args );
- fputc('\n', stdhealth);
+ if(now_ut - source->limits.started_monotonic_ut > (usec_t)source->limits.throttle_period) {
+ if(source->limits.prevented) {
+ BUFFER *wb = buffer_create(1024, NULL);
+ buffer_sprintf(wb,
+ "LOG FLOOD PROTECTION: resuming logging "
+ "(prevented %"PRIu32" logs in the last %"PRIu32" seconds).",
+ source->limits.prevented,
+ source->limits.throttle_period);
- if(web_server_is_multithreaded)
- netdata_mutex_unlock(&health_mutex);
- }
-}
+ if(source->pending_msg)
+ freez((void *)source->pending_msg);
-#ifdef ENABLE_ACLK
-void log_aclk_message_bin( const char *data, const size_t data_len, int tx, const char *mqtt_topic, const char *message_name) {
- if (aclklog) {
- static netdata_mutex_t aclklog_mutex = NETDATA_MUTEX_INITIALIZER;
- netdata_mutex_lock(&aclklog_mutex);
+ source->pending_msg = strdupz(buffer_tostring(wb));
- char date[LOG_DATE_LENGTH];
- log_date(date, LOG_DATE_LENGTH, now_realtime_sec());
- fprintf(aclklog, "%s: %s Msg:\"%s\", MQTT-topic:\"%s\": ", date, tx ? "OUTGOING" : "INCOMING", message_name, mqtt_topic);
-
- fwrite(data, data_len, 1, aclklog);
+ buffer_free(wb);
+ }
- fputc('\n', aclklog);
+ // restart the period accounting
+ source->limits.started_monotonic_ut = now_ut;
+ source->limits.counter = 1;
+ source->limits.prevented = 0;
- netdata_mutex_unlock(&aclklog_mutex);
+ // log this error
+ return false;
}
-}
-#endif
-void log_set_global_severity_level(netdata_log_level_t value)
-{
- global_log_severity_level = value;
-}
+ if(source->limits.counter > source->limits.logs_per_period) {
+ if(!source->limits.prevented) {
+ BUFFER *wb = buffer_create(1024, NULL);
+ buffer_sprintf(wb,
+ "LOG FLOOD PROTECTION: too many logs (%"PRIu32" logs in %"PRId64" seconds, threshold is set to %"PRIu32" logs "
+ "in %"PRIu32" seconds). Preventing more logs from process '%s' for %"PRId64" seconds.",
+ source->limits.counter,
+ (int64_t)((now_ut - source->limits.started_monotonic_ut) / USEC_PER_SEC),
+ source->limits.logs_per_period,
+ source->limits.throttle_period,
+ program_name,
+ (int64_t)((source->limits.started_monotonic_ut + (source->limits.throttle_period * USEC_PER_SEC) - now_ut)) / USEC_PER_SEC);
+
+ if(source->pending_msg)
+ freez((void *)source->pending_msg);
+
+ source->pending_msg = strdupz(buffer_tostring(wb));
+
+ buffer_free(wb);
+ }
-netdata_log_level_t log_severity_string_to_severity_level(char *level)
-{
- if (!strcmp(level, NETDATA_LOG_LEVEL_INFO_STR))
- return NETDATA_LOG_LEVEL_INFO;
- if (!strcmp(level, NETDATA_LOG_LEVEL_ERROR_STR) || !strcmp(level, NETDATA_LOG_LEVEL_ERROR_SHORT_STR))
- return NETDATA_LOG_LEVEL_ERROR;
+ source->limits.prevented++;
- return NETDATA_LOG_LEVEL_INFO;
-}
-
-char *log_severity_level_to_severity_string(netdata_log_level_t level)
-{
- switch (level) {
- case NETDATA_LOG_LEVEL_ERROR:
- return NETDATA_LOG_LEVEL_ERROR_STR;
- case NETDATA_LOG_LEVEL_INFO:
- default:
- return NETDATA_LOG_LEVEL_INFO_STR;
+ // prevent logging this error
+#ifdef NETDATA_INTERNAL_CHECKS
+ return false;
+#else
+ return true;
+#endif
}
-}
-void log_set_global_severity_for_external_plugins() {
- char *s = getenv("NETDATA_LOG_SEVERITY_LEVEL");
- if (!s)
- return;
- netdata_log_level_t level = log_severity_string_to_severity_level(s);
- log_set_global_severity_level(level);
+ return false;
}
diff --git a/libnetdata/log/log.h b/libnetdata/log/log.h
index cf0865cf..ad634693 100644
--- a/libnetdata/log/log.h
+++ b/libnetdata/log/log.h
@@ -9,6 +9,184 @@ extern "C" {
#include "../libnetdata.h"
+#define ND_LOG_DEFAULT_THROTTLE_LOGS 1000
+#define ND_LOG_DEFAULT_THROTTLE_PERIOD 60
+
+typedef enum __attribute__((__packed__)) {
+ NDLS_UNSET = 0, // internal use only
+ NDLS_ACCESS, // access.log
+ NDLS_ACLK, // aclk.log
+ NDLS_COLLECTORS, // collectors.log
+ NDLS_DAEMON, // error.log
+ NDLS_HEALTH, // health.log
+ NDLS_DEBUG, // debug.log
+
+ // terminator
+ _NDLS_MAX,
+} ND_LOG_SOURCES;
+
+typedef enum __attribute__((__packed__)) {
+ NDLP_EMERG = LOG_EMERG,
+ NDLP_ALERT = LOG_ALERT,
+ NDLP_CRIT = LOG_CRIT,
+ NDLP_ERR = LOG_ERR,
+ NDLP_WARNING = LOG_WARNING,
+ NDLP_NOTICE = LOG_NOTICE,
+ NDLP_INFO = LOG_INFO,
+ NDLP_DEBUG = LOG_DEBUG,
+} ND_LOG_FIELD_PRIORITY;
+
+typedef enum __attribute__((__packed__)) {
+ // KEEP THESE IN THE SAME ORDER AS in thread_log_fields (log.c)
+ // so that it easy to audit for missing fields
+
+ NDF_STOP = 0,
+ NDF_TIMESTAMP_REALTIME_USEC, // the timestamp of the log message - added automatically
+ NDF_SYSLOG_IDENTIFIER, // the syslog identifier of the application - added automatically
+ NDF_LOG_SOURCE, // DAEMON, COLLECTORS, HEALTH, ACCESS, ACLK - set at the log call
+ NDF_PRIORITY, // the syslog priority (severity) - set at the log call
+ NDF_ERRNO, // the ERRNO at the time of the log call - added automatically
+ NDF_INVOCATION_ID, // the INVOCATION_ID of Netdata - added automatically
+ NDF_LINE, // the source code file line number - added automatically
+ NDF_FILE, // the source code filename - added automatically
+ NDF_FUNC, // the source code function - added automatically
+ NDF_TID, // the thread ID of the thread logging - added automatically
+ NDF_THREAD_TAG, // the thread tag of the thread logging - added automatically
+ NDF_MESSAGE_ID, // for specific events
+ NDF_MODULE, // for internal plugin module, all other get the NDF_THREAD_TAG
+
+ NDF_NIDL_NODE, // the node / rrdhost currently being worked
+ NDF_NIDL_INSTANCE, // the instance / rrdset currently being worked
+ NDF_NIDL_CONTEXT, // the context of the instance currently being worked
+ NDF_NIDL_DIMENSION, // the dimension / rrddim currently being worked
+
+ // web server, aclk and stream receiver
+ NDF_SRC_TRANSPORT, // the transport we received the request, one of: http, https, pluginsd
+
+ // web server and stream receiver
+ NDF_SRC_IP, // the streaming / web server source IP
+ NDF_SRC_PORT, // the streaming / web server source Port
+ NDF_SRC_CAPABILITIES, // the stream receiver capabilities
+
+ // stream sender (established links)
+ NDF_DST_TRANSPORT, // the transport we send the request, one of: http, https
+ NDF_DST_IP, // the destination streaming IP
+ NDF_DST_PORT, // the destination streaming Port
+ NDF_DST_CAPABILITIES, // the destination streaming capabilities
+
+ // web server, aclk and stream receiver
+ NDF_REQUEST_METHOD, // for http like requests, the http request method
+ NDF_RESPONSE_CODE, // for http like requests, the http response code, otherwise a status string
+
+ // web server (all), aclk (queries)
+ NDF_CONNECTION_ID, // the web server connection ID
+ NDF_TRANSACTION_ID, // the web server and API transaction ID
+ NDF_RESPONSE_SENT_BYTES, // for http like requests, the response bytes
+ NDF_RESPONSE_SIZE_BYTES, // for http like requests, the uncompressed response size
+ NDF_RESPONSE_PREPARATION_TIME_USEC, // for http like requests, the preparation time
+ NDF_RESPONSE_SENT_TIME_USEC, // for http like requests, the time to send the response back
+ NDF_RESPONSE_TOTAL_TIME_USEC, // for http like requests, the total time to complete the response
+
+ // health alerts
+ NDF_ALERT_ID,
+ NDF_ALERT_UNIQUE_ID,
+ NDF_ALERT_EVENT_ID,
+ NDF_ALERT_TRANSITION_ID,
+ NDF_ALERT_CONFIG_HASH,
+ NDF_ALERT_NAME,
+ NDF_ALERT_CLASS,
+ NDF_ALERT_COMPONENT,
+ NDF_ALERT_TYPE,
+ NDF_ALERT_EXEC,
+ NDF_ALERT_RECIPIENT,
+ NDF_ALERT_DURATION,
+ NDF_ALERT_VALUE,
+ NDF_ALERT_VALUE_OLD,
+ NDF_ALERT_STATUS,
+ NDF_ALERT_STATUS_OLD,
+ NDF_ALERT_SOURCE,
+ NDF_ALERT_UNITS,
+ NDF_ALERT_SUMMARY,
+ NDF_ALERT_INFO,
+ NDF_ALERT_NOTIFICATION_REALTIME_USEC,
+ // NDF_ALERT_FLAGS,
+
+ // put new items here
+ // leave the request URL and the message last
+
+ NDF_REQUEST, // the request we are currently working on
+ NDF_MESSAGE, // the log message, if any
+
+ // terminator
+ _NDF_MAX,
+} ND_LOG_FIELD_ID;
+
+typedef enum __attribute__((__packed__)) {
+ NDFT_UNSET = 0,
+ NDFT_TXT,
+ NDFT_STR,
+ NDFT_BFR,
+ NDFT_U64,
+ NDFT_I64,
+ NDFT_DBL,
+ NDFT_UUID,
+ NDFT_CALLBACK,
+} ND_LOG_STACK_FIELD_TYPE;
+
+void nd_log_set_user_settings(ND_LOG_SOURCES source, const char *setting);
+void nd_log_set_facility(const char *facility);
+void nd_log_set_priority_level(const char *setting);
+void nd_log_initialize(void);
+void nd_log_reopen_log_files(void);
+void chown_open_file(int fd, uid_t uid, gid_t gid);
+void nd_log_chown_log_files(uid_t uid, gid_t gid);
+void nd_log_set_flood_protection(size_t logs, time_t period);
+void nd_log_initialize_for_external_plugins(const char *name);
+void nd_log_set_thread_source(ND_LOG_SOURCES source);
+bool nd_log_journal_socket_available(void);
+ND_LOG_FIELD_ID nd_log_field_id_by_name(const char *field, size_t len);
+int nd_log_priority2id(const char *priority);
+const char *nd_log_id2priority(ND_LOG_FIELD_PRIORITY priority);
+const char *nd_log_method_for_external_plugins(const char *s);
+
+int nd_log_health_fd(void);
+typedef bool (*log_formatter_callback_t)(BUFFER *wb, void *data);
+
+struct log_stack_entry {
+ ND_LOG_FIELD_ID id;
+ ND_LOG_STACK_FIELD_TYPE type;
+ bool set;
+ union {
+ const char *txt;
+ struct netdata_string *str;
+ BUFFER *bfr;
+ uint64_t u64;
+ int64_t i64;
+ double dbl;
+ const uuid_t *uuid;
+ struct {
+ log_formatter_callback_t formatter;
+ void *formatter_data;
+ } cb;
+ };
+};
+
+#define ND_LOG_STACK _cleanup_(log_stack_pop) struct log_stack_entry
+#define ND_LOG_STACK_PUSH(lgs) log_stack_push(lgs)
+
+#define ND_LOG_FIELD_TXT(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_TXT, .txt = (value), .set = true, }
+#define ND_LOG_FIELD_STR(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_STR, .str = (value), .set = true, }
+#define ND_LOG_FIELD_BFR(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_BFR, .bfr = (value), .set = true, }
+#define ND_LOG_FIELD_U64(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_U64, .u64 = (value), .set = true, }
+#define ND_LOG_FIELD_I64(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_I64, .i64 = (value), .set = true, }
+#define ND_LOG_FIELD_DBL(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_DBL, .dbl = (value), .set = true, }
+#define ND_LOG_FIELD_CB(field, func, data) (struct log_stack_entry){ .id = (field), .type = NDFT_CALLBACK, .cb = { .formatter = (func), .formatter_data = (data) }, .set = true, }
+#define ND_LOG_FIELD_UUID(field, value) (struct log_stack_entry){ .id = (field), .type = NDFT_UUID, .uuid = (value), .set = true, }
+#define ND_LOG_FIELD_END() (struct log_stack_entry){ .id = NDF_STOP, .type = NDFT_UNSET, .set = false, }
+
+void log_stack_pop(void *ptr);
+void log_stack_push(struct log_stack_entry *lgs);
+
#define D_WEB_BUFFER 0x0000000000000001
#define D_WEB_CLIENT 0x0000000000000002
#define D_LISTENER 0x0000000000000004
@@ -46,114 +224,75 @@ extern "C" {
#define D_REPLICATION 0x0000002000000000
#define D_SYSTEM 0x8000000000000000
-extern int web_server_is_multithreaded;
-
extern uint64_t debug_flags;
extern const char *program_name;
-extern int stdaccess_fd;
-extern FILE *stdaccess;
+#ifdef ENABLE_ACLK
+extern int aclklog_enabled;
+#endif
-extern int stdhealth_fd;
-extern FILE *stdhealth;
+#define LOG_DATE_LENGTH 26
+void log_date(char *buffer, size_t len, time_t now);
-extern int stdcollector_fd;
-extern FILE *stderror;
+static inline void debug_dummy(void) {}
-extern const char *stdaccess_filename;
-extern const char *stderr_filename;
-extern const char *stdout_filename;
-extern const char *stdhealth_filename;
-extern const char *stdcollector_filename;
-extern const char *facility_log;
+void nd_log_limits_reset(void);
+void nd_log_limits_unlimited(void);
-#ifdef ENABLE_ACLK
-extern const char *aclklog_filename;
-extern int aclklog_fd;
-extern FILE *aclklog;
-extern int aclklog_enabled;
+#define NDLP_INFO_STR "info"
+
+#ifdef NETDATA_INTERNAL_CHECKS
+#define netdata_log_debug(type, args...) do { if(unlikely(debug_flags & type)) netdata_logger(NDLS_DEBUG, NDLP_DEBUG, __FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
+#define internal_error(condition, args...) do { if(unlikely(condition)) netdata_logger(NDLS_DAEMON, NDLP_DEBUG, __FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
+#define internal_fatal(condition, args...) do { if(unlikely(condition)) netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
+#else
+#define netdata_log_debug(type, args...) debug_dummy()
+#define internal_error(args...) debug_dummy()
+#define internal_fatal(args...) debug_dummy()
#endif
-extern int access_log_syslog;
-extern int error_log_syslog;
-extern int output_log_syslog;
-extern int health_log_syslog;
+#define fatal(args...) netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, ##args)
+#define fatal_assert(expr) ((expr) ? (void)(0) : netdata_logger_fatal(__FILE__, __FUNCTION__, __LINE__, "Assertion `%s' failed", #expr))
-extern time_t error_log_throttle_period;
-extern unsigned long error_log_errors_per_period, error_log_errors_per_period_backup;
-int error_log_limit(int reset);
+// ----------------------------------------------------------------------------
+// normal logging
-void open_all_log_files();
-void reopen_all_log_files();
+void netdata_logger(ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) PRINTFLIKE(6, 7);
+#define nd_log(NDLS, NDLP, args...) netdata_logger(NDLS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args)
+#define nd_log_daemon(NDLP, args...) netdata_logger(NDLS_DAEMON, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args)
+#define nd_log_collector(NDLP, args...) netdata_logger(NDLS_COLLECTORS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args)
-#define LOG_DATE_LENGTH 26
-void log_date(char *buffer, size_t len, time_t now);
+#define netdata_log_info(args...) netdata_logger(NDLS_DAEMON, NDLP_INFO, __FILE__, __FUNCTION__, __LINE__, ##args)
+#define netdata_log_error(args...) netdata_logger(NDLS_DAEMON, NDLP_ERR, __FILE__, __FUNCTION__, __LINE__, ##args)
+#define collector_info(args...) netdata_logger(NDLS_COLLECTORS, NDLP_INFO, __FILE__, __FUNCTION__, __LINE__, ##args)
+#define collector_error(args...) netdata_logger(NDLS_COLLECTORS, NDLP_ERR, __FILE__, __FUNCTION__, __LINE__, ##args)
-static inline void debug_dummy(void) {}
+#define log_aclk_message_bin(__data, __data_len, __tx, __mqtt_topic, __message_name) \
+ nd_log(NDLS_ACLK, NDLP_INFO, \
+ "direction:%s message:'%s' topic:'%s' json:'%.*s'", \
+ (__tx) ? "OUTGOING" : "INCOMING", __message_name, __mqtt_topic, (int)(__data_len), __data)
-void error_log_limit_reset(void);
-void error_log_limit_unlimited(void);
+// ----------------------------------------------------------------------------
+// logging with limits
typedef struct error_with_limit {
+ SPINLOCK spinlock;
time_t log_every;
size_t count;
time_t last_logged;
usec_t sleep_ut;
} ERROR_LIMIT;
-typedef enum netdata_log_level {
- NETDATA_LOG_LEVEL_ERROR,
- NETDATA_LOG_LEVEL_INFO,
-
- NETDATA_LOG_LEVEL_END
-} netdata_log_level_t;
+#define nd_log_limit_static_global_var(var, log_every_secs, sleep_usecs) static ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) }
+#define nd_log_limit_static_thread_var(var, log_every_secs, sleep_usecs) static __thread ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) }
+void netdata_logger_with_limit(ERROR_LIMIT *erl, ND_LOG_SOURCES source, ND_LOG_FIELD_PRIORITY priority, const char *file, const char *function, unsigned long line, const char *fmt, ... ) PRINTFLIKE(7, 8);;
+#define nd_log_limit(erl, NDLS, NDLP, args...) netdata_logger_with_limit(erl, NDLS, NDLP, __FILE__, __FUNCTION__, __LINE__, ##args)
-#define NETDATA_LOG_LEVEL_INFO_STR "info"
-#define NETDATA_LOG_LEVEL_ERROR_STR "error"
-#define NETDATA_LOG_LEVEL_ERROR_SHORT_STR "err"
-
-extern netdata_log_level_t global_log_severity_level;
-netdata_log_level_t log_severity_string_to_severity_level(char *level);
-char *log_severity_level_to_severity_string(netdata_log_level_t level);
-void log_set_global_severity_level(netdata_log_level_t value);
-void log_set_global_severity_for_external_plugins();
-
-#define error_limit_static_global_var(var, log_every_secs, sleep_usecs) static ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) }
-#define error_limit_static_thread_var(var, log_every_secs, sleep_usecs) static __thread ERROR_LIMIT var = { .last_logged = 0, .count = 0, .log_every = (log_every_secs), .sleep_ut = (sleep_usecs) }
-
-#ifdef NETDATA_INTERNAL_CHECKS
-#define netdata_log_debug(type, args...) do { if(unlikely(debug_flags & type)) debug_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
-#define internal_error(condition, args...) do { if(unlikely(condition)) error_int(0, "IERR", __FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
-#define internal_fatal(condition, args...) do { if(unlikely(condition)) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args); } while(0)
-#else
-#define netdata_log_debug(type, args...) debug_dummy()
-#define internal_error(args...) debug_dummy()
-#define internal_fatal(args...) debug_dummy()
-#endif
-
-#define netdata_log_info(args...) info_int(0, __FILE__, __FUNCTION__, __LINE__, ##args)
-#define collector_info(args...) info_int(1, __FILE__, __FUNCTION__, __LINE__, ##args)
-#define infoerr(args...) error_int(0, "INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define netdata_log_error(args...) error_int(0, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define collector_infoerr(args...) error_int(1, "INFO", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define collector_error(args...) error_int(1, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define error_limit(erl, args...) error_limit_int(erl, "ERROR", __FILE__, __FUNCTION__, __LINE__, ##args)
-#define fatal(args...) fatal_int(__FILE__, __FUNCTION__, __LINE__, ##args)
-#define fatal_assert(expr) ((expr) ? (void)(0) : fatal_int(__FILE__, __FUNCTION__, __LINE__, "Assertion `%s' failed", #expr))
+// ----------------------------------------------------------------------------
void send_statistics(const char *action, const char *action_result, const char *action_data);
-void debug_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(4, 5);
-void info_int( int is_collector, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(5, 6);
-void error_int( int is_collector, const char *prefix, const char *file, const char *function, const unsigned long line, const char *fmt, ... ) PRINTFLIKE(6, 7);
-void error_limit_int(ERROR_LIMIT *erl, const char *prefix, const char *file __maybe_unused, const char *function __maybe_unused, unsigned long line __maybe_unused, const char *fmt, ... ) PRINTFLIKE(6, 7);;
-void fatal_int( const char *file, const char *function, const unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5);
-void netdata_log_access( const char *fmt, ... ) PRINTFLIKE(1, 2);
-void netdata_log_health( const char *fmt, ... ) PRINTFLIKE(1, 2);
-
-#ifdef ENABLE_ACLK
-void log_aclk_message_bin( const char *data, const size_t data_len, int tx, const char *mqtt_topic, const char *message_name);
-#endif
+void netdata_logger_fatal( const char *file, const char *function, unsigned long line, const char *fmt, ... ) NORETURN PRINTFLIKE(4, 5);
# ifdef __cplusplus
}
diff --git a/libnetdata/log/systemd-cat-native.c b/libnetdata/log/systemd-cat-native.c
new file mode 100644
index 00000000..de6211cc
--- /dev/null
+++ b/libnetdata/log/systemd-cat-native.c
@@ -0,0 +1,820 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "systemd-cat-native.h"
+#include "../required_dummies.h"
+
+#ifdef __FreeBSD__
+#include <sys/endian.h>
+#endif
+
+#ifdef __APPLE__
+#include <machine/endian.h>
+#endif
+
+static void log_message_to_stderr(BUFFER *msg) {
+ CLEAN_BUFFER *tmp = buffer_create(0, NULL);
+
+ for(size_t i = 0; i < msg->len ;i++) {
+ if(isprint(msg->buffer[i]))
+ buffer_putc(tmp, msg->buffer[i]);
+ else {
+ buffer_putc(tmp, '[');
+ buffer_print_uint64_hex(tmp, msg->buffer[i]);
+ buffer_putc(tmp, ']');
+ }
+ }
+
+ fprintf(stderr, "SENDING: %s\n", buffer_tostring(tmp));
+}
+
+static inline buffered_reader_ret_t get_next_line(struct buffered_reader *reader, BUFFER *line, int timeout_ms) {
+ while(true) {
+ if(unlikely(!buffered_reader_next_line(reader, line))) {
+ buffered_reader_ret_t ret = buffered_reader_read_timeout(reader, STDIN_FILENO, timeout_ms, false);
+ if(unlikely(ret != BUFFERED_READER_READ_OK))
+ return ret;
+
+ continue;
+ }
+ else {
+ // make sure the buffer is NULL terminated
+ line->buffer[line->len] = '\0';
+
+ // remove the trailing newlines
+ while(line->len && line->buffer[line->len - 1] == '\n')
+ line->buffer[--line->len] = '\0';
+
+ return BUFFERED_READER_READ_OK;
+ }
+ }
+}
+
// Copy up to src_len bytes of src into dst (capacity dst_len bytes), replacing
// every occurrence of the string `newline` with a single '\n' character.
// dst is always NUL-terminated (when dst_len > 0) and the number of bytes
// written, excluding the terminating NUL, is returned.
// NOTE(review): src must also be NUL-terminated (strstr() is used on it);
// src_len only limits how much of it is consumed - confirm against callers.
static inline size_t copy_replacing_newlines(char *dst, size_t dst_len, const char *src, size_t src_len, const char *newline) {
    // guard dst_len == 0 too: the unconditional terminator write below
    // would otherwise write past a zero-length buffer
    if (!dst || !dst_len || !src) return 0;

    const char *current_src = src;
    const char *src_end = src + src_len;    // one past the last byte of src
    char *current_dst = dst;
    size_t remaining_dst_len = dst_len;
    size_t newline_len = (newline && *newline) ? strlen(newline) : 0;

    size_t bytes_copied = 0;                // bytes written, excluding the NUL

    while (remaining_dst_len > 1 && current_src < src_end) {
        if (newline_len > 0) {
            const char *found = strstr(current_src, newline);
            if (found && found < src_end) {
                size_t copy_len = found - current_src;

                // leave room for the replacement '\n' AND the terminating NUL;
                // capping at remaining_dst_len - 1 (as before) allowed the
                // '\n' to consume the last byte, pushing the terminator one
                // byte past the end of dst on truncation
                if (copy_len > remaining_dst_len - 2)
                    copy_len = remaining_dst_len - 2;

                memcpy(current_dst, current_src, copy_len);
                current_dst += copy_len;
                *current_dst++ = '\n';
                remaining_dst_len -= (copy_len + 1);
                bytes_copied += copy_len + 1;   // +1 for the newline character
                current_src = found + newline_len;
                continue;
            }
        }

        // no more newline markers - copy the rest, truncated to the space left
        size_t copy_len = src_end - current_src;
        if (copy_len >= remaining_dst_len) copy_len = remaining_dst_len - 1;

        memcpy(current_dst, current_src, copy_len);
        current_dst += copy_len;
        remaining_dst_len -= copy_len;
        bytes_copied += copy_len;
        break;
    }

    // always NUL-terminate; room is guaranteed by the caps above
    *current_dst = '\0';

    return bytes_copied;
}
+
// Append one "KEY=VALUE" input line to wb in Journal Export Format.
// If `newline` does not occur in src (or there is no '='), the line is
// appended verbatim as a text field. Otherwise the field is emitted in the
// binary journal format:
//   KEY '\n' <64-bit little-endian length> <value, markers replaced by '\n'> '\n'
// NOTE(review): src is expected to be NUL-terminated (strstr/strchr operate
// on it) with src_len matching its length - confirm against callers.
static inline void buffer_memcat_replacing_newlines(BUFFER *wb, const char *src, size_t src_len, const char *newline) {
    if(!src) return;

    const char *equal;
    if(!newline || !*newline || !strstr(src, newline) || !(equal = strchr(src, '='))) {
        // plain text field: append as-is, newline terminated
        buffer_memcat(wb, src, src_len);
        buffer_putc(wb, '\n');
        return;
    }

    // binary field: the KEY goes first, on its own line
    size_t key_len = equal - src;
    buffer_memcat(wb, src, key_len);
    buffer_putc(wb, '\n');

    // reserve an 8-byte length placeholder; patched below once the
    // final (post-replacement) value size is known
    char *length_ptr = &wb->buffer[wb->len];
    uint64_t le_size = 0;
    buffer_memcat(wb, &le_size, sizeof(le_size));

    // copy the value in-place into wb, replacing markers with real newlines
    const char *value = ++equal;
    size_t value_len = src_len - key_len - 1;
    buffer_need_bytes(wb, value_len + 1);
    size_t size = copy_replacing_newlines(&wb->buffer[wb->len], value_len + 1, value, value_len, newline);
    wb->len += size;
    buffer_putc(wb, '\n');

    // the export format requires the length in little-endian byte order
    le_size = htole64(size);
    memcpy(length_ptr, &le_size, sizeof(le_size));
}
+
// ----------------------------------------------------------------------------
// log to a systemd-journal-remote

#ifdef HAVE_CURL
#include <curl/curl.h>

#ifndef HOST_NAME_MAX
#define HOST_NAME_MAX 256
#endif

// Identification fields, resolved once at startup and repeated on every
// event sent (see journal_remote_complete_event()).
char global_hostname[HOST_NAME_MAX] = "";
char global_boot_id[UUID_COMPACT_STR_LEN] = "";
char global_machine_id[UUID_COMPACT_STR_LEN] = "";
char global_stream_id[UUID_COMPACT_STR_LEN] = "";
char global_namespace[1024] = "";             // pre-formatted "_NAMESPACE=...\n", or empty
char global_systemd_invocation_id[1024] = ""; // pre-formatted "_SYSTEMD_INVOCATION_ID=...\n", or empty
#define BOOT_ID_PATH "/proc/sys/kernel/random/boot_id"
#define MACHINE_ID_PATH "/etc/machine-id"

// default certificate locations, matching systemd-journal-upload's defaults
#define DEFAULT_PRIVATE_KEY "/etc/ssl/private/journal-upload.pem"
#define DEFAULT_PUBLIC_KEY "/etc/ssl/certs/journal-upload.pem"
#define DEFAULT_CA_CERT "/etc/ssl/ca/trusted.pem"

// the portion of the request body curl still has to upload
// (consumed by systemd_journal_remote_read_callback())
struct upload_data {
    char *data;
    size_t length;
};
+
+static size_t systemd_journal_remote_read_callback(void *ptr, size_t size, size_t nmemb, void *userp) {
+ struct upload_data *upload = (struct upload_data *)userp;
+ size_t buffer_size = size * nmemb;
+
+ if (upload->length) {
+ size_t copy_size = upload->length < buffer_size ? upload->length : buffer_size;
+ memcpy(ptr, upload->data, copy_size);
+ upload->data += copy_size;
+ upload->length -= copy_size;
+ return copy_size;
+ }
+
+ return 0;
+}
+
+CURL* initialize_connection_to_systemd_journal_remote(const char* url, const char* private_key, const char* public_key, const char* ca_cert, struct curl_slist **headers) {
+ CURL *curl = curl_easy_init();
+ if (!curl) {
+ fprintf(stderr, "Failed to initialize curl\n");
+ return NULL;
+ }
+
+ *headers = curl_slist_append(*headers, "Content-Type: application/vnd.fdo.journal");
+ *headers = curl_slist_append(*headers, "Transfer-Encoding: chunked");
+ curl_easy_setopt(curl, CURLOPT_HTTPHEADER, *headers);
+ curl_easy_setopt(curl, CURLOPT_URL, url);
+ curl_easy_setopt(curl, CURLOPT_POST, 1L);
+ curl_easy_setopt(curl, CURLOPT_READFUNCTION, systemd_journal_remote_read_callback);
+
+ if (strncmp(url, "https://", 8) == 0) {
+ if (private_key) curl_easy_setopt(curl, CURLOPT_SSLKEY, private_key);
+ if (public_key) curl_easy_setopt(curl, CURLOPT_SSLCERT, public_key);
+
+ if (strcmp(ca_cert, "all") != 0) {
+ curl_easy_setopt(curl, CURLOPT_CAINFO, ca_cert);
+ } else {
+ curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);
+ }
+ }
+ // curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L); // Remove for less verbose output
+
+ return curl;
+}
+
// Append the trailing trusted fields that complete one journal event and
// terminate it with an empty line, ready for systemd-journal-remote.
// If monotonic_ut is not NULL, it receives the monotonic timestamp used,
// so callers can measure batching age.
static void journal_remote_complete_event(BUFFER *msg, usec_t *monotonic_ut) {
    usec_t ut = now_monotonic_usec();

    if(monotonic_ut)
        *monotonic_ut = ut;

    // global_namespace and global_systemd_invocation_id are either empty
    // or already formatted as "FIELD=value\n", so they need no separator
    buffer_sprintf(msg,
                   ""
                   "__REALTIME_TIMESTAMP=%llu\n"
                   "__MONOTONIC_TIMESTAMP=%llu\n"
                   "_MACHINE_ID=%s\n"
                   "_BOOT_ID=%s\n"
                   "_HOSTNAME=%s\n"
                   "_TRANSPORT=stdout\n"
                   "_LINE_BREAK=nul\n"
                   "_STREAM_ID=%s\n"
                   "_RUNTIME_SCOPE=system\n"
                   "%s%s\n"
                   , now_realtime_usec()
                   , ut
                   , global_machine_id
                   , global_boot_id
                   , global_hostname
                   , global_stream_id
                   , global_namespace
                   , global_systemd_invocation_id
    );
}
+
+static CURLcode journal_remote_send_buffer(CURL* curl, BUFFER *msg) {
+
+ // log_message_to_stderr(msg);
+
+ struct upload_data upload = {0};
+
+ if (!curl || !buffer_strlen(msg))
+ return CURLE_FAILED_INIT;
+
+ upload.data = (char *) buffer_tostring(msg);
+ upload.length = buffer_strlen(msg);
+
+ curl_easy_setopt(curl, CURLOPT_READDATA, &upload);
+ curl_easy_setopt(curl, CURLOPT_INFILESIZE_LARGE, (curl_off_t)upload.length);
+
+ return curl_easy_perform(curl);
+}
+
// return codes of log_input_to_journal_remote() (0 means success)
typedef enum {
    LOG_TO_JOURNAL_REMOTE_BAD_PARAMS = -1,        // no URL was given
    LOG_TO_JOURNAL_REMOTE_CANNOT_INITIALIZE = -2, // curl initialization failed
    LOG_TO_JOURNAL_REMOTE_CANNOT_SEND = -3,       // a POST to the remote failed
    LOG_TO_JOURNAL_REMOTE_CANNOT_READ = -4,       // reading stdin failed
} log_to_journal_remote_ret_t;
+
+static log_to_journal_remote_ret_t log_input_to_journal_remote(const char *url, const char *key, const char *cert, const char *trust, const char *newline, int timeout_ms) {
+ if(!url || !*url) {
+ fprintf(stderr, "No URL is given.\n");
+ return LOG_TO_JOURNAL_REMOTE_BAD_PARAMS;
+ }
+
+ if(timeout_ms < 10)
+ timeout_ms = 10;
+
+ global_boot_id[0] = '\0';
+ char buffer[1024];
+ if(read_file(BOOT_ID_PATH, buffer, sizeof(buffer)) == 0) {
+ uuid_t uuid;
+ if(uuid_parse_flexi(buffer, uuid) == 0)
+ uuid_unparse_lower_compact(uuid, global_boot_id);
+ else
+ fprintf(stderr, "WARNING: cannot parse the UUID found in '%s'.\n", BOOT_ID_PATH);
+ }
+
+ if(global_boot_id[0] == '\0') {
+ fprintf(stderr, "WARNING: cannot read '%s'. Will generate a random _BOOT_ID.\n", BOOT_ID_PATH);
+ uuid_t uuid;
+ uuid_generate_random(uuid);
+ uuid_unparse_lower_compact(uuid, global_boot_id);
+ }
+
+ if(read_file(MACHINE_ID_PATH, buffer, sizeof(buffer)) == 0) {
+ uuid_t uuid;
+ if(uuid_parse_flexi(buffer, uuid) == 0)
+ uuid_unparse_lower_compact(uuid, global_machine_id);
+ else
+ fprintf(stderr, "WARNING: cannot parse the UUID found in '%s'.\n", MACHINE_ID_PATH);
+ }
+
+ if(global_machine_id[0] == '\0') {
+ fprintf(stderr, "WARNING: cannot read '%s'. Will generate a random _MACHINE_ID.\n", MACHINE_ID_PATH);
+ uuid_t uuid;
+ uuid_generate_random(uuid);
+ uuid_unparse_lower_compact(uuid, global_boot_id);
+ }
+
+ if(global_stream_id[0] == '\0') {
+ uuid_t uuid;
+ uuid_generate_random(uuid);
+ uuid_unparse_lower_compact(uuid, global_stream_id);
+ }
+
+ if(global_hostname[0] == '\0') {
+ if(gethostname(global_hostname, sizeof(global_hostname)) != 0) {
+ fprintf(stderr, "WARNING: cannot get system's hostname. Will use internal default.\n");
+ snprintfz(global_hostname, sizeof(global_hostname), "systemd-cat-native-unknown-hostname");
+ }
+ }
+
+ if(global_systemd_invocation_id[0] == '\0' && getenv("INVOCATION_ID"))
+ snprintfz(global_systemd_invocation_id, sizeof(global_systemd_invocation_id), "_SYSTEMD_INVOCATION_ID=%s\n", getenv("INVOCATION_ID"));
+
+ if(!key)
+ key = DEFAULT_PRIVATE_KEY;
+
+ if(!cert)
+ cert = DEFAULT_PUBLIC_KEY;
+
+ if(!trust)
+ trust = DEFAULT_CA_CERT;
+
+ char full_url[4096];
+ snprintfz(full_url, sizeof(full_url), "%s/upload", url);
+
+ CURL *curl;
+ CURLcode res = CURLE_OK;
+ struct curl_slist *headers = NULL;
+
+ curl_global_init(CURL_GLOBAL_ALL);
+ curl = initialize_connection_to_systemd_journal_remote(full_url, key, cert, trust, &headers);
+
+ if(!curl)
+ return LOG_TO_JOURNAL_REMOTE_CANNOT_INITIALIZE;
+
+ struct buffered_reader reader;
+ buffered_reader_init(&reader);
+ CLEAN_BUFFER *line = buffer_create(sizeof(reader.read_buffer), NULL);
+ CLEAN_BUFFER *msg = buffer_create(sizeof(reader.read_buffer), NULL);
+
+ size_t msg_full_events = 0;
+ size_t msg_partial_fields = 0;
+ usec_t msg_started_ut = 0;
+ size_t failures = 0;
+ size_t messages_logged = 0;
+
+ log_to_journal_remote_ret_t ret = 0;
+
+ while(true) {
+ buffered_reader_ret_t rc = get_next_line(&reader, line, timeout_ms);
+ if(rc == BUFFERED_READER_READ_POLL_TIMEOUT) {
+ if(msg_full_events && !msg_partial_fields) {
+ res = journal_remote_send_buffer(curl, msg);
+ if(res != CURLE_OK) {
+ fprintf(stderr, "journal_remote_send_buffer() failed: %s\n", curl_easy_strerror(res));
+ failures++;
+ ret = LOG_TO_JOURNAL_REMOTE_CANNOT_SEND;
+ goto cleanup;
+ }
+ else
+ messages_logged++;
+
+ msg_full_events = 0;
+ buffer_flush(msg);
+ }
+ }
+ else if(rc == BUFFERED_READER_READ_OK) {
+ if(!line->len) {
+ // an empty line - we are done for this message
+ if(msg_partial_fields) {
+ msg_partial_fields = 0;
+
+ usec_t ut;
+ journal_remote_complete_event(msg, &ut);
+ if(!msg_full_events)
+ msg_started_ut = ut;
+
+ msg_full_events++;
+
+ if(ut - msg_started_ut >= USEC_PER_SEC / 2) {
+ res = journal_remote_send_buffer(curl, msg);
+ if(res != CURLE_OK) {
+ fprintf(stderr, "journal_remote_send_buffer() failed: %s\n", curl_easy_strerror(res));
+ failures++;
+ ret = LOG_TO_JOURNAL_REMOTE_CANNOT_SEND;
+ goto cleanup;
+ }
+ else
+ messages_logged++;
+
+ msg_full_events = 0;
+ buffer_flush(msg);
+ }
+ }
+ }
+ else {
+ buffer_memcat_replacing_newlines(msg, line->buffer, line->len, newline);
+ msg_partial_fields++;
+ }
+
+ buffer_flush(line);
+ }
+ else {
+ fprintf(stderr, "cannot read input data, failed with code %d\n", rc);
+ ret = LOG_TO_JOURNAL_REMOTE_CANNOT_READ;
+ break;
+ }
+ }
+
+ if (msg_full_events || msg_partial_fields) {
+ if(msg_partial_fields) {
+ msg_partial_fields = 0;
+ msg_full_events++;
+ journal_remote_complete_event(msg, NULL);
+ }
+
+ if(msg_full_events) {
+ res = journal_remote_send_buffer(curl, msg);
+ if(res != CURLE_OK) {
+ fprintf(stderr, "journal_remote_send_buffer() failed: %s\n", curl_easy_strerror(res));
+ failures++;
+ }
+ else
+ messages_logged++;
+
+ msg_full_events = 0;
+ buffer_flush(msg);
+ }
+ }
+
+cleanup:
+ curl_easy_cleanup(curl);
+ curl_slist_free_all(headers);
+ curl_global_cleanup();
+
+ return ret;
+}
+
+#endif
+
+static int help(void) {
+ fprintf(stderr,
+ "\n"
+ "Netdata systemd-cat-native " PACKAGE_VERSION "\n"
+ "\n"
+ "This program reads from its standard input, lines in the format:\n"
+ "\n"
+ "KEY1=VALUE1\\n\n"
+ "KEY2=VALUE2\\n\n"
+ "KEYN=VALUEN\\n\n"
+ "\\n\n"
+ "\n"
+ "and sends them to systemd-journal.\n"
+ "\n"
+ " - Binary journal fields are not accepted at its input\n"
+ " - Binary journal fields can be generated after newline processing\n"
+ " - Messages have to be separated by an empty line\n"
+ " - Keys starting with underscore are not accepted (by journald)\n"
+ " - Other rules imposed by systemd-journald are imposed (by journald)\n"
+ "\n"
+ "Usage:\n"
+ "\n"
+ " %s\n"
+ " [--newline=STRING]\n"
+ " [--log-as-netdata|-N]\n"
+ " [--namespace=NAMESPACE] [--socket=PATH]\n"
+#ifdef HAVE_CURL
+ " [--url=URL [--key=FILENAME] [--cert=FILENAME] [--trust=FILENAME|all]]\n"
+#endif
+ "\n"
+ "The program has the following modes of logging:\n"
+ "\n"
+ " * Log to a local systemd-journald or stderr\n"
+ "\n"
+ " This is the default mode. If systemd-journald is available, logs will be\n"
+ " sent to systemd, otherwise logs will be printed on stderr, using logfmt\n"
+ " formatting. Options --socket and --namespace are available to configure\n"
+ " the journal destination:\n"
+ "\n"
+ " --socket=PATH\n"
+ " The path of a systemd-journald UNIX socket.\n"
+ " The program will use the default systemd-journald socket when this\n"
+ " option is not used.\n"
+ "\n"
+ " --namespace=NAMESPACE\n"
+ " The name of a configured and running systemd-journald namespace.\n"
+ " The program will produce the socket path based on its internal\n"
+ " defaults, to send the messages to the systemd journal namespace.\n"
+ "\n"
+ " * Log as Netdata, enabled with --log-as-netdata or -N\n"
+ "\n"
+ " In this mode the program uses environment variables set by Netdata for\n"
+ " the log destination. Only log fields defined by Netdata are accepted.\n"
+ " If the environment variables expected by Netdata are not found, it\n"
+ " falls back to stderr logging in logfmt format.\n"
+#ifdef HAVE_CURL
+ "\n"
+ " * Log to a systemd-journal-remote TCP socket, enabled with --url=URL\n"
+ "\n"
+ " In this mode, the program will directly sent logs to a remote systemd\n"
+ " journal (systemd-journal-remote expected at the destination)\n"
+ " This mode is available even when the local system does not support\n"
+ " systemd, or even it is not Linux, allowing a remote Linux systemd\n"
+ " journald to become the logs database of the local system.\n"
+ "\n"
+ " Unfortunately systemd-journal-remote does not accept compressed\n"
+ " data over the network, so the stream will be uncompressed.\n"
+ "\n"
+ " --url=URL\n"
+ " The destination systemd-journal-remote address and port, similarly\n"
+ " to what /etc/systemd/journal-upload.conf accepts.\n"
+ " Usually it is in the form: https://ip.address:19532\n"
+ " Both http and https URLs are accepted. When using https, the\n"
+ " following additional options are accepted:\n"
+ "\n"
+ " --key=FILENAME\n"
+ " The filename of the private key of the server.\n"
+ " The default is: " DEFAULT_PRIVATE_KEY "\n"
+ "\n"
+ " --cert=FILENAME\n"
+ " The filename of the public key of the server.\n"
+ " The default is: " DEFAULT_PUBLIC_KEY "\n"
+ "\n"
+ " --trust=FILENAME | all\n"
+ " The filename of the trusted CA public key.\n"
+ " The default is: " DEFAULT_CA_CERT "\n"
+ " The keyword 'all' can be used to trust all CAs.\n"
+ "\n"
+ " --namespace=NAMESPACE\n"
+ " Set the namespace of the messages sent.\n"
+ "\n"
+ " --keep-trying\n"
+ " Keep trying to send the message, if the remote journal is not there.\n"
+#endif
+ "\n"
+ " NEWLINES PROCESSING\n"
+ " systemd-journal logs entries may have newlines in them. However the\n"
+ " Journal Export Format uses binary formatted data to achieve this,\n"
+ " making it hard for text processing.\n"
+ "\n"
+ " To overcome this limitation, this program allows single-line text\n"
+ " formatted values at its input, to be binary formatted multi-line Journal\n"
+ " Export Format at its output.\n"
+ "\n"
+ " To achieve that it allows replacing a given string to a newline.\n"
+ " The parameter --newline=STRING allows setting the string to be replaced\n"
+ " with newlines.\n"
+ "\n"
+ " For example by setting --newline='--NEWLINE--', the program will replace\n"
+ " all occurrences of --NEWLINE-- with the newline character, within each\n"
+ " VALUE of the KEY=VALUE lines. Once this this done, the program will\n"
+ " switch the field to the binary Journal Export Format before sending the\n"
+ " log event to systemd-journal.\n"
+ "\n",
+ program_name);
+
+ return 1;
+}
+
+// ----------------------------------------------------------------------------
+// log as Netdata
+
// Reset the log stack to a clean state: free any owned text values and mark
// every field slot (1 .. _NDF_MAX-1) as unset. Slot 0 is re-armed for
// NDF_MESSAGE and the terminating END entry is restored.
static void lgs_reset(struct log_stack_entry *lgs) {
    for(size_t i = 1; i < _NDF_MAX ;i++) {
        // only text fields own their value (allocated by strdupz_replacing_newlines())
        if(lgs[i].type == NDFT_TXT && lgs[i].set && lgs[i].txt)
            freez((void *)lgs[i].txt);

        lgs[i] = ND_LOG_FIELD_TXT(i, NULL);
    }

    lgs[0] = ND_LOG_FIELD_TXT(NDF_MESSAGE, NULL);
    lgs[_NDF_MAX] = ND_LOG_FIELD_END();
}
+
// Duplicate src, replacing every occurrence of `newline` with '\n'.
// A NULL src is treated as an empty string. The returned string is owned
// by the caller (release it with freez()).
static const char *strdupz_replacing_newlines(const char *src, const char *newline) {
    const char *source = src ? src : "";
    size_t source_len = strlen(source);

    char *out = mallocz(source_len + 1);
    copy_replacing_newlines(out, source_len + 1, source, source_len, newline);
    return out;
}
+
+static int log_input_as_netdata(const char *newline, int timeout_ms) {
+ struct buffered_reader reader;
+ buffered_reader_init(&reader);
+ CLEAN_BUFFER *line = buffer_create(sizeof(reader.read_buffer), NULL);
+
+ ND_LOG_STACK lgs[_NDF_MAX + 1] = { 0 };
+ ND_LOG_STACK_PUSH(lgs);
+ lgs_reset(lgs);
+
+ size_t fields_added = 0;
+ size_t messages_logged = 0;
+ ND_LOG_FIELD_PRIORITY priority = NDLP_INFO;
+
+ while(get_next_line(&reader, line, timeout_ms) == BUFFERED_READER_READ_OK) {
+ if(!line->len) {
+ // an empty line - we are done for this message
+
+ nd_log(NDLS_HEALTH, priority,
+ "added %d fields", // if the user supplied a MESSAGE, this will be ignored
+ fields_added);
+
+ lgs_reset(lgs);
+ fields_added = 0;
+ messages_logged++;
+ }
+ else {
+ char *equal = strchr(line->buffer, '=');
+ if(equal) {
+ const char *field = line->buffer;
+ size_t field_len = equal - line->buffer;
+ ND_LOG_FIELD_ID id = nd_log_field_id_by_name(field, field_len);
+ if(id != NDF_STOP) {
+ const char *value = ++equal;
+
+ if(lgs[id].txt)
+ freez((void *) lgs[id].txt);
+
+ lgs[id].txt = strdupz_replacing_newlines(value, newline);
+ lgs[id].set = true;
+
+ fields_added++;
+
+ if(id == NDF_PRIORITY)
+ priority = nd_log_priority2id(value);
+ }
+ else {
+ struct log_stack_entry backup = lgs[NDF_MESSAGE];
+ lgs[NDF_MESSAGE] = ND_LOG_FIELD_TXT(NDF_MESSAGE, NULL);
+
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "Field '%.*s' is not a Netdata field. Ignoring it.",
+ field_len, field);
+
+ lgs[NDF_MESSAGE] = backup;
+ }
+ }
+ else {
+ struct log_stack_entry backup = lgs[NDF_MESSAGE];
+ lgs[NDF_MESSAGE] = ND_LOG_FIELD_TXT(NDF_MESSAGE, NULL);
+
+ nd_log(NDLS_COLLECTORS, NDLP_ERR,
+ "Line does not contain an = sign; ignoring it: %s",
+ line->buffer);
+
+ lgs[NDF_MESSAGE] = backup;
+ }
+ }
+
+ buffer_flush(line);
+ }
+
+ if(fields_added) {
+ nd_log(NDLS_HEALTH, priority, "added %d fields", fields_added);
+ messages_logged++;
+ }
+
+ return messages_logged ? 0 : 1;
+}
+
+// ----------------------------------------------------------------------------
+// log to a local systemd-journald
+
+static bool journal_local_send_buffer(int fd, BUFFER *msg) {
+ // log_message_to_stderr(msg);
+
+ bool ret = journal_direct_send(fd, msg->buffer, msg->len);
+ if (!ret)
+ fprintf(stderr, "Cannot send message to systemd journal.\n");
+
+ return ret;
+}
+
+static int log_input_to_journal(const char *socket, const char *namespace, const char *newline, int timeout_ms) {
+ char path[FILENAME_MAX + 1];
+ int fd = -1;
+
+ if(socket)
+ snprintfz(path, sizeof(path), "%s", socket);
+ else
+ journal_construct_path(path, sizeof(path), NULL, namespace);
+
+ fd = journal_direct_fd(path);
+ if (fd == -1) {
+ fprintf(stderr, "Cannot open '%s' as a UNIX socket (errno = %d)\n",
+ path, errno);
+ return 1;
+ }
+
+ struct buffered_reader reader;
+ buffered_reader_init(&reader);
+ CLEAN_BUFFER *line = buffer_create(sizeof(reader.read_buffer), NULL);
+ CLEAN_BUFFER *msg = buffer_create(sizeof(reader.read_buffer), NULL);
+
+ size_t messages_logged = 0;
+ size_t failed_messages = 0;
+
+ while(get_next_line(&reader, line, timeout_ms) == BUFFERED_READER_READ_OK) {
+ if (!line->len) {
+ // an empty line - we are done for this message
+ if (msg->len) {
+ if(journal_local_send_buffer(fd, msg))
+ messages_logged++;
+ else {
+ failed_messages++;
+ goto cleanup;
+ }
+ }
+
+ buffer_flush(msg);
+ }
+ else
+ buffer_memcat_replacing_newlines(msg, line->buffer, line->len, newline);
+
+ buffer_flush(line);
+ }
+
+ if (msg && msg->len) {
+ if(journal_local_send_buffer(fd, msg))
+ messages_logged++;
+ else
+ failed_messages++;
+ }
+
+cleanup:
+ return !failed_messages && messages_logged ? 0 : 1;
+}
+
// Entry point: parse the command line and dispatch to one of the three
// logging modes (local journald, as-Netdata, or remote journal via curl).
int main(int argc, char *argv[]) {
    clocks_init();
    nd_log_initialize_for_external_plugins(argv[0]);

    int timeout_ms = -1; // wait forever
    bool log_as_netdata = false;
    const char *newline = NULL;
    const char *namespace = NULL;
    const char *socket = getenv("NETDATA_SYSTEMD_JOURNAL_PATH");
#ifdef HAVE_CURL
    const char *url = NULL;
    const char *key = NULL;
    const char *cert = NULL;
    const char *trust = NULL;
    bool keep_trying = false;
#endif

    for(int i = 1; i < argc ;i++) {
        const char *k = argv[i];

        if(strcmp(k, "--help") == 0 || strcmp(k, "-h") == 0)
            return help();

        else if(strcmp(k, "--log-as-netdata") == 0 || strcmp(k, "-N") == 0)
            log_as_netdata = true;

        else if(strncmp(k, "--namespace=", 12) == 0)
            namespace = &k[12];

        else if(strncmp(k, "--socket=", 9) == 0)
            socket = &k[9];

        else if(strncmp(k, "--newline=", 10) == 0)
            newline = &k[10];

#ifdef HAVE_CURL
        else if (strncmp(k, "--url=", 6) == 0)
            url = &k[6];

        else if (strncmp(k, "--key=", 6) == 0)
            key = &k[6];

        else if (strncmp(k, "--cert=", 7) == 0)
            cert = &k[7];

        else if (strncmp(k, "--trust=", 8) == 0)
            trust = &k[8];

        else if (strcmp(k, "--keep-trying") == 0)
            keep_trying = true;
#endif
        else {
            fprintf(stderr, "Unknown parameter '%s'\n", k);
            return 1;
        }
    }

    // reject incompatible mode combinations
#ifdef HAVE_CURL
    if(log_as_netdata && url) {
        fprintf(stderr, "Cannot log to a systemd-journal-remote URL as Netdata. "
                        "Please either give --url or --log-as-netdata, not both.\n");
        return 1;
    }

    if(socket && url) {
        fprintf(stderr, "Cannot log to a systemd-journal-remote URL using a UNIX socket. "
                        "Please either give --url or --socket, not both.\n");
        return 1;
    }

#endif

    if(log_as_netdata && namespace) {
        fprintf(stderr, "Cannot log as netdata using a namespace. "
                        "Please either give --log-as-netdata or --namespace, not both.\n");
        return 1;
    }

    if(log_as_netdata)
        return log_input_as_netdata(newline, timeout_ms);

#ifdef HAVE_CURL
    if(url) {
        if(namespace && *namespace)
            snprintfz(global_namespace, sizeof(global_namespace), "_NAMESPACE=%s\n", namespace);

        log_to_journal_remote_ret_t rc;
        do {
            rc = log_input_to_journal_remote(url, key, cert, trust, newline, timeout_ms);
        } while(keep_trying && rc == LOG_TO_JOURNAL_REMOTE_CANNOT_SEND);

        // BUGFIX: previously control fell through to log_input_to_journal(),
        // discarding rc and trying to re-log the already consumed stdin locally
        return rc == 0 ? 0 : 1;
    }
#endif

    return log_input_to_journal(socket, namespace, newline, timeout_ms);
}
diff --git a/libnetdata/log/systemd-cat-native.h b/libnetdata/log/systemd-cat-native.h
new file mode 100644
index 00000000..34e7a361
--- /dev/null
+++ b/libnetdata/log/systemd-cat-native.h
@@ -0,0 +1,8 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+#ifndef NETDATA_SYSTEMD_CAT_NATIVE_H
+#define NETDATA_SYSTEMD_CAT_NATIVE_H
+
+#endif //NETDATA_SYSTEMD_CAT_NATIVE_H
diff --git a/libnetdata/log/systemd-cat-native.md b/libnetdata/log/systemd-cat-native.md
new file mode 100644
index 00000000..b0b15f40
--- /dev/null
+++ b/libnetdata/log/systemd-cat-native.md
@@ -0,0 +1,209 @@
+# systemd-cat-native
+
+`systemd` includes a utility called `systemd-cat`. This utility reads log lines from its standard input and sends them
+to the local systemd journal. Its key limitation is that despite the fact that systemd journals support structured logs,
+this command does not support sending structured logs to it.
+
+`systemd-cat-native` is a Netdata supplied utility to push structured logs to systemd journals. Key features:
+
+- reads [Journal Export Format](https://systemd.io/JOURNAL_EXPORT_FORMATS/) formatted log entries
+- converts text fields into binary journal multiline log fields
+- sends logs to any of these:
+ - local default `systemd-journald`,
+ - local namespace `systemd-journald`,
+ - remote `systemd-journal-remote` using HTTP or HTTPS, the same way `systemd-journal-upload` does.
+- is the standard external logger of Netdata shell scripts
+
+## Simple use:
+
+```bash
+printf "MESSAGE=hello world\nPRIORITY=6\n\n" | systemd-cat-native
+```
+
+The result:
+
+![image](https://github.com/netdata/netdata/assets/2662304/689d5e03-97ee-40a8-a690-82b7710cef7c)
+
+
+Sending `PRIORITY=3` (error):
+
+```bash
+printf "MESSAGE=hey, this is error\nPRIORITY=3\n\n" | systemd-cat-native
+```
+
+The result:
+![image](https://github.com/netdata/netdata/assets/2662304/faf3eaa5-ac56-415b-9de8-16e6ceed9280)
+
+Sending multi-line log entries (in this example we replace the text `--NEWLINE--` with a newline in the log entry):
+
+```bash
+printf "MESSAGE=hello--NEWLINE--world\nPRIORITY=6\n\n" | systemd-cat-native --newline='--NEWLINE--'
+```
+
+The result:
+
+![image](https://github.com/netdata/netdata/assets/2662304/d6037b4a-87da-4693-ae67-e07df0decdd9)
+
+
+Processing the standard `\n` string can be tricky due to shell escaping. This works, but note that
+we have to add a lot of backslashes to printf.
+
+```bash
+printf "MESSAGE=hello\\\\nworld\nPRIORITY=6\n\n" | systemd-cat-native --newline='\n'
+```
+
+`systemd-cat-native` needs to receive it like this for newline processing to work:
+
+```bash
+# printf "MESSAGE=hello\\\\nworld\nPRIORITY=6\n\n"
+MESSAGE=hello\nworld
+PRIORITY=6
+
+```
+
+## Best practices
+
+These are the rules about fields, enforced by `systemd-journald`:
+
+- field names can be up to **64 characters**,
+- field values can be up to **48k characters**,
+- the only allowed field characters are **A-Z**, **0-9** and **underscore**,
+- the **first** character of fields cannot be a **digit**
+- **protected** journal fields start with underscore:
+ * they are accepted by `systemd-journal-remote`,
+ * they are **NOT** accepted by a local `systemd-journald`.
+
+For best results, always include these fields:
+
+- `MESSAGE=TEXT`<br/>
+ The `MESSAGE` is the body of the log entry.
+ This field is what we usually see in our logs.
+
+- `PRIORITY=NUMBER`<br/>
+ `PRIORITY` sets the severity of the log entry.<br/>
+ `0=emerg, 1=alert, 2=crit, 3=err, 4=warn, 5=notice, 6=info, 7=debug`
+ - Emergency events (0) are usually broadcast to all terminals.
+ - Emergency, alert, critical, and error (0-3) are usually colored red.
+ - Warning (4) entries are usually colored yellow.
+ - Notice (5) entries are usually bold or have a brighter white color.
+ - Info (6) entries are the default.
+ - Debug (7) entries are usually grayed or dimmed.
+
+- `SYSLOG_IDENTIFIER=NAME`<br/>
+  `SYSLOG_IDENTIFIER` sets the name of the application.
+ Use something descriptive, like: `SYSLOG_IDENTIFIER=myapp`
+
+You can find the most common fields at `man systemd.journal-fields`.
+
+
+## Usage
+
+```
+Netdata systemd-cat-native v1.43.0-333-g5af71b875
+
+This program reads from its standard input, lines in the format:
+
+KEY1=VALUE1\n
+KEY2=VALUE2\n
+KEYN=VALUEN\n
+\n
+
+and sends them to systemd-journal.
+
+ - Binary journal fields are not accepted at its input
+ - Binary journal fields can be generated after newline processing
+ - Messages have to be separated by an empty line
+ - Keys starting with underscore are not accepted (by journald)
+ - Other rules imposed by systemd-journald are imposed (by journald)
+
+Usage:
+
+ systemd-cat-native
+ [--newline=STRING]
+ [--log-as-netdata|-N]
+ [--namespace=NAMESPACE] [--socket=PATH]
+ [--url=URL [--key=FILENAME] [--cert=FILENAME] [--trust=FILENAME|all]]
+
+The program has the following modes of logging:
+
+ * Log to a local systemd-journald or stderr
+
+ This is the default mode. If systemd-journald is available, logs will be
+ sent to systemd, otherwise logs will be printed on stderr, using logfmt
+ formatting. Options --socket and --namespace are available to configure
+ the journal destination:
+
+ --socket=PATH
+ The path of a systemd-journald UNIX socket.
+ The program will use the default systemd-journald socket when this
+ option is not used.
+
+ --namespace=NAMESPACE
+ The name of a configured and running systemd-journald namespace.
+ The program will produce the socket path based on its internal
+ defaults, to send the messages to the systemd journal namespace.
+
+ * Log as Netdata, enabled with --log-as-netdata or -N
+
+ In this mode the program uses environment variables set by Netdata for
+ the log destination. Only log fields defined by Netdata are accepted.
+ If the environment variables expected by Netdata are not found, it
+ falls back to stderr logging in logfmt format.
+
+ * Log to a systemd-journal-remote TCP socket, enabled with --url=URL
+
+    In this mode, the program will directly send logs to a remote systemd
+ journal (systemd-journal-remote expected at the destination)
+ This mode is available even when the local system does not support
+    systemd, or even if it is not Linux, allowing a remote Linux systemd
+ journald to become the logs database of the local system.
+
+ Unfortunately systemd-journal-remote does not accept compressed
+ data over the network, so the stream will be uncompressed.
+
+ --url=URL
+ The destination systemd-journal-remote address and port, similarly
+ to what /etc/systemd/journal-upload.conf accepts.
+ Usually it is in the form: https://ip.address:19532
+ Both http and https URLs are accepted. When using https, the
+ following additional options are accepted:
+
+ --key=FILENAME
+ The filename of the private key of the server.
+ The default is: /etc/ssl/private/journal-upload.pem
+
+ --cert=FILENAME
+ The filename of the public key of the server.
+ The default is: /etc/ssl/certs/journal-upload.pem
+
+ --trust=FILENAME | all
+ The filename of the trusted CA public key.
+ The default is: /etc/ssl/ca/trusted.pem
+ The keyword 'all' can be used to trust all CAs.
+
+ --namespace=NAMESPACE
+ Set the namespace of the messages sent.
+
+ --keep-trying
+ Keep trying to send the message, if the remote journal is not there.
+
+ NEWLINES PROCESSING
+ systemd-journal logs entries may have newlines in them. However the
+ Journal Export Format uses binary formatted data to achieve this,
+ making it hard for text processing.
+
+ To overcome this limitation, this program allows single-line text
+ formatted values at its input, to be binary formatted multi-line Journal
+ Export Format at its output.
+
+ To achieve that it allows replacing a given string to a newline.
+ The parameter --newline=STRING allows setting the string to be replaced
+ with newlines.
+
+ For example by setting --newline='--NEWLINE--', the program will replace
+ all occurrences of --NEWLINE-- with the newline character, within each
+    VALUE of the KEY=VALUE lines. Once this is done, the program will
+ switch the field to the binary Journal Export Format before sending the
+ log event to systemd-journal.
+
+``` \ No newline at end of file
diff --git a/libnetdata/os.c b/libnetdata/os.c
index e6475a45..e4624be6 100644
--- a/libnetdata/os.c
+++ b/libnetdata/os.c
@@ -162,7 +162,11 @@ unsigned long read_cpuset_cpus(const char *filename, long system_cpus) {
unsigned long ncpus = 0;
// parse the cpuset string and calculate the number of cpus the cgroup is allowed to use
- while(*s) {
+ while (*s) {
+ if (isspace(*s)) {
+ s++;
+ continue;
+ }
unsigned long n = cpuset_str2ul(&s);
ncpus++;
if(*s == ',') {
diff --git a/libnetdata/os.h b/libnetdata/os.h
index 3cda79ed..197548b0 100644
--- a/libnetdata/os.h
+++ b/libnetdata/os.h
@@ -37,6 +37,7 @@ int getsysctl(const char *name, int *mib, size_t miblen, void *ptr, size_t *len)
#if __APPLE__
#include <sys/sysctl.h>
+#include "endian.h"
#define GETSYSCTL_BY_NAME(name, var) getsysctl_by_name(name, &(var), sizeof(var))
int getsysctl_by_name(const char *name, void *ptr, size_t len);
diff --git a/libnetdata/procfile/procfile.c b/libnetdata/procfile/procfile.c
index 1a7e47a5..0bc731d6 100644
--- a/libnetdata/procfile/procfile.c
+++ b/libnetdata/procfile/procfile.c
@@ -407,9 +407,14 @@ procfile *procfile_open(const char *filename, const char *separators, uint32_t f
int fd = open(filename, procfile_open_flags, 0666);
if(unlikely(fd == -1)) {
- if(unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) collector_error(PF_PREFIX ": Cannot open file '%s'", filename);
- else if(unlikely(flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG))
+ if (unlikely(flags & PROCFILE_FLAG_ERROR_ON_ERROR_LOG))
netdata_log_error(PF_PREFIX ": Cannot open file '%s'", filename);
+ else if (unlikely(!(flags & PROCFILE_FLAG_NO_ERROR_ON_FILE_IO))) {
+ if (errno == ENOENT)
+ collector_info(PF_PREFIX ": Cannot open file '%s'", filename);
+ else
+ collector_error(PF_PREFIX ": Cannot open file '%s'", filename);
+ }
return NULL;
}
diff --git a/libnetdata/simple_hashtable.h b/libnetdata/simple_hashtable.h
new file mode 100644
index 00000000..f6b6db90
--- /dev/null
+++ b/libnetdata/simple_hashtable.h
@@ -0,0 +1,396 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_SIMPLE_HASHTABLE_H
+#define NETDATA_SIMPLE_HASHTABLE_H
+
+#ifndef XXH_INLINE_ALL
+#define XXH_INLINE_ALL
+#endif
+#include "xxhash.h"
+
+typedef uint64_t SIMPLE_HASHTABLE_HASH;
+#define SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS 32
+
+#ifndef SIMPLE_HASHTABLE_NAME
+#define SIMPLE_HASHTABLE_NAME
+#endif
+
+#ifndef SIMPLE_HASHTABLE_VALUE_TYPE
+#define SIMPLE_HASHTABLE_VALUE_TYPE void
+#endif
+
+// First layer of macro for token concatenation
+#define CONCAT_INTERNAL(a, b) a ## b
+// Second layer of macro, which ensures proper expansion
+#define CONCAT(a, b) CONCAT_INTERNAL(a, b)
+
+// define names for all structures and functions
+#define simple_hashtable_init_named CONCAT(simple_hashtable_init, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_destroy_named CONCAT(simple_hashtable_destroy, SIMPLE_HASHTABLE_NAME)
+
+#define simple_hashtable_slot_named CONCAT(simple_hashtable_slot, SIMPLE_HASHTABLE_NAME)
+#define SIMPLE_HASHTABLE_SLOT_NAMED CONCAT(SIMPLE_HASHTABLE_SLOT, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_named CONCAT(simple_hashtable, SIMPLE_HASHTABLE_NAME)
+#define SIMPLE_HASHTABLE_NAMED CONCAT(SIMPLE_HASHTABLE, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_resize_named CONCAT(simple_hashtable_resize, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_get_slot_named CONCAT(simple_hashtable_get_slot, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_del_slot_named CONCAT(simple_hashtable_del_slot, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_set_slot_named CONCAT(simple_hashtable_set_slot, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_first_read_only_named CONCAT(simple_hashtable_first_read_only, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_next_read_only_named CONCAT(simple_hashtable_next_read_only, SIMPLE_HASHTABLE_NAME)
+
+#define simple_hashtable_sorted_binary_search_named CONCAT(simple_hashtable_sorted_binary_search, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_add_value_sorted_named CONCAT(simple_hashtable_add_value_sorted, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_del_value_sorted_named CONCAT(simple_hashtable_del_value_sorted, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_replace_value_sorted_named CONCAT(simple_hashtable_replace_value_sorted, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_sorted_array_first_read_only_named CONCAT(simple_hashtable_sorted_array_first_read_only, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_sorted_array_next_read_only_named CONCAT(simple_hashtable_sorted_array_next_read_only, SIMPLE_HASHTABLE_NAME)
+
+typedef struct simple_hashtable_slot_named {
+ SIMPLE_HASHTABLE_HASH hash;
+ SIMPLE_HASHTABLE_VALUE_TYPE *data;
+} SIMPLE_HASHTABLE_SLOT_NAMED;
+
+typedef struct simple_hashtable_named {
+ size_t resizes;
+ size_t searches;
+ size_t collisions;
+ size_t deletions;
+ size_t deleted;
+ size_t used;
+ size_t size;
+ SIMPLE_HASHTABLE_SLOT_NAMED *hashtable;
+
+#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION
+ struct {
+ size_t used;
+ size_t size;
+ SIMPLE_HASHTABLE_VALUE_TYPE **array;
+ } sorted;
+#endif
+} SIMPLE_HASHTABLE_NAMED;
+
+#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION
+static inline size_t simple_hashtable_sorted_binary_search_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) {
+ size_t left = 0, right = ht->sorted.used;
+
+ while (left < right) {
+ size_t mid = left + (right - left) / 2;
+ if (SIMPLE_HASHTABLE_SORT_FUNCTION(ht->sorted.array[mid], value) < 0)
+ left = mid + 1;
+ else
+ right = mid;
+ }
+
+ return left;
+}
+
+static inline void simple_hashtable_add_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) {
+ size_t index = simple_hashtable_sorted_binary_search_named(ht, value);
+
+ // Ensure there's enough space in the sorted array
+ if (ht->sorted.used >= ht->sorted.size) {
+ size_t size = ht->sorted.size ? ht->sorted.size * 2 : 64;
+ SIMPLE_HASHTABLE_VALUE_TYPE **array = mallocz(size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *));
+ if(ht->sorted.array) {
+ memcpy(array, ht->sorted.array, ht->sorted.size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *));
+ freez(ht->sorted.array);
+ }
+ ht->sorted.array = array;
+ ht->sorted.size = size;
+ }
+
+ // Use memmove to shift elements and create space for the new element
+ memmove(&ht->sorted.array[index + 1], &ht->sorted.array[index], (ht->sorted.used - index) * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *));
+
+ ht->sorted.array[index] = value;
+ ht->sorted.used++;
+}
+
+static inline void simple_hashtable_del_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *value) {
+ size_t index = simple_hashtable_sorted_binary_search_named(ht, value);
+
+ // Check if the value exists at the found index
+ assert(index < ht->sorted.used && ht->sorted.array[index] == value);
+
+ // Use memmove to shift elements and close the gap
+ memmove(&ht->sorted.array[index], &ht->sorted.array[index + 1], (ht->sorted.used - index - 1) * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *));
+ ht->sorted.used--;
+}
+
+static inline void simple_hashtable_replace_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE *old_value, SIMPLE_HASHTABLE_VALUE_TYPE *new_value) {
+ if(new_value == old_value)
+ return;
+
+ size_t old_value_index = simple_hashtable_sorted_binary_search_named(ht, old_value);
+ assert(old_value_index < ht->sorted.used && ht->sorted.array[old_value_index] == old_value);
+
+ int r = SIMPLE_HASHTABLE_SORT_FUNCTION(old_value, new_value);
+ if(r == 0) {
+ // Same value, so use the same index
+ ht->sorted.array[old_value_index] = new_value;
+ return;
+ }
+
+ size_t new_value_index = simple_hashtable_sorted_binary_search_named(ht, new_value);
+ if(old_value_index == new_value_index) {
+ // Not the same value, but still at the same index
+ ht->sorted.array[old_value_index] = new_value;
+ return;
+ }
+ else if (old_value_index < new_value_index) {
+ // The old value is before the new value
+ size_t shift_start = old_value_index + 1;
+ size_t shift_end = new_value_index - 1;
+ size_t shift_size = shift_end - old_value_index;
+
+ memmove(&ht->sorted.array[old_value_index], &ht->sorted.array[shift_start], shift_size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *));
+ ht->sorted.array[shift_end] = new_value;
+ }
+ else {
+ // The old value is after the new value
+ size_t shift_start = new_value_index;
+ size_t shift_end = old_value_index;
+ size_t shift_size = shift_end - new_value_index;
+
+ memmove(&ht->sorted.array[new_value_index + 1], &ht->sorted.array[shift_start], shift_size * sizeof(SIMPLE_HASHTABLE_VALUE_TYPE *));
+ ht->sorted.array[new_value_index] = new_value;
+ }
+}
+
+static inline SIMPLE_HASHTABLE_VALUE_TYPE **simple_hashtable_sorted_array_first_read_only_named(SIMPLE_HASHTABLE_NAMED *ht) {
+ if (ht->sorted.used > 0) {
+ return &ht->sorted.array[0];
+ }
+ return NULL;
+}
+
+static inline SIMPLE_HASHTABLE_VALUE_TYPE **simple_hashtable_sorted_array_next_read_only_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_VALUE_TYPE **last) {
+ if (!last) return NULL;
+
+ // Calculate the current position in the sorted array
+ size_t currentIndex = last - ht->sorted.array;
+
+ // Proceed to the next element if it exists
+ if (currentIndex + 1 < ht->sorted.used) {
+ return &ht->sorted.array[currentIndex + 1];
+ }
+
+ // If no more elements, return NULL
+ return NULL;
+}
+
+#define SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY(ht, var, type, name) \
+ for (type **(var) = simple_hashtable_sorted_array_first_read_only ## name(ht); \
+ var; \
+ (var) = simple_hashtable_sorted_array_next_read_only ## name(ht, var))
+
+#define SIMPLE_HASHTABLE_SORTED_FOREACH_READ_ONLY_VALUE(var) (*(var))
+
+#else
+static inline void simple_hashtable_add_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *value __maybe_unused) { ; }
+static inline void simple_hashtable_del_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *value __maybe_unused) { ; }
+static inline void simple_hashtable_replace_value_sorted_named(SIMPLE_HASHTABLE_NAMED *ht __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *old_value __maybe_unused, SIMPLE_HASHTABLE_VALUE_TYPE *new_value __maybe_unused) { ; }
+#endif
+
+static void simple_hashtable_init_named(SIMPLE_HASHTABLE_NAMED *ht, size_t size) {
+ memset(ht, 0, sizeof(*ht));
+ ht->size = size;
+ ht->hashtable = callocz(ht->size, sizeof(*ht->hashtable));
+}
+
+static void simple_hashtable_destroy_named(SIMPLE_HASHTABLE_NAMED *ht) {
+#ifdef SIMPLE_HASHTABLE_SORT_FUNCTION
+ freez(ht->sorted.array);
+#endif
+
+ freez(ht->hashtable);
+ memset(ht, 0, sizeof(*ht));
+}
+
+static inline void simple_hashtable_resize_named(SIMPLE_HASHTABLE_NAMED *ht);
+
+#define SHTS_DATA_UNSET ((void *)NULL)
+#define SHTS_DATA_DELETED ((void *)0x01)
+#define SHTS_DATA_USERNULL ((void *)0x02)
+#define SHTS_IS_UNSET(sl) ((sl)->data == SHTS_DATA_UNSET)
+#define SHTS_IS_DELETED(sl) ((sl)->data == SHTS_DATA_DELETED)
+#define SHTS_IS_USERNULL(sl) ((sl)->data == SHTS_DATA_USERNULL)
+#define SIMPLE_HASHTABLE_SLOT_DATA(sl) ((SHTS_IS_UNSET(sl) || SHTS_IS_DELETED(sl) || SHTS_IS_USERNULL(sl)) ? NULL : (sl)->data)
+#define SIMPLE_HASHTABLE_SLOT_UNSET_OR_DELETED(sl) ((SHTS_IS_UNSET(sl) || SHTS_IS_DELETED(sl)) ? NULL : (sl)->data)
+
+// IMPORTANT
+// The pointer returned by this call is valid up to the next call of this function (or the resize one)
+// If you need to cache something, cache the hash, not the slot pointer.
+static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_get_slot_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_HASH hash, bool resize) {
+ ht->searches++;
+
+ size_t slot;
+ SIMPLE_HASHTABLE_SLOT_NAMED *sl;
+ SIMPLE_HASHTABLE_SLOT_NAMED *deleted;
+
+ slot = hash % ht->size;
+ sl = &ht->hashtable[slot];
+ deleted = SHTS_IS_DELETED(sl) ? sl : NULL;
+ if(likely(!SIMPLE_HASHTABLE_SLOT_UNSET_OR_DELETED(sl) || sl->hash == hash))
+ return (SHTS_IS_UNSET(sl) && deleted) ? deleted : sl;
+
+ ht->collisions++;
+
+ if(unlikely(resize && (ht->size <= (ht->used << 1) || ht->used >= ht->size))) {
+ simple_hashtable_resize_named(ht);
+
+ slot = hash % ht->size;
+ sl = &ht->hashtable[slot];
+ deleted = (!deleted && SHTS_IS_DELETED(sl)) ? sl : deleted;
+ if(likely(!SIMPLE_HASHTABLE_SLOT_UNSET_OR_DELETED(sl) || sl->hash == hash))
+ return (SHTS_IS_UNSET(sl) && deleted) ? deleted : sl;
+
+ ht->collisions++;
+ }
+
+ slot = ((hash >> SIMPLE_HASHTABLE_HASH_SECOND_HASH_SHIFTS) + 1) % ht->size;
+ sl = &ht->hashtable[slot];
+ deleted = (!deleted && SHTS_IS_DELETED(sl)) ? sl : deleted;
+
+ // Linear probing until we find it
+ while (SIMPLE_HASHTABLE_SLOT_UNSET_OR_DELETED(sl) && sl->hash != hash) {
+ slot = (slot + 1) % ht->size; // Wrap around if necessary
+ sl = &ht->hashtable[slot];
+ deleted = (!deleted && SHTS_IS_DELETED(sl)) ? sl : deleted;
+ ht->collisions++;
+ }
+
+ return (SHTS_IS_UNSET(sl) && deleted) ? deleted : sl;
+}
+
+static inline bool simple_hashtable_del_slot_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *sl) {
+ if(SHTS_IS_UNSET(sl) || SHTS_IS_DELETED(sl))
+ return false;
+
+ ht->deletions++;
+ ht->deleted++;
+
+ simple_hashtable_del_value_sorted_named(ht, SIMPLE_HASHTABLE_SLOT_DATA(sl));
+
+ sl->data = SHTS_DATA_DELETED;
+ return true;
+}
+
+static inline void simple_hashtable_set_slot_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *sl, SIMPLE_HASHTABLE_HASH hash, SIMPLE_HASHTABLE_VALUE_TYPE *data) {
+ if(data == NULL)
+ data = SHTS_DATA_USERNULL;
+
+ if(unlikely(data == SHTS_DATA_UNSET || data == SHTS_DATA_DELETED)) {
+ simple_hashtable_del_slot_named(ht, sl);
+ return;
+ }
+
+ if(likely(SHTS_IS_UNSET(sl))) {
+ simple_hashtable_add_value_sorted_named(ht, data);
+ ht->used++;
+ }
+
+ else if(unlikely(SHTS_IS_DELETED(sl))) {
+ ht->deleted--;
+ }
+
+ else
+ simple_hashtable_replace_value_sorted_named(ht, SIMPLE_HASHTABLE_SLOT_DATA(sl), data);
+
+ sl->hash = hash;
+ sl->data = data;
+}
+
+// IMPORTANT
+// this call invalidates all SIMPLE_HASHTABLE_SLOT_NAMED pointers
+static inline void simple_hashtable_resize_named(SIMPLE_HASHTABLE_NAMED *ht) {
+ SIMPLE_HASHTABLE_SLOT_NAMED *old = ht->hashtable;
+ size_t old_size = ht->size;
+
+ ht->resizes++;
+ ht->size = (ht->size << 1) - ((ht->size > 16) ? 1 : 0);
+ ht->hashtable = callocz(ht->size, sizeof(*ht->hashtable));
+ ht->used = ht->deleted = 0;
+ for(size_t i = 0 ; i < old_size ; i++) {
+ if(!SIMPLE_HASHTABLE_SLOT_UNSET_OR_DELETED(&old[i]))
+ continue;
+
+ SIMPLE_HASHTABLE_SLOT_NAMED *slot = simple_hashtable_get_slot_named(ht, old[i].hash, false);
+ *slot = old[i];
+ ht->used++;
+ }
+
+ freez(old);
+}
+
+// ----------------------------------------------------------------------------
+// hashtable traversal, in read-only mode
+// the hashtable should not be modified while the traversal is taking place
+
+static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_first_read_only_named(SIMPLE_HASHTABLE_NAMED *ht) {
+ for(size_t i = 0; i < ht->used ;i++) {
+ SIMPLE_HASHTABLE_SLOT_NAMED *sl = &ht->hashtable[i];
+ if(!SIMPLE_HASHTABLE_SLOT_UNSET_OR_DELETED(sl))
+ return sl;
+ }
+
+ return NULL;
+}
+
+static inline SIMPLE_HASHTABLE_SLOT_NAMED *simple_hashtable_next_read_only_named(SIMPLE_HASHTABLE_NAMED *ht, SIMPLE_HASHTABLE_SLOT_NAMED *last) {
+ if (!last) return NULL;
+
+ // Calculate the current position in the array
+ size_t currentIndex = last - ht->hashtable;
+
+ // Iterate over the hashtable starting from the next element
+ for (size_t i = currentIndex + 1; i < ht->size; i++) {
+ SIMPLE_HASHTABLE_SLOT_NAMED *sl = &ht->hashtable[i];
+ if (!SIMPLE_HASHTABLE_SLOT_UNSET_OR_DELETED(sl)) {
+ return sl;
+ }
+ }
+
+ // If no more data slots are found, return NULL
+ return NULL;
+}
+
+#define SIMPLE_HASHTABLE_FOREACH_READ_ONLY(ht, var, name) \
+ for(struct simple_hashtable_slot ## name *(var) = simple_hashtable_first_read_only ## name(ht); \
+ var; \
+ (var) = simple_hashtable_next_read_only ## name(ht, var))
+
+#define SIMPLE_HASHTABLE_FOREACH_READ_ONLY_VALUE(var) SIMPLE_HASHTABLE_SLOT_DATA(var)
+
+// ----------------------------------------------------------------------------
+// high level implementation
+
+#ifdef SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION
+
+#define simple_hashtable_set_named CONCAT(simple_hashtable_set, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_get_named CONCAT(simple_hashtable_get, SIMPLE_HASHTABLE_NAME)
+#define simple_hashtable_del_named CONCAT(simple_hashtable_del, SIMPLE_HASHTABLE_NAME)
+
+static inline SIMPLE_HASHTABLE_VALUE_TYPE *simple_hashtable_set_named(SIMPLE_HASHTABLE_NAMED *ht, void *key, size_t key_len, SIMPLE_HASHTABLE_VALUE_TYPE *data) {
+ XXH64_hash_t hash = XXH3_64bits(key, key_len);
+ SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, true);
+ simple_hashtable_set_slot_named(ht, sl, hash, data);
+ return SIMPLE_HASHTABLE_SLOT_DATA(sl);
+}
+
+static inline SIMPLE_HASHTABLE_VALUE_TYPE *simple_hashtable_get_named(SIMPLE_HASHTABLE_NAMED *ht, void *key, size_t key_len, SIMPLE_HASHTABLE_VALUE_TYPE *data) {
+ XXH64_hash_t hash = XXH3_64bits(key, key_len);
+ SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, true);
+ return SIMPLE_HASHTABLE_SLOT_DATA(sl);
+}
+
+static inline bool simple_hashtable_del_named(SIMPLE_HASHTABLE_NAMED *ht, void *key, size_t key_len, SIMPLE_HASHTABLE_VALUE_TYPE *data) {
+ XXH64_hash_t hash = XXH3_64bits(key, key_len);
+ SIMPLE_HASHTABLE_SLOT_NAMED *sl = simple_hashtable_get_slot_named(ht, hash, true);
+ return simple_hashtable_del_slot_named(ht, sl);
+}
+
+#endif // SIMPLE_HASHTABLE_SAMPLE_IMPLEMENTATION
+
+#endif //NETDATA_SIMPLE_HASHTABLE_H
diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c
index 3a3a171e..4deb7662 100644
--- a/libnetdata/socket/security.c
+++ b/libnetdata/socket/security.c
@@ -24,7 +24,7 @@ static SOCKET_PEERS netdata_ssl_peers(NETDATA_SSL *ssl) {
}
static void netdata_ssl_log_error_queue(const char *call, NETDATA_SSL *ssl, unsigned long err) {
- error_limit_static_thread_var(erl, 1, 0);
+ nd_log_limit_static_thread_var(erl, 1, 0);
if(err == SSL_ERROR_NONE)
err = ERR_get_error();
@@ -103,13 +103,14 @@ static void netdata_ssl_log_error_queue(const char *call, NETDATA_SSL *ssl, unsi
ERR_error_string_n(err, str, 1024);
str[1024] = '\0';
SOCKET_PEERS peers = netdata_ssl_peers(ssl);
- error_limit(&erl, "SSL: %s() on socket local [[%s]:%d] <-> remote [[%s]:%d], returned error %lu (%s): %s",
- call, peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, err, code, str);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR,
+ "SSL: %s() on socket local [[%s]:%d] <-> remote [[%s]:%d], returned error %lu (%s): %s",
+ call, peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, err, code, str);
} while((err = ERR_get_error()));
}
-bool netdata_ssl_open(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd) {
+bool netdata_ssl_open_ext(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd, const unsigned char *alpn_protos, unsigned int alpn_protos_len) {
errno = 0;
ssl->ssl_errno = 0;
@@ -138,6 +139,8 @@ bool netdata_ssl_open(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd) {
ssl->state = NETDATA_SSL_STATE_FAILED;
return false;
}
+ if (alpn_protos && alpn_protos_len > 0)
+ SSL_set_alpn_protos(ssl->conn, alpn_protos, alpn_protos_len);
}
if(SSL_set_fd(ssl->conn, fd) != 1) {
@@ -153,6 +156,10 @@ bool netdata_ssl_open(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd) {
return true;
}
+bool netdata_ssl_open(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd) {
+ return netdata_ssl_open_ext(ssl, ctx, fd, NULL, 0);
+}
+
void netdata_ssl_close(NETDATA_SSL *ssl) {
errno = 0;
ssl->ssl_errno = 0;
@@ -173,7 +180,7 @@ void netdata_ssl_close(NETDATA_SSL *ssl) {
}
static inline bool is_handshake_complete(NETDATA_SSL *ssl, const char *op) {
- error_limit_static_thread_var(erl, 1, 0);
+ nd_log_limit_static_thread_var(erl, 1, 0);
if(unlikely(!ssl->conn)) {
internal_error(true, "SSL: trying to %s on a NULL connection", op);
@@ -183,22 +190,25 @@ static inline bool is_handshake_complete(NETDATA_SSL *ssl, const char *op) {
switch(ssl->state) {
case NETDATA_SSL_STATE_NOT_SSL: {
SOCKET_PEERS peers = netdata_ssl_peers(ssl);
- error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on non-SSL connection",
- peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING,
+ "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on non-SSL connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
return false;
}
case NETDATA_SSL_STATE_INIT: {
SOCKET_PEERS peers = netdata_ssl_peers(ssl);
- error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on an incomplete connection",
- peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING,
+ "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on an incomplete connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
return false;
}
case NETDATA_SSL_STATE_FAILED: {
SOCKET_PEERS peers = netdata_ssl_peers(ssl);
- error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a failed connection",
- peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING,
+ "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a failed connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
return false;
}
@@ -290,7 +300,7 @@ ssize_t netdata_ssl_write(NETDATA_SSL *ssl, const void *buf, size_t num) {
}
static inline bool is_handshake_initialized(NETDATA_SSL *ssl, const char *op) {
- error_limit_static_thread_var(erl, 1, 0);
+ nd_log_limit_static_thread_var(erl, 1, 0);
if(unlikely(!ssl->conn)) {
internal_error(true, "SSL: trying to %s on a NULL connection", op);
@@ -300,8 +310,9 @@ static inline bool is_handshake_initialized(NETDATA_SSL *ssl, const char *op) {
switch(ssl->state) {
case NETDATA_SSL_STATE_NOT_SSL: {
SOCKET_PEERS peers = netdata_ssl_peers(ssl);
- error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on non-SSL connection",
- peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING,
+ "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on non-SSL connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
return false;
}
@@ -311,15 +322,17 @@ static inline bool is_handshake_initialized(NETDATA_SSL *ssl, const char *op) {
case NETDATA_SSL_STATE_FAILED: {
SOCKET_PEERS peers = netdata_ssl_peers(ssl);
- error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a failed connection",
- peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING,
+ "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a failed connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
return false;
}
case NETDATA_SSL_STATE_COMPLETE: {
SOCKET_PEERS peers = netdata_ssl_peers(ssl);
- error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on an complete connection",
- peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_WARNING,
+ "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on an complete connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
return false;
}
}
diff --git a/libnetdata/socket/security.h b/libnetdata/socket/security.h
index a4082a07..fd17b6f3 100644
--- a/libnetdata/socket/security.h
+++ b/libnetdata/socket/security.h
@@ -64,6 +64,7 @@ bool netdata_ssl_connect(NETDATA_SSL *ssl);
bool netdata_ssl_accept(NETDATA_SSL *ssl);
bool netdata_ssl_open(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd);
+bool netdata_ssl_open_ext(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd, const unsigned char *alpn_protos, unsigned int alpn_protos_len);
void netdata_ssl_close(NETDATA_SSL *ssl);
ssize_t netdata_ssl_read(NETDATA_SSL *ssl, void *buf, size_t num);
diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c
index 67dc4c71..605e8563 100644
--- a/libnetdata/socket/socket.c
+++ b/libnetdata/socket/socket.c
@@ -158,7 +158,9 @@ int sock_setnonblock(int fd) {
int ret = fcntl(fd, F_SETFL, flags);
if(ret < 0)
- netdata_log_error("Failed to set O_NONBLOCK on socket %d", fd);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to set O_NONBLOCK on socket %d",
+ fd);
return ret;
}
@@ -171,7 +173,9 @@ int sock_delnonblock(int fd) {
int ret = fcntl(fd, F_SETFL, flags);
if(ret < 0)
- netdata_log_error("Failed to remove O_NONBLOCK on socket %d", fd);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to remove O_NONBLOCK on socket %d",
+ fd);
return ret;
}
@@ -180,7 +184,9 @@ int sock_setreuse(int fd, int reuse) {
int ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuse, sizeof(reuse));
if(ret == -1)
- netdata_log_error("Failed to set SO_REUSEADDR on socket %d", fd);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to set SO_REUSEADDR on socket %d",
+ fd);
return ret;
}
@@ -191,7 +197,9 @@ int sock_setreuse_port(int fd, int reuse) {
#ifdef SO_REUSEPORT
ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &reuse, sizeof(reuse));
if(ret == -1 && errno != ENOPROTOOPT)
- netdata_log_error("failed to set SO_REUSEPORT on socket %d", fd);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "failed to set SO_REUSEPORT on socket %d",
+ fd);
#else
ret = -1;
#endif
@@ -205,7 +213,9 @@ int sock_enlarge_in(int fd) {
ret = setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &bs, sizeof(bs));
if(ret == -1)
- netdata_log_error("Failed to set SO_RCVBUF on socket %d", fd);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to set SO_RCVBUF on socket %d",
+ fd);
return ret;
}
@@ -215,7 +225,9 @@ int sock_enlarge_out(int fd) {
ret = setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &bs, sizeof(bs));
if(ret == -1)
- netdata_log_error("Failed to set SO_SNDBUF on socket %d", fd);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to set SO_SNDBUF on socket %d",
+ fd);
return ret;
}
@@ -228,16 +240,16 @@ char *strdup_client_description(int family, const char *protocol, const char *ip
switch(family) {
case AF_INET:
- snprintfz(buffer, 100, "%s:%s:%d", protocol, ip, port);
+ snprintfz(buffer, sizeof(buffer) - 1, "%s:%s:%d", protocol, ip, port);
break;
case AF_INET6:
default:
- snprintfz(buffer, 100, "%s:[%s]:%d", protocol, ip, port);
+ snprintfz(buffer, sizeof(buffer) - 1, "%s:[%s]:%d", protocol, ip, port);
break;
case AF_UNIX:
- snprintfz(buffer, 100, "%s:%s", protocol, ip);
+ snprintfz(buffer, sizeof(buffer) - 1, "%s:%s", protocol, ip);
break;
}
@@ -250,11 +262,12 @@ char *strdup_client_description(int family, const char *protocol, const char *ip
int create_listen_socket_unix(const char *path, int listen_backlog) {
int sock;
- netdata_log_debug(D_LISTENER, "LISTENER: UNIX creating new listening socket on path '%s'", path);
-
sock = socket(AF_UNIX, SOCK_STREAM, 0);
if(sock < 0) {
- netdata_log_error("LISTENER: UNIX socket() on path '%s' failed.", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: UNIX socket() on path '%s' failed.",
+ path);
+
return -1;
}
@@ -268,42 +281,52 @@ int create_listen_socket_unix(const char *path, int listen_backlog) {
errno = 0;
if (unlink(path) == -1 && errno != ENOENT)
- netdata_log_error("LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: failed to remove existing (probably obsolete or left-over) file on UNIX socket path '%s'.",
+ path);
if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
close(sock);
- netdata_log_error("LISTENER: UNIX bind() on path '%s' failed.", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: UNIX bind() on path '%s' failed.",
+ path);
+
return -1;
}
// we have to chmod this to 0777 so that the client will be able
// to read from and write to this socket.
if(chmod(path, 0777) == -1)
- netdata_log_error("LISTENER: failed to chmod() socket file '%s'.", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: failed to chmod() socket file '%s'.",
+ path);
if(listen(sock, listen_backlog) < 0) {
close(sock);
- netdata_log_error("LISTENER: UNIX listen() on path '%s' failed.", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: UNIX listen() on path '%s' failed.",
+ path);
+
return -1;
}
- netdata_log_debug(D_LISTENER, "LISTENER: Listening on UNIX path '%s'", path);
return sock;
}
int create_listen_socket4(int socktype, const char *ip, uint16_t port, int listen_backlog) {
int sock;
- netdata_log_debug(D_LISTENER, "LISTENER: IPv4 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
-
sock = socket(AF_INET, socktype, 0);
if(sock < 0) {
- netdata_log_error("LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: IPv4 socket() on ip '%s' port %d, socktype %d failed.",
+ ip, port, socktype);
+
return -1;
}
sock_setreuse(sock, 1);
- sock_setreuse_port(sock, 1);
+ sock_setreuse_port(sock, 0);
sock_setnonblock(sock);
sock_enlarge_in(sock);
@@ -314,24 +337,36 @@ int create_listen_socket4(int socktype, const char *ip, uint16_t port, int liste
int ret = inet_pton(AF_INET, ip, (void *)&name.sin_addr.s_addr);
if(ret != 1) {
- netdata_log_error("LISTENER: Failed to convert IP '%s' to a valid IPv4 address.", ip);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Failed to convert IP '%s' to a valid IPv4 address.",
+ ip);
+
close(sock);
return -1;
}
if(bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
close(sock);
- netdata_log_error("LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: IPv4 bind() on ip '%s' port %d, socktype %d failed.",
+ ip, port, socktype);
+
return -1;
}
if(socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
close(sock);
- netdata_log_error("LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: IPv4 listen() on ip '%s' port %d, socktype %d failed.",
+ ip, port, socktype);
+
return -1;
}
- netdata_log_debug(D_LISTENER, "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "LISTENER: Listening on IPv4 ip '%s' port %d, socktype %d",
+ ip, port, socktype);
+
return sock;
}
@@ -339,22 +374,25 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p
int sock;
int ipv6only = 1;
- netdata_log_debug(D_LISTENER, "LISTENER: IPv6 creating new listening socket on ip '%s' port %d, socktype %d", ip, port, socktype);
-
sock = socket(AF_INET6, socktype, 0);
if (sock < 0) {
- netdata_log_error("LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: IPv6 socket() on ip '%s' port %d, socktype %d, failed.",
+ ip, port, socktype);
+
return -1;
}
sock_setreuse(sock, 1);
- sock_setreuse_port(sock, 1);
+ sock_setreuse_port(sock, 0);
sock_setnonblock(sock);
sock_enlarge_in(sock);
/* IPv6 only */
if(setsockopt(sock, IPPROTO_IPV6, IPV6_V6ONLY, (void*)&ipv6only, sizeof(ipv6only)) != 0)
- netdata_log_error("LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Cannot set IPV6_V6ONLY on ip '%s' port %d, socktype %d.",
+ ip, port, socktype);
struct sockaddr_in6 name;
memset(&name, 0, sizeof(struct sockaddr_in6));
@@ -364,7 +402,10 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p
int ret = inet_pton(AF_INET6, ip, (void *)&name.sin6_addr.s6_addr);
if(ret != 1) {
- netdata_log_error("LISTENER: Failed to convert IP '%s' to a valid IPv6 address.", ip);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Failed to convert IP '%s' to a valid IPv6 address.",
+ ip);
+
close(sock);
return -1;
}
@@ -373,23 +414,35 @@ int create_listen_socket6(int socktype, uint32_t scope_id, const char *ip, int p
if (bind (sock, (struct sockaddr *) &name, sizeof (name)) < 0) {
close(sock);
- netdata_log_error("LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: IPv6 bind() on ip '%s' port %d, socktype %d failed.",
+ ip, port, socktype);
+
return -1;
}
if (socktype == SOCK_STREAM && listen(sock, listen_backlog) < 0) {
close(sock);
- netdata_log_error("LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: IPv6 listen() on ip '%s' port %d, socktype %d failed.",
+ ip, port, socktype);
+
return -1;
}
- netdata_log_debug(D_LISTENER, "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d", ip, port, socktype);
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "LISTENER: Listening on IPv6 ip '%s' port %d, socktype %d",
+ ip, port, socktype);
+
return sock;
}
static inline int listen_sockets_add(LISTEN_SOCKETS *sockets, int fd, int family, int socktype, const char *protocol, const char *ip, uint16_t port, int acl_flags) {
if(sockets->opened >= MAX_LISTEN_FDS) {
- netdata_log_error("LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d", protocol, ip, port, protocol, socktype);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Too many listening sockets. Failed to add listening %s socket at ip '%s' port %d, protocol %s, socktype %d",
+ protocol, ip, port, protocol, socktype);
+
close(fd);
return -1;
}
@@ -519,7 +572,10 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition,
protocol_str = "unix";
int fd = create_listen_socket_unix(path, listen_backlog);
if (fd == -1) {
- netdata_log_error("LISTENER: Cannot create unix socket '%s'", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Cannot create unix socket '%s'",
+ path);
+
sockets->failed++;
} else {
acl_flags = WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_REGISTRY | WEB_CLIENT_ACL_BADGE | WEB_CLIENT_ACL_MGMT | WEB_CLIENT_ACL_NETDATACONF | WEB_CLIENT_ACL_STREAMING | WEB_CLIENT_ACL_SSL_DEFAULT;
@@ -585,7 +641,10 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition,
if(*interface) {
scope_id = if_nametoindex(interface);
if(!scope_id)
- netdata_log_error("LISTENER: Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Cannot find a network interface named '%s'. "
+ "Continuing with limiting the network interface",
+ interface);
}
if(!*ip || *ip == '*' || !strcmp(ip, "any") || !strcmp(ip, "all"))
@@ -605,7 +664,10 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition,
int r = getaddrinfo(ip, port, &hints, &result);
if (r != 0) {
- netdata_log_error("LISTENER: getaddrinfo('%s', '%s'): %s\n", ip, port, gai_strerror(r));
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: getaddrinfo('%s', '%s'): %s\n",
+ ip, port, gai_strerror(r));
+
return -1;
}
@@ -622,7 +684,6 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition,
struct sockaddr_in *sin = (struct sockaddr_in *) rp->ai_addr;
inet_ntop(AF_INET, &sin->sin_addr, rip, INET_ADDRSTRLEN);
rport = ntohs(sin->sin_port);
- // netdata_log_info("Attempting to listen on IPv4 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
fd = create_listen_socket4(socktype, rip, rport, listen_backlog);
break;
}
@@ -631,18 +692,23 @@ static inline int bind_to_this(LISTEN_SOCKETS *sockets, const char *definition,
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) rp->ai_addr;
inet_ntop(AF_INET6, &sin6->sin6_addr, rip, INET6_ADDRSTRLEN);
rport = ntohs(sin6->sin6_port);
- // netdata_log_info("Attempting to listen on IPv6 '%s' ('%s'), port %d ('%s'), socktype %d", rip, ip, rport, port, socktype);
fd = create_listen_socket6(socktype, scope_id, rip, rport, listen_backlog);
break;
}
default:
- netdata_log_debug(D_LISTENER, "LISTENER: Unknown socket family %d", family);
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "LISTENER: Unknown socket family %d",
+ family);
+
break;
}
if (fd == -1) {
- netdata_log_error("LISTENER: Cannot bind to ip '%s', port %d", rip, rport);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Cannot bind to ip '%s', port %d",
+ rip, rport);
+
sockets->failed++;
}
else {
@@ -664,13 +730,14 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
long long int old_port = sockets->default_port;
long long int new_port = appconfig_get_number(sockets->config, sockets->config_section, "default port", sockets->default_port);
if(new_port < 1 || new_port > 65535) {
- netdata_log_error("LISTENER: Invalid listen port %lld given. Defaulting to %lld.", new_port, old_port);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Invalid listen port %lld given. Defaulting to %lld.",
+ new_port, old_port);
+
sockets->default_port = (uint16_t) appconfig_set_number(sockets->config, sockets->config_section, "default port", old_port);
}
else sockets->default_port = (uint16_t)new_port;
- netdata_log_debug(D_OPTIONS, "LISTENER: Default listen port set to %d.", sockets->default_port);
-
char *s = appconfig_get(sockets->config, sockets->config_section, "bind to", sockets->default_bind_to);
while(*s) {
char *e = s;
@@ -694,7 +761,9 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
if(sockets->failed) {
size_t i;
for(i = 0; i < sockets->opened ;i++)
- netdata_log_info("LISTENER: Listen socket %s opened successfully.", sockets->fds_names[i]);
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "LISTENER: Listen socket %s opened successfully.",
+ sockets->fds_names[i]);
}
return (int)sockets->opened;
@@ -711,13 +780,18 @@ int listen_sockets_setup(LISTEN_SOCKETS *sockets) {
static inline int connect_to_unix(const char *path, struct timeval *timeout) {
int fd = socket(AF_UNIX, SOCK_STREAM, 0);
if(fd == -1) {
- netdata_log_error("Failed to create UNIX socket() for '%s'", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to create UNIX socket() for '%s'",
+ path);
+
return -1;
}
if(timeout) {
if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
- netdata_log_error("Failed to set timeout on UNIX socket '%s'", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to set timeout on UNIX socket '%s'",
+ path);
}
struct sockaddr_un addr;
@@ -726,12 +800,17 @@ static inline int connect_to_unix(const char *path, struct timeval *timeout) {
strncpy(addr.sun_path, path, sizeof(addr.sun_path)-1);
if (connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
- netdata_log_error("Cannot connect to UNIX socket on path '%s'.", path);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Cannot connect to UNIX socket on path '%s'.",
+ path);
+
close(fd);
return -1;
}
- netdata_log_debug(D_CONNECT_TO, "Connected to UNIX socket on path '%s'.", path);
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "Connected to UNIX socket on path '%s'.",
+ path);
return fd;
}
@@ -757,10 +836,24 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t
int ai_err = getaddrinfo(host, service, &hints, &ai_head);
if (ai_err != 0) {
- netdata_log_error("Cannot resolve host '%s', port '%s': %s", host, service, gai_strerror(ai_err));
+
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Cannot resolve host '%s', port '%s': %s",
+ host, service, gai_strerror(ai_err));
+
return -1;
}
+ char hostBfr[NI_MAXHOST + 1];
+ char servBfr[NI_MAXSERV + 1];
+
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_TXT(NDF_DST_IP, hostBfr),
+ ND_LOG_FIELD_TXT(NDF_DST_PORT, servBfr),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
int fd = -1;
for (ai = ai_head; ai != NULL && fd == -1; ai = ai->ai_next) {
@@ -771,9 +864,6 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t
}
}
- char hostBfr[NI_MAXHOST + 1];
- char servBfr[NI_MAXSERV + 1];
-
getnameinfo(ai->ai_addr,
ai->ai_addrlen,
hostBfr,
@@ -782,54 +872,21 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t
sizeof(servBfr),
NI_NUMERICHOST | NI_NUMERICSERV);
- netdata_log_debug(D_CONNECT_TO, "Address info: host = '%s', service = '%s', ai_flags = 0x%02X, ai_family = %d (PF_INET = %d, PF_INET6 = %d), ai_socktype = %d (SOCK_STREAM = %d, SOCK_DGRAM = %d), ai_protocol = %d (IPPROTO_TCP = %d, IPPROTO_UDP = %d), ai_addrlen = %lu (sockaddr_in = %lu, sockaddr_in6 = %lu)",
- hostBfr,
- servBfr,
- (unsigned int)ai->ai_flags,
- ai->ai_family,
- PF_INET,
- PF_INET6,
- ai->ai_socktype,
- SOCK_STREAM,
- SOCK_DGRAM,
- ai->ai_protocol,
- IPPROTO_TCP,
- IPPROTO_UDP,
- (unsigned long)ai->ai_addrlen,
- (unsigned long)sizeof(struct sockaddr_in),
- (unsigned long)sizeof(struct sockaddr_in6));
-
switch (ai->ai_addr->sa_family) {
case PF_INET: {
struct sockaddr_in *pSadrIn = (struct sockaddr_in *)ai->ai_addr;
(void)pSadrIn;
-
- netdata_log_debug(D_CONNECT_TO, "ai_addr = sin_family: %d (AF_INET = %d, AF_INET6 = %d), sin_addr: '%s', sin_port: '%s'",
- pSadrIn->sin_family,
- AF_INET,
- AF_INET6,
- hostBfr,
- servBfr);
break;
}
case PF_INET6: {
struct sockaddr_in6 *pSadrIn6 = (struct sockaddr_in6 *) ai->ai_addr;
(void)pSadrIn6;
-
- netdata_log_debug(D_CONNECT_TO,"ai_addr = sin6_family: %d (AF_INET = %d, AF_INET6 = %d), sin6_addr: '%s', sin6_port: '%s', sin6_flowinfo: %u, sin6_scope_id: %u",
- pSadrIn6->sin6_family,
- AF_INET,
- AF_INET6,
- hostBfr,
- servBfr,
- pSadrIn6->sin6_flowinfo,
- pSadrIn6->sin6_scope_id);
break;
}
default: {
- netdata_log_debug(D_CONNECT_TO, "Unknown protocol family %d.", ai->ai_family);
+ // Unknown protocol family
continue;
}
}
@@ -838,13 +895,17 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t
if(fd != -1) {
if(timeout) {
if(setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (char *) timeout, sizeof(struct timeval)) < 0)
- netdata_log_error("Failed to set timeout on the socket to ip '%s' port '%s'", hostBfr, servBfr);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to set timeout on the socket to ip '%s' port '%s'",
+ hostBfr, servBfr);
}
errno = 0;
if(connect(fd, ai->ai_addr, ai->ai_addrlen) < 0) {
if(errno == EALREADY || errno == EINPROGRESS) {
- internal_error(true, "Waiting for connection to ip %s port %s to be established", hostBfr, servBfr);
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "Waiting for connection to ip %s port %s to be established",
+ hostBfr, servBfr);
// Convert 'struct timeval' to milliseconds for poll():
int timeout_milliseconds = timeout->tv_sec * 1000 + timeout->tv_usec / 1000;
@@ -858,11 +919,19 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t
// poll() completed normally. We can check the revents to see what happened
if (fds[0].revents & POLLOUT) {
// connect() completed successfully, socket is writable.
- netdata_log_info("connect() to ip %s port %s completed successfully", hostBfr, servBfr);
+
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "connect() to ip %s port %s completed successfully",
+ hostBfr, servBfr);
+
}
else {
// This means that the socket is in error. We will close it and set fd to -1
- netdata_log_error("Failed to connect to '%s', port '%s'.", hostBfr, servBfr);
+
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to connect to '%s', port '%s'.",
+ hostBfr, servBfr);
+
close(fd);
fd = -1;
}
@@ -870,27 +939,38 @@ int connect_to_this_ip46(int protocol, int socktype, const char *host, uint32_t
else if (ret == 0) {
// poll() timed out, the connection is not established within the specified timeout.
errno = 0;
- netdata_log_error("Timed out while connecting to '%s', port '%s'.", hostBfr, servBfr);
+
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Timed out while connecting to '%s', port '%s'.",
+ hostBfr, servBfr);
+
close(fd);
fd = -1;
}
- else {
+ else { // ret < 0
// poll() returned an error.
- netdata_log_error("Failed to connect to '%s', port '%s'. poll() returned %d", hostBfr, servBfr, ret);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to connect to '%s', port '%s'. poll() returned %d",
+ hostBfr, servBfr, ret);
+
close(fd);
fd = -1;
}
}
else {
- netdata_log_error("Failed to connect to '%s', port '%s'", hostBfr, servBfr);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to connect to '%s', port '%s'",
+ hostBfr, servBfr);
+
close(fd);
fd = -1;
}
}
-
- if(fd != -1)
- netdata_log_debug(D_CONNECT_TO, "Connected to '%s' on port '%s'.", hostBfr, servBfr);
}
+ else
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to socket() to '%s', port '%s'",
+ hostBfr, servBfr);
}
freeaddrinfo(ai_head);
@@ -965,17 +1045,20 @@ int connect_to_this(const char *definition, int default_port, struct timeval *ti
service = e;
}
- netdata_log_debug(D_CONNECT_TO, "Attempting connection to host = '%s', service = '%s', interface = '%s', protocol = %d (tcp = %d, udp = %d)", host, service, interface, protocol, IPPROTO_TCP, IPPROTO_UDP);
-
if(!*host) {
- netdata_log_error("Definition '%s' does not specify a host.", definition);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Definition '%s' does not specify a host.",
+ definition);
+
return -1;
}
if(*interface) {
scope_id = if_nametoindex(interface);
if(!scope_id)
- netdata_log_error("Cannot find a network interface named '%s'. Continuing with limiting the network interface", interface);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Cannot find a network interface named '%s'. Continuing with limiting the network interface",
+ interface);
}
if(!*service)
@@ -1117,7 +1200,6 @@ ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout)
}
#endif
- internal_error(true, "%s(): calling recv()", __FUNCTION__ );
return recv(sockfd, buf, len, flags);
}
@@ -1160,7 +1242,9 @@ ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout)
return netdata_ssl_write(ssl, buf, len);
}
else {
- netdata_log_error("cannot write to SSL connection - connection is not ready.");
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "cannot write to SSL connection - connection is not ready.");
+
return -1;
}
}
@@ -1224,7 +1308,6 @@ int accept4(int sock, struct sockaddr *addr, socklen_t *addrlen, int flags) {
int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsize, SIMPLE_PATTERN *access_list,
const char *patname, int allow_dns)
{
- netdata_log_debug(D_LISTENER,"checking %s... (allow_dns=%d)", patname, allow_dns);
if (!access_list)
return 1;
if (simple_pattern_matches(access_list, client_ip))
@@ -1239,16 +1322,21 @@ int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsi
if (err != 0 ||
(err = getnameinfo((struct sockaddr *)&sadr, addrlen, client_host, (socklen_t)hostsize,
NULL, 0, NI_NAMEREQD)) != 0) {
- netdata_log_error("Incoming %s on '%s' does not match a numeric pattern, and host could not be resolved (err=%s)",
- patname, client_ip, gai_strerror(err));
+
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Incoming %s on '%s' does not match a numeric pattern, and host could not be resolved (err=%s)",
+ patname, client_ip, gai_strerror(err));
+
if (hostsize >= 8)
strcpy(client_host,"UNKNOWN");
return 0;
}
struct addrinfo *addr_infos = NULL;
if (getaddrinfo(client_host, NULL, NULL, &addr_infos) !=0 ) {
- netdata_log_error("LISTENER: cannot validate hostname '%s' from '%s' by resolving it",
- client_host, client_ip);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: cannot validate hostname '%s' from '%s' by resolving it",
+ client_host, client_ip);
+
if (hostsize >= 8)
strcpy(client_host,"UNKNOWN");
return 0;
@@ -1266,8 +1354,6 @@ int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsi
inet_ntop(AF_INET6, &((struct sockaddr_in6*)(scan->ai_addr))->sin6_addr, address, INET6_ADDRSTRLEN);
break;
}
- netdata_log_debug(D_LISTENER, "Incoming ip %s rev-resolved onto %s, validating against forward-resolution %s",
- client_ip, client_host, address);
if (!strcmp(client_ip, address)) {
validated = 1;
break;
@@ -1275,18 +1361,19 @@ int connection_allowed(int fd, char *client_ip, char *client_host, size_t hostsi
scan = scan->ai_next;
}
if (!validated) {
- netdata_log_error("LISTENER: Cannot validate '%s' as ip of '%s', not listed in DNS", client_ip, client_host);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: Cannot validate '%s' as ip of '%s', not listed in DNS",
+ client_ip, client_host);
+
if (hostsize >= 8)
strcpy(client_host,"UNKNOWN");
}
if (addr_infos!=NULL)
freeaddrinfo(addr_infos);
}
- if (!simple_pattern_matches(access_list, client_host)) {
- netdata_log_debug(D_LISTENER, "Incoming connection on '%s' (%s) does not match allowed pattern for %s",
- client_ip, client_host, patname);
+ if (!simple_pattern_matches(access_list, client_host))
return 0;
- }
+
return 1;
}
@@ -1301,7 +1388,10 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien
if (likely(nfd >= 0)) {
if (getnameinfo((struct sockaddr *)&sadr, addrlen, client_ip, (socklen_t)ipsize,
client_port, (socklen_t)portsize, NI_NUMERICHOST | NI_NUMERICSERV) != 0) {
- netdata_log_error("LISTENER: cannot getnameinfo() on received client connection.");
+
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "LISTENER: cannot getnameinfo() on received client connection.");
+
strncpyz(client_ip, "UNKNOWN", ipsize);
strncpyz(client_port, "UNKNOWN", portsize);
}
@@ -1319,31 +1409,34 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien
switch (((struct sockaddr *)&sadr)->sa_family) {
case AF_UNIX:
- netdata_log_debug(D_LISTENER, "New UNIX domain web client from %s on socket %d.", client_ip, fd);
+ // netdata_log_debug(D_LISTENER, "New UNIX domain web client from %s on socket %d.", client_ip, fd);
// set the port - certain versions of libc return garbage on unix sockets
strncpyz(client_port, "UNIX", portsize);
break;
case AF_INET:
- netdata_log_debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ // netdata_log_debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
break;
case AF_INET6:
if (strncmp(client_ip, "::ffff:", 7) == 0) {
memmove(client_ip, &client_ip[7], strlen(&client_ip[7]) + 1);
- netdata_log_debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ // netdata_log_debug(D_LISTENER, "New IPv4 web client from %s port %s on socket %d.", client_ip, client_port, fd);
}
- else
- netdata_log_debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ // else
+ // netdata_log_debug(D_LISTENER, "New IPv6 web client from %s port %s on socket %d.", client_ip, client_port, fd);
break;
default:
- netdata_log_debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd);
+ // netdata_log_debug(D_LISTENER, "New UNKNOWN web client from %s port %s on socket %d.", client_ip, client_port, fd);
break;
}
if (!connection_allowed(nfd, client_ip, client_host, hostsize, access_list, "connection", allow_dns)) {
errno = 0;
- netdata_log_error("Permission denied for client '%s', port '%s'", client_ip, client_port);
+ nd_log(NDLS_DAEMON, NDLP_WARNING,
+ "Permission denied for client '%s', port '%s'",
+ client_ip, client_port);
+
close(nfd);
nfd = -1;
errno = EPERM;
@@ -1351,7 +1444,9 @@ int accept_socket(int fd, int flags, char *client_ip, size_t ipsize, char *clien
}
#ifdef HAVE_ACCEPT4
else if (errno == ENOSYS)
- netdata_log_error("netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. Recompile netdata like this: ./configure --disable-accept4 ...");
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Netdata has been compiled with the assumption that the system has the accept4() call, but it is not here. "
+ "Recompile netdata like this: ./configure --disable-accept4 ...");
#endif
return nfd;
@@ -1379,19 +1474,16 @@ inline POLLINFO *poll_add_fd(POLLJOB *p
, int (*snd_callback)(POLLINFO * /*pi*/, short int * /*events*/)
, void *data
) {
- netdata_log_debug(D_POLLFD, "POLLFD: ADD: request to add fd %d, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", fd, p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-
if(unlikely(fd < 0)) return NULL;
//if(p->limit && p->used >= p->limit) {
- // netdata_log_info("Max sockets limit reached (%zu sockets), dropping connection", p->used);
+ // nd_log(NDLS_DAEMON, NDLP_WARNING, "Max sockets limit reached (%zu sockets), dropping connection", p->used);
// close(fd);
// return NULL;
//}
if(unlikely(!p->first_free)) {
size_t new_slots = p->slots + POLL_FDS_INCREASE_STEP;
- netdata_log_debug(D_POLLFD, "POLLFD: ADD: increasing size (current = %zu, new = %zu, used = %zu, min = %zu, max = %zu)", p->slots, new_slots, p->used, p->min, p->max);
p->fds = reallocz(p->fds, sizeof(struct pollfd) * new_slots);
p->inf = reallocz(p->inf, sizeof(POLLINFO) * new_slots);
@@ -1399,7 +1491,6 @@ inline POLLINFO *poll_add_fd(POLLJOB *p
// reset all the newly added slots
ssize_t i;
for(i = new_slots - 1; i >= (ssize_t)p->slots ; i--) {
- netdata_log_debug(D_POLLFD, "POLLFD: ADD: resetting new slot %zd", i);
p->fds[i].fd = -1;
p->fds[i].events = 0;
p->fds[i].revents = 0;
@@ -1430,8 +1521,6 @@ inline POLLINFO *poll_add_fd(POLLJOB *p
POLLINFO *pi = p->first_free;
p->first_free = p->first_free->next;
- netdata_log_debug(D_POLLFD, "POLLFD: ADD: selected slot %zu, next free is %zd", pi->slot, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-
struct pollfd *pf = &p->fds[pi->slot];
pf->fd = fd;
pf->events = POLLIN;
@@ -1472,8 +1561,6 @@ inline POLLINFO *poll_add_fd(POLLJOB *p
}
netdata_thread_enable_cancelability();
- netdata_log_debug(D_POLLFD, "POLLFD: ADD: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
-
return pi;
}
@@ -1481,7 +1568,6 @@ inline void poll_close_fd(POLLINFO *pi) {
POLLJOB *p = pi->p;
struct pollfd *pf = &p->fds[pi->slot];
- netdata_log_debug(D_POLLFD, "POLLFD: DEL: request to clear slot %zu (fd %d), old next free was %zd", pi->slot, pf->fd, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
if(unlikely(pf->fd == -1)) return;
@@ -1492,7 +1578,9 @@ inline void poll_close_fd(POLLINFO *pi) {
if(likely(!(pi->flags & POLLINFO_FLAG_DONT_CLOSE))) {
if(close(pf->fd) == -1)
- netdata_log_error("Failed to close() poll_events() socket %d", pf->fd);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "Failed to close() poll_events() socket %d",
+ pf->fd);
}
}
@@ -1533,8 +1621,6 @@ inline void poll_close_fd(POLLINFO *pi) {
}
}
netdata_thread_enable_cancelability();
-
- netdata_log_debug(D_POLLFD, "POLLFD: DEL: completed, slots = %zu, used = %zu, min = %zu, max = %zu, next free = %zd", p->slots, p->used, p->min, p->max, p->first_free?(ssize_t)p->first_free->slot:(ssize_t)-1);
}
void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) {
@@ -1542,14 +1628,13 @@ void *poll_default_add_callback(POLLINFO *pi, short int *events, void *data) {
(void)events;
(void)data;
- // netdata_log_error("POLLFD: internal error: poll_default_add_callback() called");
-
return NULL;
}
void poll_default_del_callback(POLLINFO *pi) {
if(pi->data)
- netdata_log_error("POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak");
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: internal error: del_callback_default() called with data pointer - possible memory leak");
}
int poll_default_rcv_callback(POLLINFO *pi, short int *events) {
@@ -1563,12 +1648,17 @@ int poll_default_rcv_callback(POLLINFO *pi, short int *events) {
if (rc < 0) {
// read failed
if (errno != EWOULDBLOCK && errno != EAGAIN) {
- netdata_log_error("POLLFD: poll_default_rcv_callback(): recv() failed with %zd.", rc);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: poll_default_rcv_callback(): recv() failed with %zd.",
+ rc);
+
return -1;
}
} else if (rc) {
// data received
- netdata_log_info("POLLFD: internal error: poll_default_rcv_callback() is discarding %zd bytes received on socket %d", rc, pi->fd);
+ nd_log(NDLS_DAEMON, NDLP_WARNING,
+ "POLLFD: internal error: poll_default_rcv_callback() is discarding %zd bytes received on socket %d",
+ rc, pi->fd);
}
} while (rc != -1);
@@ -1578,7 +1668,10 @@ int poll_default_rcv_callback(POLLINFO *pi, short int *events) {
int poll_default_snd_callback(POLLINFO *pi, short int *events) {
*events &= ~POLLOUT;
- netdata_log_info("POLLFD: internal error: poll_default_snd_callback(): nothing to send on socket %d", pi->fd);
+ nd_log(NDLS_DAEMON, NDLP_WARNING,
+ "POLLFD: internal error: poll_default_snd_callback(): nothing to send on socket %d",
+ pi->fd);
+
return 0;
}
@@ -1600,17 +1693,25 @@ static void poll_events_cleanup(void *data) {
}
static int poll_process_error(POLLINFO *pi, struct pollfd *pf, short int revents) {
- netdata_log_error("POLLFD: LISTENER: received %s %s %s on socket at slot %zu (fd %d) client '%s' port '%s' expecting %s %s %s, having %s %s %s"
- , revents & POLLERR ? "POLLERR" : ""
- , revents & POLLHUP ? "POLLHUP" : ""
- , revents & POLLNVAL ? "POLLNVAL" : ""
- , pi->slot
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , pf->events & POLLIN ? "POLLIN" : "", pf->events & POLLOUT ? "POLLOUT" : "", pf->events & POLLPRI ? "POLLPRI" : ""
- , revents & POLLIN ? "POLLIN" : "", revents & POLLOUT ? "POLLOUT" : "", revents & POLLPRI ? "POLLPRI" : ""
- );
+ ND_LOG_STACK lgs[] = {
+ ND_LOG_FIELD_TXT(NDF_SRC_IP, pi->client_ip),
+ ND_LOG_FIELD_TXT(NDF_SRC_PORT, pi->client_port),
+ ND_LOG_FIELD_END(),
+ };
+ ND_LOG_STACK_PUSH(lgs);
+
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "POLLFD: LISTENER: received %s %s %s on socket at slot %zu (fd %d) client '%s' port '%s' expecting %s %s %s, having %s %s %s"
+ , revents & POLLERR ? "POLLERR" : ""
+ , revents & POLLHUP ? "POLLHUP" : ""
+ , revents & POLLNVAL ? "POLLNVAL" : ""
+ , pi->slot
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , pf->events & POLLIN ? "POLLIN" : "", pf->events & POLLOUT ? "POLLOUT" : "", pf->events & POLLPRI ? "POLLPRI" : ""
+ , revents & POLLIN ? "POLLIN" : "", revents & POLLOUT ? "POLLOUT" : "", revents & POLLPRI ? "POLLPRI" : ""
+ );
pf->events = 0;
poll_close_fd(pi);
@@ -1621,8 +1722,6 @@ static inline int poll_process_send(POLLJOB *p, POLLINFO *pi, struct pollfd *pf,
pi->last_sent_t = now;
pi->send_count++;
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: sending data to socket on slot %zu (fd %d)", pi->slot, pf->fd);
-
pf->events = 0;
// remember the slot, in case we need to close it later
@@ -1642,8 +1741,6 @@ static inline int poll_process_tcp_read(POLLJOB *p, POLLINFO *pi, struct pollfd
pi->last_received_t = now;
pi->recv_count++;
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: reading data from TCP client slot %zu (fd %d)", pi->slot, pf->fd);
-
pf->events = 0;
// remember the slot, in case we need to close it later
@@ -1663,8 +1760,6 @@ static inline int poll_process_udp_read(POLLINFO *pi, struct pollfd *pf, time_t
pi->last_received_t = now;
pi->recv_count++;
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: reading data from UDP slot %zu (fd %d)", pi->slot, pf->fd);
-
// TODO: access_list is not applied to UDP
// but checking the access list on every UDP packet will destroy
// performance, especially for statsd.
@@ -1683,14 +1778,10 @@ static int poll_process_new_tcp_connection(POLLJOB *p, POLLINFO *pi, struct poll
pi->last_received_t = now;
pi->recv_count++;
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: accepting connections from slot %zu (fd %d)", pi->slot, pf->fd);
-
char client_ip[INET6_ADDRSTRLEN] = "";
char client_port[NI_MAXSERV] = "";
char client_host[NI_MAXHOST] = "";
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: calling accept4() slot %zu (fd %d)", pi->slot, pf->fd);
-
int nfd = accept_socket(
pf->fd,SOCK_NONBLOCK,
client_ip, INET6_ADDRSTRLEN, client_port,NI_MAXSERV, client_host, NI_MAXHOST,
@@ -1700,15 +1791,15 @@ static int poll_process_new_tcp_connection(POLLJOB *p, POLLINFO *pi, struct poll
if (unlikely(nfd < 0)) {
// accept failed
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: accept4() slot %zu (fd %d) failed.", pi->slot, pf->fd);
-
if(unlikely(errno == EMFILE)) {
- error_limit_static_global_var(erl, 10, 1000);
- error_limit(&erl, "POLLFD: LISTENER: too many open files - used by this thread %zu, max for this thread %zu",
- p->used, p->limit);
+ nd_log_limit_static_global_var(erl, 10, 1000);
+ nd_log_limit(&erl, NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: LISTENER: too many open files - used by this thread %zu, max for this thread %zu",
+ p->used, p->limit);
}
else if(unlikely(errno != EWOULDBLOCK && errno != EAGAIN))
- netdata_log_error("POLLFD: LISTENER: accept() failed.");
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: LISTENER: accept() failed.");
}
else {
@@ -1755,7 +1846,8 @@ void poll_events(LISTEN_SOCKETS *sockets
, size_t max_tcp_sockets
) {
if(!sockets || !sockets->opened) {
- netdata_log_error("POLLFD: internal error: no listening sockets are opened");
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: internal error: no listening sockets are opened");
return;
}
@@ -1808,7 +1900,9 @@ void poll_events(LISTEN_SOCKETS *sockets
);
pi->data = data;
- netdata_log_info("POLLFD: LISTENER: listening on '%s'", (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN");
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "POLLFD: LISTENER: listening on '%s'",
+ (sockets->fds_names[i])?sockets->fds_names[i]:"UNKNOWN");
}
int listen_sockets_active = 1;
@@ -1832,7 +1926,6 @@ void poll_events(LISTEN_SOCKETS *sockets
now_usec = now_boottime_usec();
if(unlikely(timer_usec && now_usec >= next_timer_usec)) {
- netdata_log_debug(D_POLLFD, "Calling timer callback after %zu usec", (size_t)(now_usec - last_timer_usec));
last_timer_usec = now_usec;
p.tmr_callback(p.timer_data);
now_usec = now_boottime_usec();
@@ -1849,7 +1942,11 @@ void poll_events(LISTEN_SOCKETS *sockets
// enable or disable the TCP listening sockets, based on the current number of sockets used and the limit set
if((listen_sockets_active && (p.limit && p.used >= p.limit)) || (!listen_sockets_active && (!p.limit || p.used < p.limit))) {
listen_sockets_active = !listen_sockets_active;
- netdata_log_info("%s listening sockets (used TCP sockets %zu, max allowed for this worker %zu)", (listen_sockets_active)?"ENABLING":"DISABLING", p.used, p.limit);
+
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "%s listening sockets (used TCP sockets %zu, max allowed for this worker %zu)",
+ (listen_sockets_active)?"ENABLING":"DISABLING", p.used, p.limit);
+
for (i = 0; i <= p.max; i++) {
if(p.inf[i].flags & POLLINFO_FLAG_SERVER_SOCKET && p.inf[i].socktype == SOCK_STREAM) {
p.fds[i].events = (short int) ((listen_sockets_active) ? POLLIN : 0);
@@ -1857,16 +1954,19 @@ void poll_events(LISTEN_SOCKETS *sockets
}
}
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: Waiting on %zu sockets for %zu ms...", p.max + 1, (size_t)timeout_ms);
retval = poll(p.fds, p.max + 1, timeout_ms);
time_t now = now_boottime_sec();
if(unlikely(retval == -1)) {
- netdata_log_error("POLLFD: LISTENER: poll() failed while waiting on %zu sockets.", p.max + 1);
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: LISTENER: poll() failed while waiting on %zu sockets.",
+ p.max + 1);
+
break;
}
else if(unlikely(!retval)) {
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: poll() timeout.");
+ // timeout
+ ;
}
else {
POLLINFO *pi;
@@ -1920,31 +2020,34 @@ void poll_events(LISTEN_SOCKETS *sockets
conns[conns_max++] = i;
}
else
- netdata_log_error("POLLFD: LISTENER: server slot %zu (fd %d) connection from %s port %s using unhandled socket type %d."
- , i
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , pi->socktype
- );
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: LISTENER: server slot %zu (fd %d) connection from %s port %s using unhandled socket type %d."
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , pi->socktype
+ );
}
else
- netdata_log_error("POLLFD: LISTENER: client slot %zu (fd %d) data from %s port %s using flags %08X is neither client nor server."
- , i
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , pi->flags
- );
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: LISTENER: client slot %zu (fd %d) data from %s port %s using flags %08X is neither client nor server."
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , pi->flags
+ );
}
else
- netdata_log_error("POLLFD: LISTENER: socket slot %zu (fd %d) client %s port %s unhandled event id %d."
- , i
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , revents
- );
+ nd_log(NDLS_DAEMON, NDLP_ERR,
+ "POLLFD: LISTENER: socket slot %zu (fd %d) client %s port %s unhandled event id %d."
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , revents
+ );
}
// process sends
@@ -1997,23 +2100,25 @@ void poll_events(LISTEN_SOCKETS *sockets
if(likely(pi->flags & POLLINFO_FLAG_CLIENT_SOCKET)) {
if (unlikely(pi->send_count == 0 && p.complete_request_timeout > 0 && (now - pi->connected_t) >= p.complete_request_timeout)) {
- netdata_log_info("POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s has not sent a complete request in %zu seconds - closing it. "
- , i
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , (size_t) p.complete_request_timeout
- );
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s has not sent a complete request in %zu seconds - closing it. "
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , (size_t) p.complete_request_timeout
+ );
poll_close_fd(pi);
}
else if(unlikely(pi->recv_count && p.idle_timeout > 0 && now - ((pi->last_received_t > pi->last_sent_t) ? pi->last_received_t : pi->last_sent_t) >= p.idle_timeout )) {
- netdata_log_info("POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s is idle for more than %zu seconds - closing it. "
- , i
- , pi->fd
- , pi->client_ip ? pi->client_ip : "<undefined-ip>"
- , pi->client_port ? pi->client_port : "<undefined-port>"
- , (size_t) p.idle_timeout
- );
+ nd_log(NDLS_DAEMON, NDLP_DEBUG,
+ "POLLFD: LISTENER: client slot %zu (fd %d) from %s port %s is idle for more than %zu seconds - closing it. "
+ , i
+ , pi->fd
+ , pi->client_ip ? pi->client_ip : "<undefined-ip>"
+ , pi->client_port ? pi->client_port : "<undefined-port>"
+ , (size_t) p.idle_timeout
+ );
poll_close_fd(pi);
}
}
@@ -2022,5 +2127,4 @@ void poll_events(LISTEN_SOCKETS *sockets
}
netdata_thread_cleanup_pop(1);
- netdata_log_debug(D_POLLFD, "POLLFD: LISTENER: cleanup completed");
}
diff --git a/libnetdata/string/string.c b/libnetdata/string/string.c
index 54b8f171..e1c8352a 100644
--- a/libnetdata/string/string.c
+++ b/libnetdata/string/string.c
@@ -338,10 +338,6 @@ inline const char *string2str(STRING *string) {
return string->str;
}
-int string_strcmp(STRING *string, const char *s) {
- return strcmp(string2str(string), s);
-}
-
STRING *string_2way_merge(STRING *a, STRING *b) {
static STRING *X = NULL;
@@ -420,7 +416,7 @@ static char **string_unittest_generate_names(size_t entries) {
char **names = mallocz(sizeof(char *) * entries);
for(size_t i = 0; i < entries ;i++) {
char buf[25 + 1] = "";
- snprintfz(buf, 25, "name.%zu.0123456789.%zu \t !@#$%%^&*(),./[]{}\\|~`", i, entries / 2 + i);
+ snprintfz(buf, sizeof(buf) - 1, "name.%zu.0123456789.%zu \t !@#$%%^&*(),./[]{}\\|~`", i, entries / 2 + i);
names[i] = strdupz(buf);
}
return names;
diff --git a/libnetdata/string/string.h b/libnetdata/string/string.h
index 70840ee9..ba0e3876 100644
--- a/libnetdata/string/string.h
+++ b/libnetdata/string/string.h
@@ -13,7 +13,6 @@ STRING *string_dup(STRING *string);
void string_freez(STRING *string);
size_t string_strlen(STRING *string);
const char *string2str(STRING *string) NEVERNULL;
-int string_strcmp(STRING *string, const char *s);
// keep common prefix/suffix and replace everything else with [x]
STRING *string_2way_merge(STRING *a, STRING *b);
@@ -24,6 +23,10 @@ static inline int string_cmp(STRING *s1, STRING *s2) {
return (s1 == s2)?0:strcmp(string2str(s1), string2str(s2));
}
+static inline int string_strcmp(STRING *string, const char *s) {
+ return strcmp(string2str(string), s);
+}
+
void string_statistics(size_t *inserts, size_t *deletes, size_t *searches, size_t *entries, size_t *references, size_t *memory, size_t *duplications, size_t *releases);
int string_unittest(size_t entries);
diff --git a/libnetdata/threads/threads.c b/libnetdata/threads/threads.c
index cc569060..c14f9a7e 100644
--- a/libnetdata/threads/threads.c
+++ b/libnetdata/threads/threads.c
@@ -10,7 +10,6 @@ static pthread_attr_t *netdata_threads_attr = NULL;
typedef struct {
void *arg;
char tag[NETDATA_THREAD_NAME_MAX + 1];
- SPINLOCK detach_lock;
void *(*start_routine) (void *);
NETDATA_THREAD_OPTIONS options;
} NETDATA_THREAD;
@@ -134,8 +133,6 @@ size_t netdata_threads_init(void) {
i = pthread_attr_getstacksize(netdata_threads_attr, &stacksize);
if(i != 0)
fatal("pthread_attr_getstacksize() failed with code %d.", i);
- else
- netdata_log_debug(D_OPTIONS, "initial pthread stack size is %zu bytes", stacksize);
return stacksize;
}
@@ -152,12 +149,12 @@ void netdata_threads_init_after_fork(size_t stacksize) {
if(netdata_threads_attr && stacksize > (size_t)PTHREAD_STACK_MIN) {
i = pthread_attr_setstacksize(netdata_threads_attr, stacksize);
if(i != 0)
- netdata_log_error("pthread_attr_setstacksize() to %zu bytes, failed with code %d.", stacksize, i);
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "pthread_attr_setstacksize() to %zu bytes, failed with code %d.", stacksize, i);
else
- netdata_log_info("Set threads stack size to %zu bytes", stacksize);
+ nd_log(NDLS_DAEMON, NDLP_DEBUG, "Set threads stack size to %zu bytes", stacksize);
}
else
- netdata_log_error("Invalid pthread stacksize %zu", stacksize);
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "Invalid pthread stacksize %zu", stacksize);
}
// ----------------------------------------------------------------------------
@@ -183,12 +180,11 @@ void rrd_collector_finished(void);
static void thread_cleanup(void *ptr) {
if(netdata_thread != ptr) {
NETDATA_THREAD *info = (NETDATA_THREAD *)ptr;
- netdata_log_error("THREADS: internal error - thread local variable does not match the one passed to this function. Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag);
+ nd_log(NDLS_DAEMON, NDLP_ERR, "THREADS: internal error - thread local variable does not match the one passed to this function. Expected thread '%s', passed thread '%s'", netdata_thread->tag, info->tag);
}
- spinlock_lock(&netdata_thread->detach_lock);
if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_CLEANUP))
- netdata_log_info("thread with task id %d finished", gettid());
+ nd_log(NDLS_DAEMON, NDLP_DEBUG, "thread with task id %d finished", gettid());
rrd_collector_finished();
sender_thread_buffer_free();
@@ -200,32 +196,34 @@ static void thread_cleanup(void *ptr) {
netdata_thread->tag[0] = '\0';
- spinlock_unlock(&netdata_thread->detach_lock);
freez(netdata_thread);
netdata_thread = NULL;
}
-static void thread_set_name_np(NETDATA_THREAD *nt) {
+void netdata_thread_set_tag(const char *tag) {
+ if(!tag || !*tag)
+ return;
- if (nt && nt->tag[0]) {
- int ret = 0;
+ int ret = 0;
- char threadname[NETDATA_THREAD_NAME_MAX+1];
- strncpyz(threadname, nt->tag, NETDATA_THREAD_NAME_MAX);
+ char threadname[NETDATA_THREAD_NAME_MAX+1];
+ strncpyz(threadname, tag, NETDATA_THREAD_NAME_MAX);
#if defined(__FreeBSD__)
- pthread_set_name_np(pthread_self(), threadname);
+ pthread_set_name_np(pthread_self(), threadname);
#elif defined(__APPLE__)
- ret = pthread_setname_np(threadname);
+ ret = pthread_setname_np(threadname);
#else
- ret = pthread_setname_np(pthread_self(), threadname);
+ ret = pthread_setname_np(pthread_self(), threadname);
#endif
- if (ret != 0)
- netdata_log_error("cannot set pthread name of %d to %s. ErrCode: %d", gettid(), threadname, ret);
- else
- netdata_log_info("set name of thread %d to %s", gettid(), threadname);
+ if (ret != 0)
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "cannot set pthread name of %d to %s. ErrCode: %d", gettid(), threadname, ret);
+ else
+ nd_log(NDLS_DAEMON, NDLP_DEBUG, "set name of thread %d to %s", gettid(), threadname);
+ if(netdata_thread) {
+ strncpyz(netdata_thread->tag, threadname, sizeof(netdata_thread->tag) - 1);
}
}
@@ -247,7 +245,7 @@ void uv_thread_set_name_np(uv_thread_t ut, const char* name) {
thread_name_get(true);
if (ret)
- netdata_log_info("cannot set libuv thread name to %s. Err: %d", threadname, ret);
+ nd_log(NDLS_DAEMON, NDLP_NOTICE, "cannot set libuv thread name to %s. Err: %d", threadname, ret);
}
void os_thread_get_current_name_np(char threadname[NETDATA_THREAD_NAME_MAX + 1])
@@ -264,15 +262,23 @@ static void *netdata_thread_init(void *ptr) {
netdata_thread = (NETDATA_THREAD *)ptr;
if(!(netdata_thread->options & NETDATA_THREAD_OPTION_DONT_LOG_STARTUP))
- netdata_log_info("thread created with task id %d", gettid());
+ nd_log(NDLS_DAEMON, NDLP_DEBUG, "thread created with task id %d", gettid());
if(pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL) != 0)
- netdata_log_error("cannot set pthread cancel type to DEFERRED.");
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "cannot set pthread cancel type to DEFERRED.");
if(pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL) != 0)
- netdata_log_error("cannot set pthread cancel state to ENABLE.");
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "cannot set pthread cancel state to ENABLE.");
- thread_set_name_np(ptr);
+ netdata_thread_set_tag(netdata_thread->tag);
+
+ if (!(netdata_thread->options & NETDATA_THREAD_OPTION_JOINABLE)) {
+ int rc = pthread_detach(pthread_self());
+ if (rc != 0)
+ nd_log(NDLS_DAEMON, NDLP_WARNING,
+ "cannot request detach of newly created %s thread. pthread_detach() failed with code %d",
+ netdata_thread->tag, rc);
+ }
void *ret = NULL;
pthread_cleanup_push(thread_cleanup, ptr);
@@ -289,22 +295,10 @@ int netdata_thread_create(netdata_thread_t *thread, const char *tag, NETDATA_THR
info->options = options;
strncpyz(info->tag, tag, NETDATA_THREAD_NAME_MAX);
- spinlock_init(&info->detach_lock);
- spinlock_lock(&info->detach_lock);
-
int ret = pthread_create(thread, netdata_threads_attr, netdata_thread_init, info);
if(ret != 0)
- netdata_log_error("failed to create new thread for %s. pthread_create() failed with code %d", tag, ret);
-
- else {
- if (!(options & NETDATA_THREAD_OPTION_JOINABLE)) {
- int ret2 = pthread_detach(*thread);
- if (ret2 != 0)
- netdata_log_error("cannot request detach of newly created %s thread. pthread_detach() failed with code %d", tag, ret2);
- }
- }
+ nd_log(NDLS_DAEMON, NDLP_ERR, "failed to create new thread for %s. pthread_create() failed with code %d", tag, ret);
- spinlock_unlock(&info->detach_lock);
return ret;
}
@@ -318,9 +312,9 @@ int netdata_thread_cancel(netdata_thread_t thread) {
int ret = pthread_cancel(thread);
if(ret != 0)
#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_error("cannot cancel thread. pthread_cancel() failed with code %d at %d@%s, function %s()", ret, line, file, function);
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "cannot cancel thread. pthread_cancel() failed with code %d at %d@%s, function %s()", ret, line, file, function);
#else
- netdata_log_error("cannot cancel thread. pthread_cancel() failed with code %d.", ret);
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "cannot cancel thread. pthread_cancel() failed with code %d.", ret);
#endif
return ret;
@@ -332,7 +326,7 @@ int netdata_thread_cancel(netdata_thread_t thread) {
int netdata_thread_join(netdata_thread_t thread, void **retval) {
int ret = pthread_join(thread, retval);
if(ret != 0)
- netdata_log_error("cannot join thread. pthread_join() failed with code %d.", ret);
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "cannot join thread. pthread_join() failed with code %d.", ret);
return ret;
}
@@ -340,7 +334,7 @@ int netdata_thread_join(netdata_thread_t thread, void **retval) {
int netdata_thread_detach(pthread_t thread) {
int ret = pthread_detach(thread);
if(ret != 0)
- netdata_log_error("cannot detach thread. pthread_detach() failed with code %d.", ret);
+ nd_log(NDLS_DAEMON, NDLP_WARNING, "cannot detach thread. pthread_detach() failed with code %d.", ret);
return ret;
}
diff --git a/libnetdata/threads/threads.h b/libnetdata/threads/threads.h
index acb4e6ba..97c3c803 100644
--- a/libnetdata/threads/threads.h
+++ b/libnetdata/threads/threads.h
@@ -18,6 +18,8 @@ typedef enum {
#define netdata_thread_cleanup_push(func, arg) pthread_cleanup_push(func, arg)
#define netdata_thread_cleanup_pop(execute) pthread_cleanup_pop(execute)
+void netdata_thread_set_tag(const char *tag);
+
typedef pthread_t netdata_thread_t;
struct netdata_static_thread {
diff --git a/libnetdata/uuid/Makefile.am b/libnetdata/uuid/Makefile.am
new file mode 100644
index 00000000..161784b8
--- /dev/null
+++ b/libnetdata/uuid/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/uuid/README.md b/libnetdata/uuid/README.md
new file mode 100644
index 00000000..fbadfd63
--- /dev/null
+++ b/libnetdata/uuid/README.md
@@ -0,0 +1,13 @@
+<!--
+title: "UUID"
+custom_edit_url: https://github.com/netdata/netdata/edit/master/libnetdata/uuid/README.md
+sidebar_label: "UUID"
+learn_topic_type: "Tasks"
+learn_rel_path: "Developers/libnetdata"
+-->
+
+# UUID
+
+Netdata uses libuuid for managing UUIDs.
+
+In this folder are a few custom helpers. \ No newline at end of file
diff --git a/libnetdata/uuid/uuid.c b/libnetdata/uuid/uuid.c
new file mode 100644
index 00000000..55b66db9
--- /dev/null
+++ b/libnetdata/uuid/uuid.c
@@ -0,0 +1,179 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "../libnetdata.h"
+
+void uuid_unparse_lower_compact(const uuid_t uuid, char *out) {
+ static const char *hex_chars = "0123456789abcdef";
+ for (int i = 0; i < 16; i++) {
+ out[i * 2] = hex_chars[(uuid[i] >> 4) & 0x0F];
+ out[i * 2 + 1] = hex_chars[uuid[i] & 0x0F];
+ }
+ out[32] = '\0'; // Null-terminate the string
+}
+
+inline int uuid_parse_compact(const char *in, uuid_t uuid) {
+ if (strlen(in) != 32)
+ return -1; // Invalid input length
+
+ for (int i = 0; i < 16; i++) {
+ int high = hex_char_to_int(in[i * 2]);
+ int low = hex_char_to_int(in[i * 2 + 1]);
+
+ if (high < 0 || low < 0)
+ return -1; // Invalid hexadecimal character
+
+ uuid[i] = (high << 4) | low;
+ }
+
+ return 0; // Success
+}
+
+int uuid_parse_flexi(const char *in, uuid_t uu) {
+ if(!in || !*in)
+ return -1;
+
+ size_t hexCharCount = 0;
+ size_t hyphenCount = 0;
+ const char *s = in;
+ int byteIndex = 0;
+ uuid_t uuid; // work on a temporary place, to not corrupt the previous value of uu if we fail
+
+ while (*s && byteIndex < 16) {
+ if (*s == '-') {
+ s++;
+ hyphenCount++;
+
+ if (unlikely(hyphenCount > 4))
+ // Too many hyphens
+ return -2;
+ }
+
+ if (likely(isxdigit(*s))) {
+ int high = hex_char_to_int(*s++);
+ hexCharCount++;
+
+ if (likely(isxdigit(*s))) {
+ int low = hex_char_to_int(*s++);
+ hexCharCount++;
+
+ uuid[byteIndex++] = (high << 4) | low;
+ }
+ else
+ // Not a valid UUID (expected a pair of hex digits)
+ return -3;
+ }
+ else
+ // Not a valid UUID
+ return -4;
+ }
+
+ if (unlikely(byteIndex < 16))
+ // Not enough data to form a UUID
+ return -5;
+
+ if (unlikely(hexCharCount != 32))
+ // wrong number of hex digits
+ return -6;
+
+ if(unlikely(hyphenCount != 0 && hyphenCount != 4))
+ // wrong number of hyphens
+ return -7;
+
+ // copy the final value
+ memcpy(uu, uuid, sizeof(uuid_t));
+
+ return 0;
+}
+
+
+// ----------------------------------------------------------------------------
+// unit test
+
+static inline void remove_hyphens(const char *uuid_with_hyphens, char *uuid_without_hyphens) {
+ while (*uuid_with_hyphens) {
+ if (*uuid_with_hyphens != '-') {
+ *uuid_without_hyphens++ = *uuid_with_hyphens;
+ }
+ uuid_with_hyphens++;
+ }
+ *uuid_without_hyphens = '\0';
+}
+
+int uuid_unittest(void) {
+ const int num_tests = 100000;
+ int failed_tests = 0;
+
+ int i;
+ for (i = 0; i < num_tests; i++) {
+ uuid_t original_uuid, parsed_uuid;
+ char uuid_str_with_hyphens[UUID_STR_LEN], uuid_str_without_hyphens[UUID_COMPACT_STR_LEN];
+
+ // Generate a random UUID
+ switch(i % 2) {
+ case 0:
+ uuid_generate(original_uuid);
+ break;
+
+ case 1:
+ uuid_generate_random(original_uuid);
+ break;
+ }
+
+ // Unparse it with hyphens
+ bool lower = false;
+ switch(i % 3) {
+ case 0:
+ uuid_unparse_lower(original_uuid, uuid_str_with_hyphens);
+ lower = true;
+ break;
+
+ case 1:
+ uuid_unparse(original_uuid, uuid_str_with_hyphens);
+ break;
+
+ case 2:
+ uuid_unparse_upper(original_uuid, uuid_str_with_hyphens);
+ break;
+ }
+
+ // Remove the hyphens
+ remove_hyphens(uuid_str_with_hyphens, uuid_str_without_hyphens);
+
+ if(lower) {
+ char test[UUID_COMPACT_STR_LEN];
+ uuid_unparse_lower_compact(original_uuid, test);
+ if(strcmp(test, uuid_str_without_hyphens) != 0) {
+ printf("uuid_unparse_lower_compact() failed, expected '%s', got '%s'\n",
+ uuid_str_without_hyphens, test);
+ failed_tests++;
+ }
+ }
+
+ // Parse the UUID string with hyphens
+ int parse_result = uuid_parse_flexi(uuid_str_with_hyphens, parsed_uuid);
+ if (parse_result != 0) {
+ printf("uuid_parse_flexi() returned %d (parsing error) for UUID with hyphens: %s\n", parse_result, uuid_str_with_hyphens);
+ failed_tests++;
+ } else if (uuid_compare(original_uuid, parsed_uuid) != 0) {
+ printf("uuid_parse_flexi() parsed value mismatch for UUID with hyphens: %s\n", uuid_str_with_hyphens);
+ failed_tests++;
+ }
+
+ // Parse the UUID string without hyphens
+ parse_result = uuid_parse_flexi(uuid_str_without_hyphens, parsed_uuid);
+ if (parse_result != 0) {
+ printf("uuid_parse_flexi() returned %d (parsing error) for UUID without hyphens: %s\n", parse_result, uuid_str_without_hyphens);
+ failed_tests++;
+ }
+ else if(uuid_compare(original_uuid, parsed_uuid) != 0) {
+ printf("uuid_parse_flexi() parsed value mismatch for UUID without hyphens: %s\n", uuid_str_without_hyphens);
+ failed_tests++;
+ }
+
+ if(failed_tests)
+ break;
+ }
+
+ printf("UUID: failed %d out of %d tests.\n", failed_tests, i);
+ return failed_tests;
+}
diff --git a/libnetdata/uuid/uuid.h b/libnetdata/uuid/uuid.h
new file mode 100644
index 00000000..56764684
--- /dev/null
+++ b/libnetdata/uuid/uuid.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_UUID_H
+#define NETDATA_UUID_H
+
+UUID_DEFINE(streaming_from_child_msgid, 0xed,0x4c,0xdb, 0x8f, 0x1b, 0xeb, 0x4a, 0xd3, 0xb5, 0x7c, 0xb3, 0xca, 0xe2, 0xd1, 0x62, 0xfa);
+UUID_DEFINE(streaming_to_parent_msgid, 0x6e, 0x2e, 0x38, 0x39, 0x06, 0x76, 0x48, 0x96, 0x8b, 0x64, 0x60, 0x45, 0xdb, 0xf2, 0x8d, 0x66);
+UUID_DEFINE(health_alert_transition_msgid, 0x9c, 0xe0, 0xcb, 0x58, 0xab, 0x8b, 0x44, 0xdf, 0x82, 0xc4, 0xbf, 0x1a, 0xd9, 0xee, 0x22, 0xde);
+
+// this is also defined in alarm-notify.sh.in
+UUID_DEFINE(health_alert_notification_msgid, 0x6d, 0xb0, 0x01, 0x8e, 0x83, 0xe3, 0x43, 0x20, 0xae, 0x2a, 0x65, 0x9d, 0x78, 0x01, 0x9f, 0xb7);
+
+#define UUID_COMPACT_STR_LEN 33
+void uuid_unparse_lower_compact(const uuid_t uuid, char *out);
+int uuid_parse_compact(const char *in, uuid_t uuid);
+int uuid_parse_flexi(const char *in, uuid_t uuid);
+
+static inline int uuid_memcmp(const uuid_t *uu1, const uuid_t *uu2) {
+ return memcmp(uu1, uu2, sizeof(uuid_t));
+}
+
+static inline int hex_char_to_int(char c) {
+ if (c >= '0' && c <= '9') return c - '0';
+ if (c >= 'a' && c <= 'f') return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F') return c - 'A' + 10;
+ return -1; // Invalid hexadecimal character
+}
+
+#endif //NETDATA_UUID_H
diff --git a/libnetdata/worker_utilization/worker_utilization.c b/libnetdata/worker_utilization/worker_utilization.c
index ad45dbc7..f39cea8a 100644
--- a/libnetdata/worker_utilization/worker_utilization.c
+++ b/libnetdata/worker_utilization/worker_utilization.c
@@ -50,13 +50,16 @@ struct workers_workname { // this is what we add to Ju
};
static struct workers_globals {
+ bool enabled;
+
SPINLOCK spinlock;
Pvoid_t worknames_JudyHS;
size_t memory;
} workers_globals = { // workers globals, the base of all worknames
- .spinlock = NETDATA_SPINLOCK_INITIALIZER, // a lock for the worknames index
- .worknames_JudyHS = NULL, // the worknames index
+ .enabled = false,
+ .spinlock = NETDATA_SPINLOCK_INITIALIZER, // a lock for the worknames index
+ .worknames_JudyHS = NULL, // the worknames index
};
static __thread struct worker *worker = NULL; // the current thread worker
@@ -69,7 +72,14 @@ static inline usec_t worker_now_monotonic_usec(void) {
#endif
}
+void workers_utilization_enable(void) {
+ workers_globals.enabled = true;
+}
+
size_t workers_allocated_memory(void) {
+ if(!workers_globals.enabled)
+ return 0;
+
spinlock_lock(&workers_globals.spinlock);
size_t memory = workers_globals.memory;
spinlock_unlock(&workers_globals.spinlock);
@@ -78,7 +88,8 @@ size_t workers_allocated_memory(void) {
}
void worker_register(const char *name) {
- if(unlikely(worker)) return;
+ if(unlikely(worker || !workers_globals.enabled))
+ return;
worker = callocz(1, sizeof(struct worker));
worker->pid = gettid();
@@ -213,6 +224,7 @@ void worker_is_busy(size_t job_id) {
void worker_set_metric(size_t job_id, NETDATA_DOUBLE value) {
if(unlikely(!worker)) return;
+
if(unlikely(job_id >= WORKER_UTILIZATION_MAX_JOB_TYPES))
return;
@@ -247,6 +259,9 @@ void workers_foreach(const char *name, void (*callback)(
, NETDATA_DOUBLE *job_custom_values
)
, void *data) {
+ if(!workers_globals.enabled)
+ return;
+
spinlock_lock(&workers_globals.spinlock);
usec_t busy_time, delta;
size_t i, jobs_started, jobs_running;
diff --git a/libnetdata/worker_utilization/worker_utilization.h b/libnetdata/worker_utilization/worker_utilization.h
index cc3f8268..e2f46c5a 100644
--- a/libnetdata/worker_utilization/worker_utilization.h
+++ b/libnetdata/worker_utilization/worker_utilization.h
@@ -15,6 +15,7 @@ typedef enum __attribute__((packed)) {
WORKER_METRIC_INCREMENTAL_TOTAL = 4,
} WORKER_METRIC_TYPE;
+void workers_utilization_enable(void);
size_t workers_allocated_memory(void);
void worker_register(const char *name);
void worker_register_job_name(size_t job_id, const char *name);