author     Daniel Baumann <daniel.baumann@progress-linux.org>    2023-08-10 09:18:52 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2023-08-10 09:19:08 +0000
commit     a2d7dede737947d7c6afa20a88e1f0c64e0eb96c (patch)
tree       fed4aff7dbe0be00cf91de6261d98bc0eb9a2449 /libnetdata
parent     Releasing debian version 1.41.0-1. (diff)
download   netdata-a2d7dede737947d7c6afa20a88e1f0c64e0eb96c.tar.xz
           netdata-a2d7dede737947d7c6afa20a88e1f0c64e0eb96c.zip
Merging upstream version 1.42.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'libnetdata')
-rw-r--r--   libnetdata/Makefile.am                                 1
-rw-r--r--   libnetdata/buffer/buffer.c                            13
-rw-r--r--   libnetdata/buffer/buffer.h                            94
-rw-r--r--   libnetdata/dyn_conf/dyn_conf.c                       909
-rw-r--r--   libnetdata/dyn_conf/dyn_conf.h                       136
-rw-r--r--   libnetdata/ebpf/ebpf.h                                28
-rw-r--r--   libnetdata/facets/Makefile.am                          8
-rw-r--r--   libnetdata/facets/README.md                            0
-rw-r--r--   libnetdata/facets/facets.c                           851
-rw-r--r--   libnetdata/facets/facets.h                            62
-rw-r--r--   libnetdata/http/http_defs.h                            8
-rw-r--r--   libnetdata/inlined.h                                  23
-rw-r--r--   libnetdata/libnetdata.c                              134
-rw-r--r--   libnetdata/libnetdata.h                               12
-rw-r--r--   libnetdata/socket/socket.h                            30
-rw-r--r--   libnetdata/url/url.c                                   2
-rw-r--r--   libnetdata/worker_utilization/worker_utilization.h     2
17 files changed, 2257 insertions(+), 56 deletions(-)
diff --git a/libnetdata/Makefile.am b/libnetdata/Makefile.am
index b81d620ba..e85f4abe1 100644
--- a/libnetdata/Makefile.am
+++ b/libnetdata/Makefile.am
@@ -14,6 +14,7 @@ SUBDIRS = \
dictionary \
ebpf \
eval \
+ facets \
json \
july \
health \
diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c
index b43762863..2d09bb1ff 100644
--- a/libnetdata/buffer/buffer.c
+++ b/libnetdata/buffer/buffer.c
@@ -15,6 +15,7 @@ void buffer_reset(BUFFER *wb) {
wb->options = 0;
wb->date = 0;
wb->expires = 0;
+ buffer_no_cacheable(wb);
buffer_overflow_check(wb);
}
@@ -254,6 +255,7 @@ BUFFER *buffer_create(size_t size, size_t *statistics)
b->size = size;
b->content_type = CT_TEXT_PLAIN;
b->statistics = statistics;
+ buffer_no_cacheable(b);
buffer_overflow_init(b);
buffer_overflow_check(b);
@@ -305,11 +307,11 @@ void buffer_increase(BUFFER *b, size_t free_size_required) {
// ----------------------------------------------------------------------------
void buffer_json_initialize(BUFFER *wb, const char *key_quote, const char *value_quote, int depth,
- bool add_anonymous_object, bool minify) {
+ bool add_anonymous_object, BUFFER_JSON_OPTIONS options) {
strncpyz(wb->json.key_quote, key_quote, BUFFER_QUOTE_MAX_SIZE);
strncpyz(wb->json.value_quote, value_quote, BUFFER_QUOTE_MAX_SIZE);
- wb->json.minify = minify;
+ wb->json.options = options;
wb->json.depth = (int8_t)(depth - 1);
_buffer_json_depth_push(wb, BUFFER_JSON_OBJECT);
@@ -317,6 +319,7 @@ void buffer_json_initialize(BUFFER *wb, const char *key_quote, const char *value
buffer_fast_strcat(wb, "{", 1);
wb->content_type = CT_APPLICATION_JSON;
+ buffer_no_cacheable(wb);
}
void buffer_json_finalize(BUFFER *wb) {
@@ -336,7 +339,7 @@ void buffer_json_finalize(BUFFER *wb) {
}
}
- if(!wb->json.minify)
+ if(!(wb->json.options & BUFFER_JSON_OPTIONS_MINIFY))
buffer_fast_strcat(wb, "\n", 1);
}
@@ -487,13 +490,13 @@ int buffer_unittest(void) {
buffer_flush(wb);
- buffer_json_initialize(wb, "\"", "\"", 0, true, false);
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
buffer_json_finalize(wb);
errors += buffer_expect(wb, "{\n}\n");
buffer_flush(wb);
- buffer_json_initialize(wb, "\"", "\"", 0, true, false);
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);
buffer_json_member_add_string(wb, "hello", "world");
buffer_json_member_add_string(wb, "alpha", "this: \" is a double quote");
buffer_json_member_add_object(wb, "object1");
diff --git a/libnetdata/buffer/buffer.h b/libnetdata/buffer/buffer.h
index d0078a521..0f1540287 100644
--- a/libnetdata/buffer/buffer.h
+++ b/libnetdata/buffer/buffer.h
@@ -68,6 +68,12 @@ typedef enum __attribute__ ((__packed__)) {
CT_APPLICATION_ZIP,
} HTTP_CONTENT_TYPE;
+typedef enum __attribute__ ((__packed__)) {
+ BUFFER_JSON_OPTIONS_DEFAULT = 0,
+ BUFFER_JSON_OPTIONS_MINIFY = (1 << 0),
+ BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS = (1 << 1),
+} BUFFER_JSON_OPTIONS;
+
typedef struct web_buffer {
size_t size; // allocation size of buffer, in bytes
size_t len; // current data length in buffer, in bytes
@@ -82,7 +88,7 @@ typedef struct web_buffer {
char key_quote[BUFFER_QUOTE_MAX_SIZE + 1];
char value_quote[BUFFER_QUOTE_MAX_SIZE + 1];
int8_t depth;
- bool minify;
+ BUFFER_JSON_OPTIONS options;
BUFFER_JSON_NODE stack[BUFFER_JSON_MAX_DEPTH];
} json;
} BUFFER;
@@ -148,7 +154,7 @@ static inline void buffer_need_bytes(BUFFER *buffer, size_t needed_free_size) {
}
void buffer_json_initialize(BUFFER *wb, const char *key_quote, const char *value_quote, int depth,
- bool add_anonymous_object, bool minify);
+ bool add_anonymous_object, BUFFER_JSON_OPTIONS options);
void buffer_json_finalize(BUFFER *wb);
@@ -664,11 +670,16 @@ static inline void buffer_print_spaces(BUFFER *wb, size_t spaces) {
buffer_overflow_check(wb);
}
-static inline void buffer_print_json_comma_newline_spacing(BUFFER *wb) {
+static inline void buffer_print_json_comma(BUFFER *wb) {
if(wb->json.stack[wb->json.depth].count)
buffer_fast_strcat(wb, ",", 1);
+}
+
+static inline void buffer_print_json_comma_newline_spacing(BUFFER *wb) {
+ buffer_print_json_comma(wb);
- if(wb->json.minify)
+ if((wb->json.options & BUFFER_JSON_OPTIONS_MINIFY) ||
+ (wb->json.stack[wb->json.depth].type == BUFFER_JSON_ARRAY && !(wb->json.options & BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS)))
return;
buffer_fast_strcat(wb, "\n", 1);
@@ -715,7 +726,7 @@ static inline void buffer_json_object_close(BUFFER *wb) {
assert(wb->json.depth >= 0 && "BUFFER JSON: nothing is open to close it");
assert(wb->json.stack[wb->json.depth].type == BUFFER_JSON_OBJECT && "BUFFER JSON: an object is not open to close it");
#endif
- if(!wb->json.minify) {
+ if(!(wb->json.options & BUFFER_JSON_OPTIONS_MINIFY)) {
buffer_fast_strcat(wb, "\n", 1);
buffer_print_spaces(wb, wb->json.depth);
}
@@ -792,7 +803,14 @@ static inline void buffer_json_member_add_array(BUFFER *wb, const char *key) {
}
static inline void buffer_json_add_array_item_array(BUFFER *wb) {
- buffer_print_json_comma_newline_spacing(wb);
+ if(!(wb->json.options & BUFFER_JSON_OPTIONS_MINIFY) && wb->json.stack[wb->json.depth].type == BUFFER_JSON_ARRAY) {
+ // an array inside another array
+ buffer_print_json_comma(wb);
+ buffer_fast_strcat(wb, "\n", 1);
+ buffer_print_spaces(wb, wb->json.depth + 1);
+ }
+ else
+ buffer_print_json_comma_newline_spacing(wb);
buffer_fast_strcat(wb, "[", 1);
wb->json.stack[wb->json.depth].count++;
@@ -801,48 +819,42 @@ static inline void buffer_json_add_array_item_array(BUFFER *wb) {
}
static inline void buffer_json_add_array_item_string(BUFFER *wb, const char *value) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_json_add_string_value(wb, value);
wb->json.stack[wb->json.depth].count++;
}
static inline void buffer_json_add_array_item_double(BUFFER *wb, NETDATA_DOUBLE value) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_print_netdata_double(wb, value);
wb->json.stack[wb->json.depth].count++;
}
static inline void buffer_json_add_array_item_int64(BUFFER *wb, int64_t value) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_print_int64(wb, value);
wb->json.stack[wb->json.depth].count++;
}
static inline void buffer_json_add_array_item_uint64(BUFFER *wb, uint64_t value) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_print_uint64(wb, value);
wb->json.stack[wb->json.depth].count++;
}
static inline void buffer_json_add_array_item_time_t(BUFFER *wb, time_t value) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_print_int64(wb, value);
wb->json.stack[wb->json.depth].count++;
}
static inline void buffer_json_add_array_item_time_ms(BUFFER *wb, time_t value) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_print_int64(wb, value);
buffer_fast_strcat(wb, "000", 3);
@@ -850,8 +862,7 @@ static inline void buffer_json_add_array_item_time_ms(BUFFER *wb, time_t value)
}
static inline void buffer_json_add_array_item_time_t2ms(BUFFER *wb, time_t value) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_print_int64(wb, value);
buffer_fast_strcat(wb, "000", 3);
@@ -859,8 +870,7 @@ static inline void buffer_json_add_array_item_time_t2ms(BUFFER *wb, time_t value
}
static inline void buffer_json_add_array_item_object(BUFFER *wb) {
- if(wb->json.stack[wb->json.depth].count)
- buffer_fast_strcat(wb, ",", 1);
+ buffer_print_json_comma_newline_spacing(wb);
buffer_fast_strcat(wb, "{", 1);
wb->json.stack[wb->json.depth].count++;
@@ -919,6 +929,11 @@ static inline void buffer_json_array_close(BUFFER *wb) {
assert(wb->json.depth >= 0 && "BUFFER JSON: nothing is open to close it");
assert(wb->json.stack[wb->json.depth].type == BUFFER_JSON_ARRAY && "BUFFER JSON: an array is not open to close it");
#endif
+ if(wb->json.options & BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS) {
+ buffer_fast_strcat(wb, "\n", 1);
+ buffer_print_spaces(wb, wb->json.depth);
+ }
+
buffer_fast_strcat(wb, "]", 1);
_buffer_json_depth_pop(wb);
}
@@ -928,6 +943,8 @@ typedef enum __attribute__((packed)) {
RRDF_FIELD_OPTS_UNIQUE_KEY = (1 << 0), // the field is the unique key of the row
RRDF_FIELD_OPTS_VISIBLE = (1 << 1), // the field should be visible by default
RRDF_FIELD_OPTS_STICKY = (1 << 2), // the field should be sticky
+ RRDF_FIELD_OPTS_FULL_WIDTH = (1 << 3), // the field should get full width
+ RRDF_FIELD_OPTS_WRAP = (1 << 4), // the field's content should be wrapped
} RRDF_FIELD_OPTIONS;
typedef enum __attribute__((packed)) {
@@ -969,7 +986,8 @@ static inline const char *rrdf_field_type_to_string(RRDF_FIELD_TYPE type) {
typedef enum __attribute__((packed)) {
RRDF_FIELD_VISUAL_VALUE, // show the value, possibly applying a transformation
RRDF_FIELD_VISUAL_BAR, // show the value and a bar, respecting the max field to fill the bar at 100%
- RRDF_FIELD_VISUAL_PILL, // array of values (transformation is respected)
+ RRDF_FIELD_VISUAL_PILL, //
+ RRDF_FIELD_VISUAL_MARKDOC, //
} RRDF_FIELD_VISUAL;
static inline const char *rrdf_field_visual_to_string(RRDF_FIELD_VISUAL visual) {
@@ -983,14 +1001,18 @@ static inline const char *rrdf_field_visual_to_string(RRDF_FIELD_VISUAL visual)
case RRDF_FIELD_VISUAL_PILL:
return "pill";
+
+ case RRDF_FIELD_VISUAL_MARKDOC:
+ return "markdoc";
}
}
typedef enum __attribute__((packed)) {
RRDF_FIELD_TRANSFORM_NONE, // show the value as-is
- RRDF_FIELD_TRANSFORM_NUMBER, // show the value repsecting the decimal_points
- RRDF_FIELD_TRANSFORM_DURATION, // transform as duration in second to a human readable duration
- RRDF_FIELD_TRANSFORM_DATETIME, // UNIX epoch timestamp in ms
+ RRDF_FIELD_TRANSFORM_NUMBER, // show the value respecting the decimal_points
+ RRDF_FIELD_TRANSFORM_DURATION_S, // transform a duration given in seconds to a human-readable duration
+ RRDF_FIELD_TRANSFORM_DATETIME_MS, // UNIX epoch timestamp in ms
+ RRDF_FIELD_TRANSFORM_DATETIME_USEC, // UNIX epoch timestamp in usec
} RRDF_FIELD_TRANSFORM;
static inline const char *rrdf_field_transform_to_string(RRDF_FIELD_TRANSFORM transform) {
@@ -1002,11 +1024,14 @@ static inline const char *rrdf_field_transform_to_string(RRDF_FIELD_TRANSFORM tr
case RRDF_FIELD_TRANSFORM_NUMBER:
return "number";
- case RRDF_FIELD_TRANSFORM_DURATION:
+ case RRDF_FIELD_TRANSFORM_DURATION_S:
return "duration";
- case RRDF_FIELD_TRANSFORM_DATETIME:
+ case RRDF_FIELD_TRANSFORM_DATETIME_MS:
return "datetime";
+
+ case RRDF_FIELD_TRANSFORM_DATETIME_USEC:
+ return "datetime_usec";
}
}
@@ -1064,18 +1089,26 @@ static inline const char *rrdf_field_summary_to_string(RRDF_FIELD_SUMMARY summar
}
typedef enum __attribute__((packed)) {
+ RRDF_FIELD_FILTER_NONE,
RRDF_FIELD_FILTER_RANGE,
RRDF_FIELD_FILTER_MULTISELECT,
+ RRDF_FIELD_FILTER_FACET,
} RRDF_FIELD_FILTER;
static inline const char *rrdf_field_filter_to_string(RRDF_FIELD_FILTER filter) {
switch(filter) {
- default:
case RRDF_FIELD_FILTER_RANGE:
return "range";
case RRDF_FIELD_FILTER_MULTISELECT:
return "multiselect";
+
+ case RRDF_FIELD_FILTER_FACET:
+ return "facet";
+
+ default:
+ case RRDF_FIELD_FILTER_NONE:
+ return "none";
}
}
@@ -1114,6 +1147,9 @@ buffer_rrdf_table_add_field(BUFFER *wb, size_t field_id, const char *key, const
buffer_json_member_add_boolean(wb, "sticky", options & RRDF_FIELD_OPTS_STICKY);
buffer_json_member_add_string(wb, "summary", rrdf_field_summary_to_string(summary));
buffer_json_member_add_string(wb, "filter", rrdf_field_filter_to_string(filter));
+
+ buffer_json_member_add_boolean(wb, "full_width", options & RRDF_FIELD_OPTS_FULL_WIDTH);
+ buffer_json_member_add_boolean(wb, "wrap", options & RRDF_FIELD_OPTS_WRAP);
}
buffer_json_object_close(wb);
}
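For orientation, a minimal sketch (not part of this commit) of how a caller might use the reworked JSON buffer API above, now that buffer_json_initialize() takes BUFFER_JSON_OPTIONS flags instead of a bool minify; the buffer size and the "values" member name are illustrative assumptions.

    // illustrative caller code, assumptions as noted above
    BUFFER *wb = buffer_create(1024, NULL);
    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
    buffer_json_member_add_array(wb, "values");
    buffer_json_add_array_item_int64(wb, 1);    // with this option, every array item starts on its own line
    buffer_json_add_array_item_int64(wb, 2);
    buffer_json_array_close(wb);
    buffer_json_finalize(wb);
    // ... serve buffer_tostring(wb) to the client ...
    buffer_free(wb);

Passing BUFFER_JSON_OPTIONS_DEFAULT keeps pretty-printed objects but no longer adds per-item newlines inside arrays, and BUFFER_JSON_OPTIONS_MINIFY replaces the old minify=true.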
diff --git a/libnetdata/dyn_conf/dyn_conf.c b/libnetdata/dyn_conf/dyn_conf.c
new file mode 100644
index 000000000..3e098fb7f
--- /dev/null
+++ b/libnetdata/dyn_conf/dyn_conf.c
@@ -0,0 +1,909 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "dyn_conf.h"
+
+#define DYN_CONF_PATH_MAX (4096)
+#define DYN_CONF_DIR VARLIB_DIR "/etc"
+
+#define DYN_CONF_JOB_SCHEMA "job_schema"
+#define DYN_CONF_SCHEMA "schema"
+#define DYN_CONF_MODULE_LIST "modules"
+#define DYN_CONF_JOB_LIST "jobs"
+#define DYN_CONF_CFG_EXT ".cfg"
+
+DICTIONARY *plugins_dict = NULL;
+
+struct deferred_cfg_send {
+ char *plugin_name;
+ char *module_name;
+ char *job_name;
+ struct deferred_cfg_send *next;
+};
+
+struct deferred_cfg_send *deferred_configs = NULL;
+pthread_mutex_t deferred_configs_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_cond_t deferred_configs_cond = PTHREAD_COND_INITIALIZER;
+
+static void deferred_config_push_back(const char *plugin_name, const char *module_name, const char *job_name)
+{
+ struct deferred_cfg_send *deferred = callocz(1, sizeof(struct deferred_cfg_send));
+ deferred->plugin_name = strdupz(plugin_name);
+ if (module_name != NULL) {
+ deferred->module_name = strdupz(module_name);
+ if (job_name != NULL)
+ deferred->job_name = strdupz(job_name);
+ }
+ pthread_mutex_lock(&deferred_configs_lock);
+ struct deferred_cfg_send *last = deferred_configs;
+ if (last == NULL)
+ deferred_configs = deferred;
+ else {
+ while (last->next != NULL)
+ last = last->next;
+ last->next = deferred;
+ }
+ pthread_cond_signal(&deferred_configs_cond);
+ pthread_mutex_unlock(&deferred_configs_lock);
+}
+
+static struct deferred_cfg_send *deferred_config_pop()
+{
+ pthread_mutex_lock(&deferred_configs_lock);
+ while (deferred_configs == NULL)
+ pthread_cond_wait(&deferred_configs_cond, &deferred_configs_lock);
+ struct deferred_cfg_send *deferred = deferred_configs;
+ deferred_configs = deferred_configs->next;
+ pthread_mutex_unlock(&deferred_configs_lock);
+ return deferred;
+}
+
+static void deferred_config_free(struct deferred_cfg_send *dcs)
+{
+ freez(dcs->plugin_name);
+ freez(dcs->module_name);
+ freez(dcs->job_name);
+ freez(dcs);
+}
+
+static int _get_list_of_plugins_json_cb(const DICTIONARY_ITEM *item, void *entry, void *data)
+{
+ UNUSED(item);
+ json_object *obj = (json_object *)data;
+ struct configurable_plugin *plugin = (struct configurable_plugin *)entry;
+
+ json_object *plugin_name = json_object_new_string(plugin->name);
+ json_object_array_add(obj, plugin_name);
+
+ return 0;
+}
+
+json_object *get_list_of_plugins_json()
+{
+ json_object *obj = json_object_new_array();
+
+ dictionary_walkthrough_read(plugins_dict, _get_list_of_plugins_json_cb, obj);
+
+ return obj;
+}
+
+static int _get_list_of_modules_json_cb(const DICTIONARY_ITEM *item, void *entry, void *data)
+{
+ UNUSED(item);
+ json_object *obj = (json_object *)data;
+ struct module *module = (struct module *)entry;
+
+ json_object *json_module = json_object_new_object();
+
+ json_object *json_item = json_object_new_string(module->name);
+ json_object_object_add(json_module, "name", json_item);
+ const char *module_type;
+ switch (module->type) {
+ case MOD_TYPE_SINGLE:
+ module_type = "single";
+ break;
+ case MOD_TYPE_ARRAY:
+ module_type = "job_array";
+ break;
+ default:
+ module_type = "unknown";
+ break;
+ }
+ json_item = json_object_new_string(module_type);
+ json_object_object_add(json_module, "type", json_item);
+
+ json_object_array_add(obj, json_module);
+
+ return 0;
+}
+
+json_object *get_list_of_modules_json(struct configurable_plugin *plugin)
+{
+ json_object *obj = json_object_new_array();
+
+ pthread_mutex_lock(&plugin->lock);
+
+ dictionary_walkthrough_read(plugin->modules, _get_list_of_modules_json_cb, obj);
+
+ pthread_mutex_unlock(&plugin->lock);
+
+ return obj;
+}
+
+const char *job_status2str(enum job_status status)
+{
+ switch (status) {
+ case JOB_STATUS_UNKNOWN:
+ return "unknown";
+ case JOB_STATUS_STOPPED:
+ return "stopped";
+ case JOB_STATUS_RUNNING:
+ return "running";
+ case JOB_STATUS_ERROR:
+ return "error";
+ default:
+ return "unknown";
+ }
+}
+
+static int _get_list_of_jobs_json_cb(const DICTIONARY_ITEM *item, void *entry, void *data)
+{
+ UNUSED(item);
+ json_object *obj = (json_object *)data;
+ struct job *job = (struct job *)entry;
+
+ json_object *json_job = json_object_new_object();
+ json_object *json_item = json_object_new_string(job->name);
+ json_object_object_add(json_job, "name", json_item);
+ json_item = json_object_new_string(job_status2str(job->status));
+ json_object_object_add(json_job, "state", json_item);
+ int64_t last_state_update_s = job->last_state_update / USEC_PER_SEC;
+ int64_t last_state_update_us = job->last_state_update % USEC_PER_SEC;
+
+ json_item = json_object_new_int64(last_state_update_s);
+ json_object_object_add(json_job, "last_state_update_s", json_item);
+
+ json_item = json_object_new_int64(last_state_update_us);
+ json_object_object_add(json_job, "last_state_update_us", json_item);
+
+ json_object_array_add(obj, json_job);
+
+ return 0;
+}
+
+json_object *get_list_of_jobs_json(struct module *module)
+{
+ json_object *obj = json_object_new_array();
+
+ pthread_mutex_lock(&module->lock);
+
+ dictionary_walkthrough_read(module->jobs, _get_list_of_jobs_json_cb, obj);
+
+ pthread_mutex_unlock(&module->lock);
+
+ return obj;
+}
+
+struct job *get_job_by_name(struct module *module, const char *job_name)
+{
+ return dictionary_get(module->jobs, job_name);
+}
+
+int remove_job(struct module *module, struct job *job)
+{
+ // since we are going to unlink() the config file below, make sure we have everything needed to build the proper path
+ if (unlikely(job->name == NULL || module == NULL || module->name == NULL || module->plugin == NULL || module->plugin->name == NULL))
+ return 0;
+
+ enum set_config_result rc = module->delete_job_cb(module->job_config_cb_usr_ctx, module->name, job->name);
+
+ if (rc != SET_CONFIG_ACCEPTED) {
+ error_report("DYNCFG module \"%s\" rejected delete job for \"%s\"", module->name, job->name);
+ return 0;
+ }
+
+ BUFFER *buffer = buffer_create(DYN_CONF_PATH_MAX, NULL);
+ buffer_sprintf(buffer, DYN_CONF_DIR "/%s/%s/%s" DYN_CONF_CFG_EXT, module->plugin->name, module->name, job->name);
+ unlink(buffer_tostring(buffer));
+ buffer_free(buffer);
+ return dictionary_del(module->jobs, job->name);
+}
+
+struct module *get_module_by_name(struct configurable_plugin *plugin, const char *module_name)
+{
+ return dictionary_get(plugin->modules, module_name);
+}
+
+inline struct configurable_plugin *get_plugin_by_name(const char *name)
+{
+ return dictionary_get(plugins_dict, name);
+}
+
+static int store_config(const char *module_name, const char *submodule_name, const char *cfg_idx, dyncfg_config_t cfg)
+{
+ BUFFER *filename = buffer_create(DYN_CONF_PATH_MAX, NULL);
+ buffer_sprintf(filename, DYN_CONF_DIR "/%s", module_name);
+ if (mkdir(buffer_tostring(filename), 0755) == -1) {
+ if (errno != EEXIST) {
+ netdata_log_error("DYNCFG store_config: failed to create module directory %s", buffer_tostring(filename));
+ buffer_free(filename);
+ return 1;
+ }
+ }
+
+ if (submodule_name != NULL) {
+ buffer_sprintf(filename, "/%s", submodule_name);
+ if (mkdir(buffer_tostring(filename), 0755) == -1) {
+ if (errno != EEXIST) {
+ netdata_log_error("DYNCFG store_config: failed to create submodule directory %s", buffer_tostring(filename));
+ buffer_free(filename);
+ return 1;
+ }
+ }
+ }
+
+ if (cfg_idx != NULL)
+ buffer_sprintf(filename, "/%s", cfg_idx);
+
+ buffer_strcat(filename, DYN_CONF_CFG_EXT);
+
+
+ error_report("DYNCFG store_config: %s", buffer_tostring(filename));
+
+ //write to file
+ FILE *f = fopen(buffer_tostring(filename), "w");
+ if (f == NULL) {
+ error_report("DYNCFG store_config: failed to open %s for writing", buffer_tostring(filename));
+ buffer_free(filename);
+ return 1;
+ }
+
+ fwrite(cfg.data, cfg.data_size, 1, f);
+ fclose(f);
+
+ buffer_free(filename);
+ return 0;
+}
+
+dyncfg_config_t load_config(const char *plugin_name, const char *module_name, const char *job_id)
+{
+ BUFFER *filename = buffer_create(DYN_CONF_PATH_MAX, NULL);
+ buffer_sprintf(filename, DYN_CONF_DIR "/%s", plugin_name);
+ if (module_name != NULL)
+ buffer_sprintf(filename, "/%s", module_name);
+
+ if (job_id != NULL)
+ buffer_sprintf(filename, "/%s", job_id);
+
+ buffer_strcat(filename, DYN_CONF_CFG_EXT);
+
+ dyncfg_config_t config;
+ long bytes;
+ config.data = read_by_filename(buffer_tostring(filename), &bytes);
+
+ if (config.data == NULL)
+ error_report("DYNCFG load_config: failed to load config from %s", buffer_tostring(filename));
+
+ config.data_size = bytes;
+
+ buffer_free(filename);
+
+ return config;
+}
+
+char *set_plugin_config(struct configurable_plugin *plugin, dyncfg_config_t cfg)
+{
+ enum set_config_result rc = plugin->set_config_cb(plugin->cb_usr_ctx, &cfg);
+ if (rc != SET_CONFIG_ACCEPTED) {
+ error_report("DYNCFG plugin \"%s\" rejected config", plugin->name);
+ return "plugin rejected config";
+ }
+
+ if (store_config(plugin->name, NULL, NULL, cfg)) {
+ error_report("DYNCFG could not store config for module \"%s\"", plugin->name);
+ return "could not store config on disk";
+ }
+ return NULL;
+}
+
+static char *set_module_config(struct module *mod, dyncfg_config_t cfg)
+{
+ struct configurable_plugin *plugin = mod->plugin;
+
+ enum set_config_result rc = mod->set_config_cb(mod->config_cb_usr_ctx, mod->name, &cfg);
+ if (rc != SET_CONFIG_ACCEPTED) {
+ error_report("DYNCFG module \"%s\" rejected config", plugin->name);
+ return "module rejected config";
+ }
+
+ if (store_config(plugin->name, mod->name, NULL, cfg)) {
+ error_report("DYNCFG could not store config for module \"%s\"", mod->name);
+ return "could not store config on disk";
+ }
+
+ return NULL;
+}
+
+struct job *job_new()
+{
+ struct job *job = callocz(1, sizeof(struct job));
+ job->state = JOB_STATUS_UNKNOWN;
+ job->last_state_update = now_realtime_usec();
+ return job;
+}
+
+static int set_job_config(struct job *job, dyncfg_config_t cfg)
+{
+ struct module *mod = job->module;
+ enum set_config_result rt = mod->set_job_config_cb(mod->job_config_cb_usr_ctx, mod->name, job->name, &cfg);
+
+ if (rt != SET_CONFIG_ACCEPTED) {
+ error_report("DYNCFG module \"%s\" rejected config for job \"%s\"", mod->name, job->name);
+ return 1;
+ }
+
+ if (store_config(mod->plugin->name, mod->name, job->name, cfg)) {
+ error_report("DYNCFG could not store config for module \"%s\"", mod->name);
+ return 1;
+ }
+
+ return 0;
+}
+
+struct job *add_job(struct module *mod, const char *job_id, dyncfg_config_t cfg)
+{
+ struct job *job = job_new();
+ job->name = strdupz(job_id);
+ job->module = mod;
+
+ if (set_job_config(job, cfg)) {
+ freez(job->name);
+ freez(job);
+ return NULL;
+ }
+
+ dictionary_set(mod->jobs, job->name, job, sizeof(job));
+
+ return job;
+
+}
+
+void module_del_cb(const DICTIONARY_ITEM *item, void *value, void *data)
+{
+ UNUSED(item);
+ UNUSED(data);
+ struct module *mod = (struct module *)value;
+ dictionary_destroy(mod->jobs);
+ freez(mod->name);
+ freez(mod);
+}
+
+
+const DICTIONARY_ITEM *register_plugin(struct configurable_plugin *plugin)
+{
+ if (get_plugin_by_name(plugin->name) != NULL) {
+ error_report("DYNCFG plugin \"%s\" already registered", plugin->name);
+ return NULL;
+ }
+
+ if (plugin->set_config_cb == NULL) {
+ error_report("DYNCFG plugin \"%s\" has no set_config_cb", plugin->name);
+ return NULL;
+ }
+
+ pthread_mutex_init(&plugin->lock, NULL);
+
+ plugin->modules = dictionary_create(DICT_OPTION_VALUE_LINK_DONT_CLONE);
+ dictionary_register_delete_callback(plugin->modules, module_del_cb, NULL);
+
+ deferred_config_push_back(plugin->name, NULL, NULL);
+
+ dictionary_set(plugins_dict, plugin->name, plugin, sizeof(plugin));
+
+ // the plugin keeps the pointer to the dictionary item, so we need to acquire it
+ return dictionary_get_and_acquire_item(plugins_dict, plugin->name);
+}
+
+void unregister_plugin(const DICTIONARY_ITEM *plugin)
+{
+ struct configurable_plugin *plug = dictionary_acquired_item_value(plugin);
+ dictionary_acquired_item_release(plugins_dict, plugin);
+ dictionary_del(plugins_dict, plug->name);
+}
+
+void job_del_cb(const DICTIONARY_ITEM *item, void *value, void *data)
+{
+ UNUSED(item);
+ UNUSED(data);
+ struct job *job = (struct job *)value;
+ freez(job->reason);
+ freez(job->name);
+ freez(job);
+}
+
+int register_module(struct configurable_plugin *plugin, struct module *module)
+{
+ if (get_module_by_name(plugin, module->name) != NULL) {
+ error_report("DYNCFG module \"%s\" already registered", module->name);
+ return 1;
+ }
+
+ pthread_mutex_init(&module->lock, NULL);
+
+ deferred_config_push_back(plugin->name, module->name, NULL);
+
+ module->plugin = plugin;
+
+ if (module->type == MOD_TYPE_ARRAY) {
+ module->jobs = dictionary_create(DICT_OPTION_VALUE_LINK_DONT_CLONE);
+ dictionary_register_delete_callback(module->jobs, job_del_cb, NULL);
+
+ // load all jobs from disk
+ BUFFER *path = buffer_create(DYN_CONF_PATH_MAX, NULL);
+ buffer_sprintf(path, "%s/%s/%s", DYN_CONF_DIR, plugin->name, module->name);
+ DIR *dir = opendir(buffer_tostring(path));
+ if (dir != NULL) {
+ struct dirent *ent;
+ while ((ent = readdir(dir)) != NULL) {
+ if (ent->d_name[0] == '.')
+ continue;
+ if (ent->d_type != DT_REG)
+ continue;
+ size_t len = strnlen(ent->d_name, NAME_MAX);
+ if (len <= strlen(DYN_CONF_CFG_EXT))
+ continue;
+ if (strcmp(ent->d_name + len - strlen(DYN_CONF_CFG_EXT), DYN_CONF_CFG_EXT) != 0)
+ continue;
+ ent->d_name[len - strlen(DYN_CONF_CFG_EXT)] = '\0';
+
+ struct job *job = job_new();
+ job->name = strdupz(ent->d_name);
+ job->module = module;
+ dictionary_set(module->jobs, job->name, job, sizeof(job));
+
+ deferred_config_push_back(plugin->name, module->name, job->name);
+ }
+ closedir(dir);
+ }
+ buffer_free(path);
+ }
+
+ dictionary_set(plugin->modules, module->name, module, sizeof(module));
+
+ return 0;
+}
+
+void freez_dyncfg(void *ptr) {
+ freez(ptr);
+}
+
+static void handle_dyncfg_root(struct uni_http_response *resp, int method)
+{
+ if (method != HTTP_METHOD_GET) {
+ resp->content = "method not allowed";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_METHOD_NOT_ALLOWED;
+ return;
+ }
+ json_object *obj = get_list_of_plugins_json();
+ json_object *wrapper = json_object_new_object();
+ json_object_object_add(wrapper, "configurable_plugins", obj);
+ resp->content = strdupz(json_object_to_json_string_ext(wrapper, JSON_C_TO_STRING_PRETTY));
+ json_object_put(wrapper);
+ resp->status = HTTP_RESP_OK;
+ resp->content_type = CT_APPLICATION_JSON;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = strlen(resp->content);
+}
+
+static void handle_plugin_root(struct uni_http_response *resp, int method, struct configurable_plugin *plugin, void *post_payload, size_t post_payload_size)
+{
+ switch(method) {
+ case HTTP_METHOD_GET:
+ {
+ dyncfg_config_t cfg = plugin->get_config_cb(plugin->cb_usr_ctx);
+ resp->content = mallocz(cfg.data_size);
+ memcpy(resp->content, cfg.data, cfg.data_size);
+ resp->status = HTTP_RESP_OK;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = cfg.data_size;
+ return;
+ }
+ case HTTP_METHOD_PUT:
+ {
+ char *response;
+ if (post_payload == NULL) {
+ resp->content = "no payload";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ return;
+ }
+ dyncfg_config_t cont = {
+ .data = post_payload,
+ .data_size = post_payload_size
+ };
+ response = set_plugin_config(plugin, cont);
+ if (response == NULL) {
+ resp->status = HTTP_RESP_OK;
+ resp->content = "OK";
+ resp->content_length = strlen(resp->content);
+ } else {
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ resp->content = response;
+ resp->content_length = strlen(resp->content);
+ }
+ return;
+ }
+ default:
+ resp->content = "method not allowed";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_METHOD_NOT_ALLOWED;
+ return;
+ }
+}
+
+void handle_module_root(struct uni_http_response *resp, int method, struct configurable_plugin *plugin, const char *module, void *post_payload, size_t post_payload_size)
+{
+ if (strncmp(module, DYN_CONF_SCHEMA, strlen(DYN_CONF_SCHEMA)) == 0) {
+ dyncfg_config_t cfg = plugin->get_config_schema_cb(plugin->cb_usr_ctx);
+ resp->content = mallocz(cfg.data_size);
+ memcpy(resp->content, cfg.data, cfg.data_size);
+ resp->status = HTTP_RESP_OK;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = cfg.data_size;
+ return;
+ }
+ if (strncmp(module, DYN_CONF_MODULE_LIST, strlen(DYN_CONF_MODULE_LIST)) == 0) {
+ if (method != HTTP_METHOD_GET) {
+ resp->content = "method not allowed (only GET)";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_METHOD_NOT_ALLOWED;
+ return;
+ }
+ json_object *obj = get_list_of_modules_json(plugin);
+ json_object *wrapper = json_object_new_object();
+ json_object_object_add(wrapper, "modules", obj);
+ resp->content = strdupz(json_object_to_json_string_ext(wrapper, JSON_C_TO_STRING_PRETTY));
+ json_object_put(wrapper);
+ resp->status = HTTP_RESP_OK;
+ resp->content_type = CT_APPLICATION_JSON;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = strlen(resp->content);
+ return;
+ }
+ struct module *mod = get_module_by_name(plugin, module);
+ if (mod == NULL) {
+ resp->content = "module not found";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_NOT_FOUND;
+ return;
+ }
+ if (method == HTTP_METHOD_GET) {
+ dyncfg_config_t cfg = mod->get_config_cb(mod->config_cb_usr_ctx, mod->name);
+ resp->content = mallocz(cfg.data_size);
+ memcpy(resp->content, cfg.data, cfg.data_size);
+ resp->status = HTTP_RESP_OK;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = cfg.data_size;
+ return;
+ } else if (method == HTTP_METHOD_PUT) {
+ char *response;
+ if (post_payload == NULL) {
+ resp->content = "no payload";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ return;
+ }
+ dyncfg_config_t cont = {
+ .data = post_payload,
+ .data_size = post_payload_size
+ };
+ response = set_module_config(mod, cont);
+ if (response == NULL) {
+ resp->status = HTTP_RESP_OK;
+ resp->content = "OK";
+ resp->content_length = strlen(resp->content);
+ } else {
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ resp->content = response;
+ resp->content_length = strlen(resp->content);
+ }
+ return;
+ }
+ resp->content = "method not allowed";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_METHOD_NOT_ALLOWED;
+}
+
+static inline void _handle_job_root(struct uni_http_response *resp, int method, struct module *mod, const char *job_id, void *post_payload, size_t post_payload_size, struct job *job)
+{
+ if (method == HTTP_METHOD_POST) {
+ if (job != NULL) {
+ resp->content = "can't POST, job already exists (use PUT to update?)";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ return;
+ }
+ if (post_payload == NULL) {
+ resp->content = "no payload";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ return;
+ }
+ dyncfg_config_t cont = {
+ .data = post_payload,
+ .data_size = post_payload_size
+ };
+ job = add_job(mod, job_id, cont);
+ if (job == NULL) {
+ resp->content = "failed to add job";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_INTERNAL_SERVER_ERROR;
+ return;
+ }
+ resp->status = HTTP_RESP_OK;
+ resp->content = "OK";
+ resp->content_length = strlen(resp->content);
+ return;
+ }
+ if (job == NULL) {
+ resp->content = "job not found";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_NOT_FOUND;
+ return;
+ }
+ switch (method) {
+ case HTTP_METHOD_GET:
+ {
+ dyncfg_config_t cfg = mod->get_job_config_cb(mod->job_config_cb_usr_ctx, mod->name, job->name);
+ resp->content = mallocz(cfg.data_size);
+ memcpy(resp->content, cfg.data, cfg.data_size);
+ resp->status = HTTP_RESP_OK;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = cfg.data_size;
+ return;
+ }
+ case HTTP_METHOD_PUT:
+ {
+ if (post_payload == NULL) {
+ resp->content = "missing payload";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ return;
+ }
+ dyncfg_config_t cont = {
+ .data = post_payload,
+ .data_size = post_payload_size
+ };
+ if(set_job_config(job, cont)) {
+ resp->status = HTTP_RESP_BAD_REQUEST;
+ resp->content = "failed to set job config";
+ resp->content_length = strlen(resp->content);
+ return;
+ }
+ resp->status = HTTP_RESP_OK;
+ resp->content = "OK";
+ resp->content_length = strlen(resp->content);
+ return;
+ }
+ case HTTP_METHOD_DELETE:
+ {
+ if (!remove_job(mod, job)) {
+ resp->content = "failed to remove job";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_INTERNAL_SERVER_ERROR;
+ return;
+ }
+ resp->status = HTTP_RESP_OK;
+ resp->content = "OK";
+ resp->content_length = strlen(resp->content);
+ return;
+ }
+ default:
+ resp->content = "method not allowed (only GET, PUT, DELETE)";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_METHOD_NOT_ALLOWED;
+ return;
+ }
+}
+
+void handle_job_root(struct uni_http_response *resp, int method, struct module *mod, const char *job_id, void *post_payload, size_t post_payload_size)
+{
+ if (strncmp(job_id, DYN_CONF_SCHEMA, strlen(DYN_CONF_SCHEMA)) == 0) {
+ dyncfg_config_t cfg = mod->get_config_schema_cb(mod->config_cb_usr_ctx, mod->name);
+ resp->content = mallocz(cfg.data_size);
+ memcpy(resp->content, cfg.data, cfg.data_size);
+ resp->status = HTTP_RESP_OK;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = cfg.data_size;
+ return;
+ }
+ if (strncmp(job_id, DYN_CONF_JOB_SCHEMA, strlen(DYN_CONF_JOB_SCHEMA)) == 0) {
+ dyncfg_config_t cfg = mod->get_job_config_schema_cb(mod->job_config_cb_usr_ctx, mod->name);
+ resp->content = mallocz(cfg.data_size);
+ memcpy(resp->content, cfg.data, cfg.data_size);
+ resp->status = HTTP_RESP_OK;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = cfg.data_size;
+ return;
+ }
+ if (strncmp(job_id, DYN_CONF_JOB_LIST, strlen(DYN_CONF_JOB_LIST)) == 0) {
+ if (mod->type != MOD_TYPE_ARRAY) {
+ resp->content = "module type is not job_array (can't get the list of jobs)";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_NOT_FOUND;
+ return;
+ }
+ if (method != HTTP_METHOD_GET) {
+ resp->content = "method not allowed (only GET)";
+ resp->content_length = strlen(resp->content);
+ resp->status = HTTP_RESP_METHOD_NOT_ALLOWED;
+ return;
+ }
+ json_object *obj = get_list_of_jobs_json(mod);
+ json_object *wrapper = json_object_new_object();
+ json_object_object_add(wrapper, "jobs", obj);
+ resp->content = strdupz(json_object_to_json_string_ext(wrapper, JSON_C_TO_STRING_PRETTY));
+ json_object_put(wrapper);
+ resp->status = HTTP_RESP_OK;
+ resp->content_type = CT_APPLICATION_JSON;
+ resp->content_free = freez_dyncfg;
+ resp->content_length = strlen(resp->content);
+ return;
+ }
+ const DICTIONARY_ITEM *job_item = dictionary_get_and_acquire_item(mod->jobs, job_id);
+ struct job *job = dictionary_acquired_item_value(job_item);
+
+ _handle_job_root(resp, method, mod, job_id, post_payload, post_payload_size, job);
+
+ dictionary_acquired_item_release(mod->jobs, job_item);
+}
+
+struct uni_http_response dyn_conf_process_http_request(int method, const char *plugin, const char *module, const char *job_id, void *post_payload, size_t post_payload_size)
+{
+ struct uni_http_response resp = {
+ .status = HTTP_RESP_INTERNAL_SERVER_ERROR,
+ .content_type = CT_TEXT_PLAIN,
+ .content = HTTP_RESP_INTERNAL_SERVER_ERROR_STR,
+ .content_free = NULL,
+ .content_length = 0
+ };
+ if (plugin == NULL) {
+ handle_dyncfg_root(&resp, method);
+ return resp;
+ }
+ const DICTIONARY_ITEM *plugin_item = dictionary_get_and_acquire_item(plugins_dict, plugin);
+ if (plugin_item == NULL) {
+ resp.content = "plugin not found";
+ resp.content_length = strlen(resp.content);
+ resp.status = HTTP_RESP_NOT_FOUND;
+ return resp;
+ }
+ struct configurable_plugin *plug = dictionary_acquired_item_value(plugin_item);
+ if (module == NULL) {
+ handle_plugin_root(&resp, method, plug, post_payload, post_payload_size);
+ goto EXIT_PLUGIN;
+ }
+ if (job_id == NULL) {
+ handle_module_root(&resp, method, plug, module, post_payload, post_payload_size);
+ goto EXIT_PLUGIN;
+ }
+ // for modules we do not do get_and_acquire as modules are never removed (only together with the plugin)
+ struct module *mod = get_module_by_name(plug, module);
+ if (mod == NULL) {
+ resp.content = "module not found";
+ resp.content_length = strlen(resp.content);
+ resp.status = HTTP_RESP_NOT_FOUND;
+ goto EXIT_PLUGIN;
+ }
+ if (mod->type != MOD_TYPE_ARRAY) {
+ resp.content = "module is not array";
+ resp.content_length = strlen(resp.content);
+ resp.status = HTTP_RESP_NOT_FOUND;
+ goto EXIT_PLUGIN;
+ }
+ handle_job_root(&resp, method, mod, job_id, post_payload, post_payload_size);
+
+EXIT_PLUGIN:
+ dictionary_acquired_item_release(plugins_dict, plugin_item);
+ return resp;
+}
+
+void plugin_del_cb(const DICTIONARY_ITEM *item, void *value, void *data)
+{
+ UNUSED(item);
+ UNUSED(data);
+ struct configurable_plugin *plugin = (struct configurable_plugin *)value;
+ dictionary_destroy(plugin->modules);
+ freez(plugin->name);
+ freez(plugin);
+}
+
+void report_job_status(struct configurable_plugin *plugin, const char *module_name, const char *job_name, enum job_status status, int status_code, char *reason)
+{
+ const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(plugins_dict, plugin->name);
+ if (item == NULL) {
+ netdata_log_error("plugin %s not found", plugin->name);
+ return;
+ }
+ struct configurable_plugin *plug = dictionary_acquired_item_value(item);
+ struct module *mod = get_module_by_name(plug, module_name);
+ if (mod == NULL) {
+ netdata_log_error("module %s not found", module_name);
+ goto EXIT_PLUGIN;
+ }
+ if (mod->type != MOD_TYPE_ARRAY) {
+ netdata_log_error("module %s is not array", module_name);
+ goto EXIT_PLUGIN;
+ }
+ const DICTIONARY_ITEM *job_item = dictionary_get_and_acquire_item(mod->jobs, job_name);
+ if (job_item == NULL) {
+ netdata_log_error("job %s not found", job_name);
+ goto EXIT_PLUGIN;
+ }
+ struct job *job = dictionary_acquired_item_value(job_item);
+ job->status = status;
+ job->state = status_code;
+ if (job->reason != NULL) {
+ freez(job->reason);
+ }
+ job->reason = reason;
+ job->last_state_update = now_realtime_usec();
+
+ dictionary_acquired_item_release(mod->jobs, job_item);
+
+EXIT_PLUGIN:
+ dictionary_acquired_item_release(plugins_dict, item);
+}
+
+int dyn_conf_init(void)
+{
+ if (mkdir(DYN_CONF_DIR, 0755) == -1) {
+ if (errno != EEXIST) {
+ netdata_log_error("failed to create directory for dynamic configuration");
+ return 1;
+ }
+ }
+
+ plugins_dict = dictionary_create(DICT_OPTION_VALUE_LINK_DONT_CLONE);
+ dictionary_register_delete_callback(plugins_dict, plugin_del_cb, NULL);
+
+ return 0;
+}
+
+void *dyncfg_main(void *in)
+{
+ while (!netdata_exit) {
+ struct deferred_cfg_send *dcs = deferred_config_pop();
+ const DICTIONARY_ITEM *plugin_item = dictionary_get_and_acquire_item(plugins_dict, dcs->plugin_name);
+ if (plugin_item == NULL) {
+ error_report("DYNCFG, plugin %s not found", dcs->plugin_name);
+ deferred_config_free(dcs);
+ continue;
+ }
+ struct configurable_plugin *plugin = dictionary_acquired_item_value(plugin_item);
+ if (dcs->module_name == NULL) {
+ dyncfg_config_t cfg = load_config(dcs->plugin_name, NULL, NULL);
+ if (cfg.data != NULL) {
+ plugin->set_config_cb(plugin->cb_usr_ctx, &cfg);
+ freez(cfg.data);
+ }
+ } else if (dcs->job_name == NULL) {
+ dyncfg_config_t cfg = load_config(dcs->plugin_name, dcs->module_name, NULL);
+ if (cfg.data != NULL) {
+ struct module *mod = get_module_by_name(plugin, dcs->module_name);
+ mod->set_config_cb(mod->config_cb_usr_ctx, mod->name, &cfg);
+ freez(cfg.data);
+ }
+ } else {
+ dyncfg_config_t cfg = load_config(dcs->plugin_name, dcs->module_name, dcs->job_name);
+ if (cfg.data != NULL) {
+ struct module *mod = get_module_by_name(plugin, dcs->module_name);
+ mod->set_job_config_cb(mod->job_config_cb_usr_ctx, mod->name, dcs->job_name, &cfg);
+ freez(cfg.data);
+ }
+ }
+ deferred_config_free(dcs);
+ dictionary_acquired_item_release(plugins_dict, plugin_item);
+ }
+ return NULL;
+}
diff --git a/libnetdata/dyn_conf/dyn_conf.h b/libnetdata/dyn_conf/dyn_conf.h
new file mode 100644
index 000000000..f10ae6a12
--- /dev/null
+++ b/libnetdata/dyn_conf/dyn_conf.h
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef DYN_CONF_H
+#define DYN_CONF_H
+
+#include "../libnetdata.h"
+
+enum module_type {
+ MOD_TYPE_UNKNOWN = 0,
+ MOD_TYPE_ARRAY,
+ MOD_TYPE_SINGLE
+};
+
+static inline enum module_type str2_module_type(const char *type_name)
+{
+ if (strcmp(type_name, "job_array") == 0)
+ return MOD_TYPE_ARRAY;
+ else if (strcmp(type_name, "single") == 0)
+ return MOD_TYPE_SINGLE;
+ return MOD_TYPE_UNKNOWN;
+}
+
+struct dyncfg_config {
+ void *data;
+ size_t data_size;
+};
+
+typedef struct dyncfg_config dyncfg_config_t;
+
+struct configurable_plugin;
+struct module;
+
+enum job_status {
+ JOB_STATUS_UNKNOWN = 0, // State used until plugin reports first status
+ JOB_STATUS_STOPPED,
+ JOB_STATUS_RUNNING,
+ JOB_STATUS_ERROR
+};
+
+inline enum job_status str2job_state(const char *state_name) {
+ if (strcmp(state_name, "stopped") == 0)
+ return JOB_STATUS_STOPPED;
+ else if (strcmp(state_name, "running") == 0)
+ return JOB_STATUS_RUNNING;
+ else if (strcmp(state_name, "error") == 0)
+ return JOB_STATUS_ERROR;
+ return JOB_STATUS_UNKNOWN;
+}
+
+enum set_config_result {
+ SET_CONFIG_ACCEPTED = 0,
+ SET_CONFIG_REJECTED,
+ SET_CONFIG_DEFFER
+};
+
+struct job
+{
+ char *name;
+
+ //state reported by config
+ enum job_status status; // reported by plugin, enum as this has to be interpreted by UI
+ int state; // code reported by plugin which can mean anything plugin wants
+ char *reason; // reported by plugin, can be NULL (optional)
+
+ usec_t last_state_update;
+
+ struct module *module;
+};
+
+struct module
+{
+ pthread_mutex_t lock;
+ char *name;
+ enum module_type type;
+
+ struct configurable_plugin *plugin;
+
+ // module config
+ enum set_config_result (*set_config_cb)(void *usr_ctx, const char *module_name, dyncfg_config_t *cfg);
+ dyncfg_config_t (*get_config_cb)(void *usr_ctx, const char *name);
+ dyncfg_config_t (*get_config_schema_cb)(void *usr_ctx, const char *name);
+ void *config_cb_usr_ctx;
+
+ DICTIONARY *jobs;
+
+ // jobs config
+ dyncfg_config_t (*get_job_config_cb)(void *usr_ctx, const char *module_name, const char *job_name);
+ dyncfg_config_t (*get_job_config_schema_cb)(void *usr_ctx, const char *module_name);
+ enum set_config_result (*set_job_config_cb)(void *usr_ctx, const char *module_name, const char *job_name, dyncfg_config_t *cfg);
+ enum set_config_result (*delete_job_cb)(void *usr_ctx, const char *module_name, const char *job_name);
+ void *job_config_cb_usr_ctx;
+};
+
+struct configurable_plugin {
+ pthread_mutex_t lock;
+ char *name;
+ DICTIONARY *modules;
+ const char *schema;
+
+ dyncfg_config_t (*get_config_cb)(void *usr_ctx);
+ dyncfg_config_t (*get_config_schema_cb)(void *usr_ctx);
+ enum set_config_result (*set_config_cb)(void *usr_ctx, dyncfg_config_t *cfg);
+ void *cb_usr_ctx; // context for all callbacks (split if needed in future)
+};
+
+// API to be used by plugins
+const DICTIONARY_ITEM *register_plugin(struct configurable_plugin *plugin);
+void unregister_plugin(const DICTIONARY_ITEM *plugin);
+int register_module(struct configurable_plugin *plugin, struct module *module);
+
+void report_job_status(struct configurable_plugin *plugin, const char *module_name, const char *job_name, enum job_status status, int status_code, char *reason);
+
+// API to be used by the web server(s)
+json_object *get_list_of_plugins_json();
+struct configurable_plugin *get_plugin_by_name(const char *name);
+
+json_object *get_list_of_modules_json(struct configurable_plugin *plugin);
+struct module *get_module_by_name(struct configurable_plugin *plugin, const char *module_name);
+
+// helper struct to keep the interface between the internal web server and h2o the same
+struct uni_http_response {
+ int status;
+ char *content;
+ size_t content_length;
+ HTTP_CONTENT_TYPE content_type;
+ void (*content_free)(void *);
+};
+
+struct uni_http_response dyn_conf_process_http_request(int method, const char *plugin, const char *module, const char *job_id, void *payload, size_t payload_size);
+
+// API to be used by main netdata process, initialization and destruction etc.
+int dyn_conf_init(void);
+void freez_dyncfg(void *ptr);
+void *dyncfg_main(void *in);
+
+#endif //DYN_CONF_H
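For orientation, a minimal sketch (not part of this commit) of how a plugin might register itself through this API, using only the structures and functions declared above; the plugin name and callback bodies are illustrative assumptions.

    // illustrative plugin-side code, assumptions as noted above
    static enum set_config_result my_set_config(void *usr_ctx, dyncfg_config_t *cfg) {
        (void)usr_ctx; (void)cfg;                 // a real plugin would validate and apply the config here
        return SET_CONFIG_ACCEPTED;
    }

    static dyncfg_config_t my_get_config(void *usr_ctx) {
        (void)usr_ctx;
        static const char payload[] = "{}";       // a real plugin would serialize its current config
        return (dyncfg_config_t){ .data = (void *)payload, .data_size = sizeof(payload) - 1 };
    }

    void my_plugin_register(void) {
        struct configurable_plugin *plugin = callocz(1, sizeof(*plugin));
        plugin->name = strdupz("my_plugin");              // freed by the plugins dictionary delete callback
        plugin->set_config_cb = my_set_config;            // required: register_plugin() rejects a NULL set_config_cb
        plugin->get_config_cb = my_get_config;
        plugin->get_config_schema_cb = my_get_config;     // the schema callback shares the stub in this sketch
        plugin->cb_usr_ctx = NULL;

        const DICTIONARY_ITEM *item = register_plugin(plugin);
        (void)item;   // keep this item and hand it to unregister_plugin() on shutdown
    }

Modules and jobs are added afterwards with register_module(), and the agent serves the configuration over HTTP through dyn_conf_process_http_request().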
diff --git a/libnetdata/ebpf/ebpf.h b/libnetdata/ebpf/ebpf.h
index 88dbca379..691a4c26e 100644
--- a/libnetdata/ebpf/ebpf.h
+++ b/libnetdata/ebpf/ebpf.h
@@ -181,6 +181,17 @@ enum netdata_controller {
NETDATA_CONTROLLER_APPS_ENABLED,
NETDATA_CONTROLLER_APPS_LEVEL,
+ // These indexes show the number of elements
+ // stored inside hash tables.
+ //
+ // We have indexes to count increase and
+ // decrease events, because __sync_fetch_and_sub
+ // generates compilation errors.
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_PID_TABLE_DEL,
+ NETDATA_CONTROLLER_TEMP_TABLE_ADD,
+ NETDATA_CONTROLLER_TEMP_TABLE_DEL,
+
NETDATA_CONTROLLER_END
};
@@ -278,6 +289,17 @@ enum ebpf_threads_status {
NETDATA_THREAD_EBPF_NOT_RUNNING // thread was never started
};
+enum ebpf_global_table_values {
+ NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD, // Count elements added inside PID table
+ NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL, // Count elements removed from PID table
+ NETDATA_EBPF_GLOBAL_TABLE_TEMP_TABLE_ADD, // Count elements added inside TEMP table
+ NETDATA_EBPF_GLOBAL_TABLE_TEMP_TABLE_DEL, // Count elements removed from TEMP table
+
+ NETDATA_EBPF_GLOBAL_TABLE_STATUS_END
+};
+
+typedef uint64_t netdata_idx_t;
+
typedef struct ebpf_module {
const char *thread_name;
const char *config_name;
@@ -313,11 +335,13 @@ typedef struct ebpf_module {
// period to run
uint32_t running_time; // internal usage, this is used to reset a value when a new request happens.
uint32_t lifetime;
+
+ netdata_idx_t hash_table_stats[NETDATA_EBPF_GLOBAL_TABLE_STATUS_END];
} ebpf_module_t;
#define EBPF_DEFAULT_LIFETIME 300
-// This will be present until all functions are merged
-#define EBPF_NON_FUNCTION_LIFE_TIME 86400
+// This will be present until all functions are merged. The deadline is planned for 68 years since plugin start
+#define EBPF_NON_FUNCTION_LIFE_TIME UINT_MAX
int ebpf_get_kernel_version();
int get_redhat_release();
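The comment in the hunk above explains why the eBPF side keeps separate ADD/DEL counters instead of decrementing a single value with __sync_fetch_and_sub. A self-contained sketch of the resulting arithmetic on the user-space side; the array layout mirrors hash_table_stats[], but the names below are stand-ins, not netdata symbols.

    // illustrative user-space derivation, assumptions as noted above
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t netdata_idx_t;

    enum { PID_TABLE_ADD, PID_TABLE_DEL };   // stand-ins for NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD/DEL

    static netdata_idx_t current_pid_entries(const netdata_idx_t *stats) {
        // both counters only ever grow, so the current table size is simply their difference
        return stats[PID_TABLE_ADD] - stats[PID_TABLE_DEL];
    }

    int main(void) {
        netdata_idx_t stats[] = { 120, 35 };   // e.g. 120 insertions and 35 removals observed so far
        printf("pid table entries: %llu\n", (unsigned long long)current_pid_entries(stats));
        return 0;
    }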
diff --git a/libnetdata/facets/Makefile.am b/libnetdata/facets/Makefile.am
new file mode 100644
index 000000000..161784b8f
--- /dev/null
+++ b/libnetdata/facets/Makefile.am
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
diff --git a/libnetdata/facets/README.md b/libnetdata/facets/README.md
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/libnetdata/facets/README.md
diff --git a/libnetdata/facets/facets.c b/libnetdata/facets/facets.c
new file mode 100644
index 000000000..8762b43b9
--- /dev/null
+++ b/libnetdata/facets/facets.c
@@ -0,0 +1,851 @@
+#include "facets.h"
+
+#define FACET_VALUE_UNSET "-"
+#define HISTOGRAM_COLUMNS 60
+
+static void facets_row_free(FACETS *facets __maybe_unused, FACET_ROW *row);
+
+// ----------------------------------------------------------------------------
+
+time_t calculate_bar_width(time_t before, time_t after) {
+ // Array of valid durations in seconds
+ static time_t valid_durations[] = {
+ 1,
+ 15,
+ 30,
+ 1 * 60, 2 * 60, 3 * 60, 5 * 60, 10 * 60, 15 * 60, 30 * 60, // minutes
+ 1 * 3600, 2 * 3600, 6 * 3600, 8 * 3600, 12 * 3600, // hours
+ 1 * 86400, 2 * 86400, 3 * 86400, 5 * 86400, 7 * 86400, 14 * 86400, // days
+ 1 * (30*86400) // months
+ };
+ static int array_size = sizeof(valid_durations) / sizeof(valid_durations[0]);
+
+ time_t duration = before - after;
+ time_t bar_width = 1;
+
+ for (int i = array_size - 1; i >= 0; --i) {
+ if (duration / valid_durations[i] >= HISTOGRAM_COLUMNS) {
+ bar_width = valid_durations[i];
+ break;
+ }
+ }
+
+ return bar_width;
+}
+
+// ----------------------------------------------------------------------------
+
+static inline void uint32_to_char(uint32_t num, char *out) {
+ static char id_encoding_characters[64 + 1] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ.abcdefghijklmnopqrstuvwxyz_0123456789";
+
+ int i;
+ for(i = 5; i >= 0; --i) {
+ out[i] = id_encoding_characters[num & 63];
+ num >>= 6;
+ }
+ out[6] = '\0';
+}
+
+inline void facets_string_hash(const char *src, char *out) {
+ uint32_t hash1 = fnv1a_hash32(src);
+ uint32_t hash2 = djb2_hash32(src);
+ uint32_t hash3 = larson_hash32(src);
+
+ uint32_to_char(hash1, out);
+ uint32_to_char(hash2, &out[6]);
+ uint32_to_char(hash3, &out[12]);
+
+ out[18] = '\0';
+}
+
+// ----------------------------------------------------------------------------
+
+typedef struct facet_value {
+ const char *name;
+
+ bool selected;
+
+ uint32_t rows_matching_facet_value;
+ uint32_t final_facet_value_counter;
+} FACET_VALUE;
+
+struct facet_key {
+ const char *name;
+
+ DICTIONARY *values;
+
+ FACET_KEY_OPTIONS options;
+
+ bool default_selected_for_values; // the default "selected" for all values in the dictionary
+
+ // members about the current row
+ uint32_t key_found_in_row;
+ uint32_t key_values_selected_in_row;
+
+ struct {
+ char hash[FACET_STRING_HASH_SIZE];
+ bool updated;
+ BUFFER *b;
+ } current_value;
+
+ uint32_t order;
+
+ struct {
+ facet_dynamic_row_t cb;
+ void *data;
+ } dynamic;
+
+ struct {
+ facets_key_transformer_t cb;
+ void *data;
+ } transform;
+
+ struct facet_key *prev, *next;
+};
+
+struct facets {
+ SIMPLE_PATTERN *visible_keys;
+ SIMPLE_PATTERN *excluded_keys;
+ SIMPLE_PATTERN *included_keys;
+
+ FACETS_OPTIONS options;
+ usec_t anchor;
+
+ SIMPLE_PATTERN *query; // the full text search pattern
+ size_t keys_filtered_by_query; // the number of fields we do full text search (constant)
+ size_t keys_matched_by_query; // the number of fields matched the full text search (per row)
+
+ DICTIONARY *accepted_params;
+
+ FACET_KEY *keys_ll;
+ DICTIONARY *keys;
+ FACET_ROW *base; // double linked list of the selected facets rows
+
+ uint32_t items_to_return;
+ uint32_t max_items_to_return;
+ uint32_t order;
+
+ struct {
+ FACET_ROW *last_added;
+
+ size_t evaluated;
+ size_t matched;
+
+ size_t first;
+ size_t forwards;
+ size_t backwards;
+ size_t skips_before;
+ size_t skips_after;
+ size_t prepends;
+ size_t appends;
+ size_t shifts;
+ } operations;
+};
+
+// ----------------------------------------------------------------------------
+
+static inline void facet_value_is_used(FACET_KEY *k, FACET_VALUE *v) {
+ if(!k->key_found_in_row)
+ v->rows_matching_facet_value++;
+
+ k->key_found_in_row++;
+
+ if(v->selected)
+ k->key_values_selected_in_row++;
+}
+
+static inline bool facets_key_is_facet(FACETS *facets, FACET_KEY *k) {
+ bool included = true, excluded = false;
+
+ if(k->options & (FACET_KEY_OPTION_FACET | FACET_KEY_OPTION_NO_FACET)) {
+ if(k->options & FACET_KEY_OPTION_FACET) {
+ included = true;
+ excluded = false;
+ }
+ else if(k->options & FACET_KEY_OPTION_NO_FACET) {
+ included = false;
+ excluded = true;
+ }
+ }
+ else {
+ if (facets->included_keys) {
+ if (!simple_pattern_matches(facets->included_keys, k->name))
+ included = false;
+ }
+
+ if (facets->excluded_keys) {
+ if (simple_pattern_matches(facets->excluded_keys, k->name))
+ excluded = true;
+ }
+ }
+
+ if(included && !excluded) {
+ k->options |= FACET_KEY_OPTION_FACET;
+ k->options &= ~FACET_KEY_OPTION_NO_FACET;
+ return true;
+ }
+
+ k->options |= FACET_KEY_OPTION_NO_FACET;
+ k->options &= ~FACET_KEY_OPTION_FACET;
+ return false;
+}
+
+// ----------------------------------------------------------------------------
+// FACET_VALUE dictionary hooks
+
+static void facet_value_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ FACET_VALUE *v = value;
+ FACET_KEY *k = data;
+
+ if(v->name) {
+ // an actual value, not a filter
+ v->name = strdupz(v->name);
+ facet_value_is_used(k, v);
+ }
+
+ if(!v->selected)
+ v->selected = k->default_selected_for_values;
+}
+
+static bool facet_value_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data) {
+ FACET_VALUE *v = old_value;
+ FACET_VALUE *nv = new_value;
+ FACET_KEY *k = data;
+
+ if(!v->name && nv->name)
+ // an actual value, not a filter
+ v->name = strdupz(nv->name);
+
+ if(v->name)
+ facet_value_is_used(k, v);
+
+ return false;
+}
+
+static void facet_value_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ FACET_VALUE *v = value;
+ freez((char *)v->name);
+}
+
+// ----------------------------------------------------------------------------
+// FACET_KEY dictionary hooks
+
+static inline void facet_key_late_init(FACETS *facets, FACET_KEY *k) {
+ if(k->values)
+ return;
+
+ if(facets_key_is_facet(facets, k)) {
+ k->values = dictionary_create_advanced(
+ DICT_OPTION_SINGLE_THREADED | DICT_OPTION_DONT_OVERWRITE_VALUE | DICT_OPTION_FIXED_SIZE,
+ NULL, sizeof(FACET_VALUE));
+ dictionary_register_insert_callback(k->values, facet_value_insert_callback, k);
+ dictionary_register_conflict_callback(k->values, facet_value_conflict_callback, k);
+ dictionary_register_delete_callback(k->values, facet_value_delete_callback, k);
+ }
+}
+
+static void facet_key_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ FACET_KEY *k = value;
+ FACETS *facets = data;
+
+ if(!(k->options & FACET_KEY_OPTION_REORDER))
+ k->order = facets->order++;
+
+ if((k->options & FACET_KEY_OPTION_FTS) || (facets->options & FACETS_OPTION_ALL_KEYS_FTS))
+ facets->keys_filtered_by_query++;
+
+ if(k->name) {
+ // an actual value, not a filter
+ k->name = strdupz(k->name);
+ facet_key_late_init(facets, k);
+ }
+
+ k->current_value.b = buffer_create(0, NULL);
+
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(facets->keys_ll, k, prev, next);
+}
+
+static bool facet_key_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data) {
+ FACET_KEY *k = old_value;
+ FACET_KEY *nk = new_value;
+ FACETS *facets = data;
+
+ if(!k->name && nk->name) {
+ // an actual value, not a filter
+ k->name = strdupz(nk->name);
+ facet_key_late_init(facets, k);
+ }
+
+ if(k->options & FACET_KEY_OPTION_REORDER) {
+ k->order = facets->order++;
+ k->options &= ~FACET_KEY_OPTION_REORDER;
+ }
+
+ return false;
+}
+
+static void facet_key_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) {
+ FACET_KEY *k = value;
+ FACETS *facets = data;
+
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(facets->keys_ll, k, prev, next);
+
+ dictionary_destroy(k->values);
+ buffer_free(k->current_value.b);
+ freez((char *)k->name);
+}
+
+// ----------------------------------------------------------------------------
+
+FACETS *facets_create(uint32_t items_to_return, usec_t anchor, FACETS_OPTIONS options, const char *visible_keys, const char *facet_keys, const char *non_facet_keys) {
+ FACETS *facets = callocz(1, sizeof(FACETS));
+ facets->options = options;
+ facets->keys = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE|DICT_OPTION_FIXED_SIZE, NULL, sizeof(FACET_KEY));
+ dictionary_register_insert_callback(facets->keys, facet_key_insert_callback, facets);
+ dictionary_register_conflict_callback(facets->keys, facet_key_conflict_callback, facets);
+ dictionary_register_delete_callback(facets->keys, facet_key_delete_callback, facets);
+
+ if(facet_keys && *facet_keys)
+ facets->included_keys = simple_pattern_create(facet_keys, "|", SIMPLE_PATTERN_EXACT, true);
+
+ if(non_facet_keys && *non_facet_keys)
+ facets->excluded_keys = simple_pattern_create(non_facet_keys, "|", SIMPLE_PATTERN_EXACT, true);
+
+ if(visible_keys && *visible_keys)
+ facets->visible_keys = simple_pattern_create(visible_keys, "|", SIMPLE_PATTERN_EXACT, true);
+
+ facets->max_items_to_return = items_to_return;
+ facets->anchor = anchor;
+ facets->order = 1;
+
+ return facets;
+}
+
+void facets_destroy(FACETS *facets) {
+ dictionary_destroy(facets->accepted_params);
+ dictionary_destroy(facets->keys);
+ simple_pattern_free(facets->visible_keys);
+ simple_pattern_free(facets->included_keys);
+ simple_pattern_free(facets->excluded_keys);
+
+ while(facets->base) {
+ FACET_ROW *r = facets->base;
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(facets->base, r, prev, next);
+
+ facets_row_free(facets, r);
+ }
+
+ freez(facets);
+}
+
+void facets_accepted_param(FACETS *facets, const char *param) {
+ if(!facets->accepted_params)
+ facets->accepted_params = dictionary_create(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE);
+
+ dictionary_set(facets->accepted_params, param, NULL, 0);
+}
+
+inline FACET_KEY *facets_register_key(FACETS *facets, const char *key, FACET_KEY_OPTIONS options) {
+ FACET_KEY tk = {
+ .name = key,
+ .options = options,
+ .default_selected_for_values = true,
+ };
+ char hash[FACET_STRING_HASH_SIZE];
+ facets_string_hash(tk.name, hash);
+ return dictionary_set(facets->keys, hash, &tk, sizeof(tk));
+}
+
+inline FACET_KEY *facets_register_key_transformation(FACETS *facets, const char *key, FACET_KEY_OPTIONS options, facets_key_transformer_t cb, void *data) {
+ FACET_KEY *k = facets_register_key(facets, key, options);
+ k->transform.cb = cb;
+ k->transform.data = data;
+ return k;
+}
+
+inline FACET_KEY *facets_register_dynamic_key(FACETS *facets, const char *key, FACET_KEY_OPTIONS options, facet_dynamic_row_t cb, void *data) {
+ FACET_KEY *k = facets_register_key(facets, key, options);
+ k->dynamic.cb = cb;
+ k->dynamic.data = data;
+ return k;
+}
+
+void facets_set_query(FACETS *facets, const char *query) {
+ if(!query)
+ return;
+
+ facets->query = simple_pattern_create(query, " \t", SIMPLE_PATTERN_SUBSTRING, false);
+}
+
+void facets_set_items(FACETS *facets, uint32_t items) {
+ facets->max_items_to_return = items;
+}
+
+void facets_set_anchor(FACETS *facets, usec_t anchor) {
+ facets->anchor = anchor;
+}
+
+void facets_register_facet_filter(FACETS *facets, const char *key_id, char *value_ids, FACET_KEY_OPTIONS options) {
+ FACET_KEY tk = {
+ .options = options,
+ };
+ FACET_KEY *k = dictionary_set(facets->keys, key_id, &tk, sizeof(tk));
+
+ k->default_selected_for_values = false;
+ k->options |= FACET_KEY_OPTION_FACET;
+ k->options &= ~FACET_KEY_OPTION_NO_FACET;
+ facet_key_late_init(facets, k);
+
+ FACET_VALUE tv = {
+ .selected = true,
+ };
+ dictionary_set(k->values, value_ids, &tv, sizeof(tv));
+}
+
+// ----------------------------------------------------------------------------
+
+static inline void facets_check_value(FACETS *facets __maybe_unused, FACET_KEY *k) {
+ if(!k->current_value.updated)
+ buffer_flush(k->current_value.b);
+
+ if(k->transform.cb)
+ k->transform.cb(facets, k->current_value.b, k->transform.data);
+
+ if(!k->current_value.updated) {
+ buffer_strcat(k->current_value.b, FACET_VALUE_UNSET);
+ k->current_value.updated = true;
+ }
+
+// bool found = false;
+// if(strstr(buffer_tostring(k->current_value), "fprintd") != NULL)
+// found = true;
+
+ if(facets->query && ((k->options & FACET_KEY_OPTION_FTS) || facets->options & FACETS_OPTION_ALL_KEYS_FTS)) {
+ if(simple_pattern_matches(facets->query, buffer_tostring(k->current_value.b)))
+ facets->keys_matched_by_query++;
+ }
+
+ if(k->values) {
+ FACET_VALUE tk = {
+ .name = buffer_tostring(k->current_value.b),
+ };
+ facets_string_hash(tk.name, k->current_value.hash);
+ dictionary_set(k->values, k->current_value.hash, &tk, sizeof(tk));
+ }
+ else {
+ k->key_found_in_row++;
+ k->key_values_selected_in_row++;
+ }
+}
+
+void facets_add_key_value(FACETS *facets, const char *key, const char *value) {
+ FACET_KEY *k = facets_register_key(facets, key, 0);
+ buffer_flush(k->current_value.b);
+ buffer_strcat(k->current_value.b, value);
+ k->current_value.updated = true;
+
+ facets_check_value(facets, k);
+}
+
+void facets_add_key_value_length(FACETS *facets, const char *key, const char *value, size_t value_len) {
+ FACET_KEY *k = facets_register_key(facets, key, 0);
+ buffer_flush(k->current_value.b);
+ buffer_strncat(k->current_value.b, value, value_len);
+ k->current_value.updated = true;
+
+ facets_check_value(facets, k);
+}
+
+// ----------------------------------------------------------------------------
+// FACET_ROW dictionary hooks
+
+static void facet_row_key_value_insert_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ FACET_ROW_KEY_VALUE *rkv = value;
+ FACET_ROW *row = data; (void)row;
+
+ rkv->wb = buffer_create(0, NULL);
+ buffer_strcat(rkv->wb, rkv->tmp && *rkv->tmp ? rkv->tmp : FACET_VALUE_UNSET);
+}
+
+static bool facet_row_key_value_conflict_callback(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data) {
+ FACET_ROW_KEY_VALUE *rkv = old_value;
+ FACET_ROW_KEY_VALUE *n_rkv = new_value;
+ FACET_ROW *row = data; (void)row;
+
+ buffer_flush(rkv->wb);
+ buffer_strcat(rkv->wb, n_rkv->tmp && *n_rkv->tmp ? n_rkv->tmp : FACET_VALUE_UNSET);
+
+ return false;
+}
+
+static void facet_row_key_value_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data) {
+ FACET_ROW_KEY_VALUE *rkv = value;
+ FACET_ROW *row = data; (void)row;
+
+ buffer_free(rkv->wb);
+}
+
+// ----------------------------------------------------------------------------
+// FACET_ROW management
+
+static void facets_row_free(FACETS *facets __maybe_unused, FACET_ROW *row) {
+ dictionary_destroy(row->dict);
+ freez(row);
+}
+
+static FACET_ROW *facets_row_create(FACETS *facets, usec_t usec, FACET_ROW *into) {
+ FACET_ROW *row;
+
+ if(into)
+ row = into;
+ else {
+ row = callocz(1, sizeof(FACET_ROW));
+ row->dict = dictionary_create_advanced(DICT_OPTION_SINGLE_THREADED|DICT_OPTION_DONT_OVERWRITE_VALUE|DICT_OPTION_FIXED_SIZE, NULL, sizeof(FACET_ROW_KEY_VALUE));
+ dictionary_register_insert_callback(row->dict, facet_row_key_value_insert_callback, row);
+ dictionary_register_conflict_callback(row->dict, facet_row_key_value_conflict_callback, row);
+ dictionary_register_delete_callback(row->dict, facet_row_key_value_delete_callback, row);
+ }
+
+ row->usec = usec;
+
+ FACET_KEY *k;
+ dfe_start_read(facets->keys, k) {
+ FACET_ROW_KEY_VALUE t = {
+ .tmp = (k->current_value.updated && buffer_strlen(k->current_value.b)) ?
+ buffer_tostring(k->current_value.b) : FACET_VALUE_UNSET,
+ .wb = NULL,
+ };
+ dictionary_set(row->dict, k->name, &t, sizeof(t));
+ }
+ dfe_done(k);
+
+ return row;
+}
+
+// ----------------------------------------------------------------------------
+
+static void facets_row_keep(FACETS *facets, usec_t usec) {
+ facets->operations.matched++;
+
+ if(usec < facets->anchor) {
+ facets->operations.skips_before++;
+ return;
+ }
+
+ if(unlikely(!facets->base)) {
+ facets->operations.last_added = facets_row_create(facets, usec, NULL);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(facets->base, facets->operations.last_added, prev, next);
+ facets->items_to_return++;
+ facets->operations.first++;
+ return;
+ }
+
+ if(likely(usec > facets->base->prev->usec))
+ facets->operations.last_added = facets->base->prev;
+
+ FACET_ROW *last = facets->operations.last_added;
+ while(last->prev != facets->base->prev && usec > last->prev->usec) {
+ last = last->prev;
+ facets->operations.backwards++;
+ }
+
+ while(last->next && usec < last->next->usec) {
+ last = last->next;
+ facets->operations.forwards++;
+ }
+
+ if(facets->items_to_return >= facets->max_items_to_return) {
+ if(last == facets->base->prev && usec < last->usec) {
+ facets->operations.skips_after++;
+ return;
+ }
+ }
+
+ facets->items_to_return++;
+
+ if(usec > last->usec) {
+ if(facets->items_to_return > facets->max_items_to_return) {
+ facets->items_to_return--;
+ facets->operations.shifts++;
+ facets->operations.last_added = facets->base->prev;
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(facets->base, facets->operations.last_added, prev, next);
+ facets->operations.last_added = facets_row_create(facets, usec, facets->operations.last_added);
+ }
+ DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(facets->base, facets->operations.last_added, prev, next);
+ facets->operations.prepends++;
+ }
+ else {
+ facets->operations.last_added = facets_row_create(facets, usec, NULL);
+ DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(facets->base, facets->operations.last_added, prev, next);
+ facets->operations.appends++;
+ }
+
+ while(facets->items_to_return > facets->max_items_to_return) {
+ // we have to remove something
+
+ FACET_ROW *tmp = facets->base->prev;
+ DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(facets->base, tmp, prev, next);
+ facets->items_to_return--;
+
+ if(unlikely(facets->operations.last_added == tmp))
+ facets->operations.last_added = facets->base->prev;
+
+ facets_row_free(facets, tmp);
+ facets->operations.shifts++;
+ }
+}
+
+void facets_rows_begin(FACETS *facets) {
+ FACET_KEY *k;
+ // dfe_start_read(facets->keys, k) {
+ for(k = facets->keys_ll ; k ; k = k->next) {
+ k->key_found_in_row = 0;
+ k->key_values_selected_in_row = 0;
+ k->current_value.updated = false;
+ k->current_value.hash[0] = '\0';
+ }
+ // dfe_done(k);
+
+ facets->keys_matched_by_query = 0;
+}
+
+void facets_row_finished(FACETS *facets, usec_t usec) {
+ if(facets->query && facets->keys_filtered_by_query && !facets->keys_matched_by_query)
+ goto cleanup;
+
+ facets->operations.evaluated++;
+
+ uint32_t total_keys = 0;
+ uint32_t selected_by = 0;
+
+ FACET_KEY *k;
+ // dfe_start_read(facets->keys, k) {
+ for(k = facets->keys_ll ; k ; k = k->next) {
+ if(!k->key_found_in_row) {
+ // put the FACET_VALUE_UNSET value into it
+ facets_check_value(facets, k);
+ }
+
+ internal_fatal(!k->key_found_in_row, "all keys should be found in the row at this point");
+ internal_fatal(k->key_found_in_row != 1, "all keys should be matched exactly once at this point");
+ internal_fatal(k->key_values_selected_in_row > 1, "key values are selected in row more than once");
+
+ k->key_found_in_row = 1;
+
+ total_keys += k->key_found_in_row;
+ selected_by += (k->key_values_selected_in_row) ? 1 : 0;
+ }
+ // dfe_done(k);
+
+ if(selected_by >= total_keys - 1) {
+ uint32_t found = 0;
+
+ // dfe_start_read(facets->keys, k){
+ for(k = facets->keys_ll ; k ; k = k->next) {
+ uint32_t counted_by = selected_by;
+
+ if (counted_by != total_keys && !k->key_values_selected_in_row)
+ counted_by++;
+
+ if(counted_by == total_keys) {
+ if(k->values) {
+ if(!k->current_value.hash[0])
+ facets_string_hash(buffer_tostring(k->current_value.b), k->current_value.hash);
+
+ FACET_VALUE *v = dictionary_get(k->values, k->current_value.hash);
+ v->final_facet_value_counter++;
+ }
+
+ found++;
+ }
+ }
+ // dfe_done(k);
+
+ internal_fatal(!found, "We should find at least one facet to count this row");
+ (void)found;
+ }
+
+ if(selected_by == total_keys)
+ facets_row_keep(facets, usec);
+
+cleanup:
+ facets_rows_begin(facets);
+}
+
+// ----------------------------------------------------------------------------
+// output
+
+void facets_report(FACETS *facets, BUFFER *wb) {
+ buffer_json_member_add_boolean(wb, "show_ids", false);
+ buffer_json_member_add_boolean(wb, "has_history", true);
+
+ buffer_json_member_add_object(wb, "pagination");
+ buffer_json_member_add_boolean(wb, "enabled", true);
+ buffer_json_member_add_string(wb, "key", "anchor");
+ buffer_json_member_add_string(wb, "column", "timestamp");
+ buffer_json_object_close(wb);
+
+ buffer_json_member_add_array(wb, "accepted_params");
+ {
+ if(facets->accepted_params) {
+ void *t;
+ dfe_start_read(facets->accepted_params, t) {
+ buffer_json_add_array_item_string(wb, t_dfe.name);
+ }
+ dfe_done(t);
+ }
+
+ FACET_KEY *k;
+ dfe_start_read(facets->keys, k) {
+ if(!k->values)
+ continue;
+
+ buffer_json_add_array_item_string(wb, k_dfe.name);
+ }
+ dfe_done(k);
+ }
+ buffer_json_array_close(wb); // accepted_params
+
+ buffer_json_member_add_array(wb, "facets");
+ {
+ FACET_KEY *k;
+ dfe_start_read(facets->keys, k) {
+ if(!k->values)
+ continue;
+
+ buffer_json_add_array_item_object(wb); // key
+ {
+ buffer_json_member_add_string(wb, "id", k_dfe.name);
+ buffer_json_member_add_string(wb, "name", k->name);
+
+ if(!k->order)
+ k->order = facets->order++;
+
+ buffer_json_member_add_uint64(wb, "order", k->order);
+ buffer_json_member_add_array(wb, "options");
+ {
+ FACET_VALUE *v;
+ dfe_start_read(k->values, v) {
+ buffer_json_add_array_item_object(wb);
+ {
+ buffer_json_member_add_string(wb, "id", v_dfe.name);
+ buffer_json_member_add_string(wb, "name", v->name);
+ buffer_json_member_add_uint64(wb, "count", v->final_facet_value_counter);
+ }
+ buffer_json_object_close(wb);
+ }
+ dfe_done(v);
+ }
+ buffer_json_array_close(wb); // options
+ }
+ buffer_json_object_close(wb); // key
+ }
+ dfe_done(k);
+ }
+ buffer_json_array_close(wb); // facets
+
+ buffer_json_member_add_object(wb, "columns");
+ {
+ size_t field_id = 0;
+ buffer_rrdf_table_add_field(
+ wb, field_id++,
+ "timestamp", "Timestamp",
+ RRDF_FIELD_TYPE_TIMESTAMP,
+ RRDF_FIELD_VISUAL_VALUE,
+ RRDF_FIELD_TRANSFORM_DATETIME_USEC, 0, NULL, NAN,
+ RRDF_FIELD_SORT_DESCENDING,
+ NULL,
+ RRDF_FIELD_SUMMARY_COUNT,
+ RRDF_FIELD_FILTER_RANGE,
+ RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_UNIQUE_KEY,
+ NULL);
+
+ FACET_KEY *k;
+ dfe_start_read(facets->keys, k) {
+ RRDF_FIELD_OPTIONS options = RRDF_FIELD_OPTS_NONE;
+ bool visible = k->options & (FACET_KEY_OPTION_VISIBLE|FACET_KEY_OPTION_STICKY);
+
+ if((facets->options & FACETS_OPTION_ALL_FACETS_VISIBLE && k->values))
+ visible = true;
+
+ if(!visible)
+ visible = simple_pattern_matches(facets->visible_keys, k->name);
+
+ if(visible)
+ options |= RRDF_FIELD_OPTS_VISIBLE;
+
+ if(k->options & FACET_KEY_OPTION_MAIN_TEXT)
+ options |= RRDF_FIELD_OPTS_FULL_WIDTH | RRDF_FIELD_OPTS_WRAP;
+
+ buffer_rrdf_table_add_field(
+ wb, field_id++,
+ k_dfe.name, k->name ? k->name : k_dfe.name,
+ RRDF_FIELD_TYPE_STRING,
+ RRDF_FIELD_VISUAL_VALUE,
+ RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
+ RRDF_FIELD_SORT_ASCENDING,
+ NULL,
+ RRDF_FIELD_SUMMARY_COUNT,
+ k->values ? RRDF_FIELD_FILTER_FACET : RRDF_FIELD_FILTER_NONE,
+ options,
+ FACET_VALUE_UNSET);
+ }
+ dfe_done(k);
+ }
+ buffer_json_object_close(wb); // columns
+
+ buffer_json_member_add_array(wb, "data");
+ {
+ for(FACET_ROW *row = facets->base ; row ;row = row->next) {
+ buffer_json_add_array_item_array(wb); // each row
+ buffer_json_add_array_item_uint64(wb, row->usec);
+
+ FACET_KEY *k;
+ dfe_start_read(facets->keys, k)
+ {
+ FACET_ROW_KEY_VALUE *rkv = dictionary_get(row->dict, k->name);
+
+ if(unlikely(k->dynamic.cb)) {
+ if(unlikely(!rkv))
+ rkv = dictionary_set(row->dict, k->name, NULL, sizeof(*rkv));
+
+ k->dynamic.cb(facets, wb, rkv, row, k->dynamic.data);
+ }
+ else
+ buffer_json_add_array_item_string(wb, rkv ? buffer_tostring(rkv->wb) : FACET_VALUE_UNSET);
+ }
+ dfe_done(k);
+ buffer_json_array_close(wb); // each row
+ }
+ }
+ buffer_json_array_close(wb); // data
+
+ buffer_json_member_add_string(wb, "default_sort_column", "timestamp");
+ buffer_json_member_add_array(wb, "default_charts");
+ buffer_json_array_close(wb);
+
+ buffer_json_member_add_object(wb, "items");
+ {
+ buffer_json_member_add_uint64(wb, "evaluated", facets->operations.evaluated);
+ buffer_json_member_add_uint64(wb, "matched", facets->operations.matched);
+ buffer_json_member_add_uint64(wb, "returned", facets->items_to_return);
+ buffer_json_member_add_uint64(wb, "max_to_return", facets->max_items_to_return);
+ buffer_json_member_add_uint64(wb, "before", facets->operations.skips_before);
+ buffer_json_member_add_uint64(wb, "after", facets->operations.skips_after + facets->operations.shifts);
+ }
+ buffer_json_object_close(wb); // items
+
+ buffer_json_member_add_object(wb, "stats");
+ {
+ buffer_json_member_add_uint64(wb, "first", facets->operations.first);
+ buffer_json_member_add_uint64(wb, "forwards", facets->operations.forwards);
+ buffer_json_member_add_uint64(wb, "backwards", facets->operations.backwards);
+ buffer_json_member_add_uint64(wb, "skips_before", facets->operations.skips_before);
+ buffer_json_member_add_uint64(wb, "skips_after", facets->operations.skips_after);
+ buffer_json_member_add_uint64(wb, "prepends", facets->operations.prepends);
+ buffer_json_member_add_uint64(wb, "appends", facets->operations.appends);
+ buffer_json_member_add_uint64(wb, "shifts", facets->operations.shifts);
+ }
+ buffer_json_object_close(wb); // stats
+
+}
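For orientation, facets_report() above assembles a JSON object of roughly this shape (abridged; the member names come from the buffer_json_* calls in the function, the sample values are illustrative):

    {
      "show_ids": false,
      "has_history": true,
      "pagination": { "enabled": true, "key": "anchor", "column": "timestamp" },
      "accepted_params": [ "..." ],
      "facets": [ { "id": "...", "name": "...", "order": 1,
                    "options": [ { "id": "...", "name": "...", "count": 0 } ] } ],
      "columns": { "...": "one field per registered key, plus the timestamp field" },
      "data": [ [ 1690000000000000, "value of key 1", "value of key 2" ] ],
      "default_sort_column": "timestamp",
      "default_charts": [],
      "items": { "evaluated": 0, "matched": 0, "returned": 0, "max_to_return": 0, "before": 0, "after": 0 },
      "stats": { "first": 0, "forwards": 0, "backwards": 0, "skips_before": 0,
                 "skips_after": 0, "prepends": 0, "appends": 0, "shifts": 0 }
    }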
diff --git a/libnetdata/facets/facets.h b/libnetdata/facets/facets.h
new file mode 100644
index 000000000..c0f7f80c8
--- /dev/null
+++ b/libnetdata/facets/facets.h
@@ -0,0 +1,62 @@
+#ifndef FACETS_H
+#define FACETS_H 1
+
+#include "../libnetdata.h"
+
+typedef enum __attribute__((packed)) {
+ FACET_KEY_OPTION_FACET = (1 << 0), // filterable values
+ FACET_KEY_OPTION_NO_FACET = (1 << 1), // non-filterable value
+ FACET_KEY_OPTION_STICKY = (1 << 2), // should be sticky in the table
+ FACET_KEY_OPTION_VISIBLE = (1 << 3), // should be in the default table
+ FACET_KEY_OPTION_FTS = (1 << 4), // the key is filterable by full text search (FTS)
+ FACET_KEY_OPTION_MAIN_TEXT = (1 << 5), // full width and wrap
+ FACET_KEY_OPTION_REORDER = (1 << 6), // give the key a new order id on first encounter
+} FACET_KEY_OPTIONS;
+
+typedef struct facet_row_key_value {
+ const char *tmp;
+ BUFFER *wb;
+} FACET_ROW_KEY_VALUE;
+
+typedef struct facet_row {
+ usec_t usec;
+ DICTIONARY *dict;
+ struct facet_row *prev, *next;
+} FACET_ROW;
+
+typedef struct facets FACETS;
+typedef struct facet_key FACET_KEY;
+
+#define FACET_STRING_HASH_SIZE 19
+void facets_string_hash(const char *src, char *out);
+
+typedef void (*facets_key_transformer_t)(FACETS *facets __maybe_unused, BUFFER *wb, void *data);
+typedef void (*facet_dynamic_row_t)(FACETS *facets, BUFFER *json_array, FACET_ROW_KEY_VALUE *rkv, FACET_ROW *row, void *data);
+FACET_KEY *facets_register_dynamic_key(FACETS *facets, const char *key, FACET_KEY_OPTIONS options, facet_dynamic_row_t cb, void *data);
+FACET_KEY *facets_register_key_transformation(FACETS *facets, const char *key, FACET_KEY_OPTIONS options, facets_key_transformer_t cb, void *data);
+
+typedef enum __attribute__((packed)) {
+ FACETS_OPTION_ALL_FACETS_VISIBLE = (1 << 0), // all facets, should be visible by default in the table
+ FACETS_OPTION_ALL_KEYS_FTS = (1 << 1), // all keys are searchable by full text search
+} FACETS_OPTIONS;
+
+FACETS *facets_create(uint32_t items_to_return, usec_t anchor, FACETS_OPTIONS options, const char *visible_keys, const char *facet_keys, const char *non_facet_keys);
+void facets_destroy(FACETS *facets);
+
+void facets_accepted_param(FACETS *facets, const char *param);
+
+void facets_rows_begin(FACETS *facets);
+void facets_row_finished(FACETS *facets, usec_t usec);
+
+FACET_KEY *facets_register_key(FACETS *facets, const char *param, FACET_KEY_OPTIONS options);
+void facets_set_query(FACETS *facets, const char *query);
+void facets_set_items(FACETS *facets, uint32_t items);
+void facets_set_anchor(FACETS *facets, usec_t anchor);
+void facets_register_facet_filter(FACETS *facets, const char *key_id, char *value_ids, FACET_KEY_OPTIONS options);
+
+void facets_add_key_value(FACETS *facets, const char *key, const char *value);
+void facets_add_key_value_length(FACETS *facets, const char *key, const char *value, size_t value_len);
+
+void facets_report(FACETS *facets, BUFFER *wb);
+
+#endif
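A minimal usage sketch of the API declared above, assuming the caller wants the report inside a fresh JSON buffer (the key names, the sample values and the use of now_realtime_usec() are illustrative, not prescribed by the header):

    BUFFER *wb = buffer_create(0, NULL);
    buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_DEFAULT);

    FACETS *facets = facets_create(50, 0, FACETS_OPTION_ALL_KEYS_FTS, NULL, NULL, NULL);
    facets_register_key(facets, "PRIORITY", FACET_KEY_OPTION_FACET);

    facets_rows_begin(facets);                             // reset per-row state
    facets_add_key_value(facets, "PRIORITY", "6");
    facets_add_key_value(facets, "MESSAGE", "service started");
    facets_row_finished(facets, now_realtime_usec());      // count facet values, keep the row if it matches

    facets_report(facets, wb);                             // append facets, columns and data to the JSON buffer
    buffer_json_finalize(wb);

    facets_destroy(facets);
    buffer_free(wb);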
diff --git a/libnetdata/http/http_defs.h b/libnetdata/http/http_defs.h
index 906e3bdfb..ffd1c344b 100644
--- a/libnetdata/http/http_defs.h
+++ b/libnetdata/http/http_defs.h
@@ -17,15 +17,23 @@
#define HTTP_RESP_UNAUTHORIZED 401
#define HTTP_RESP_FORBIDDEN 403
#define HTTP_RESP_NOT_FOUND 404
+#define HTTP_RESP_METHOD_NOT_ALLOWED 405
+#define HTTP_RESP_METHOD_NOT_ALLOWED_STR "Method Not Allowed"
#define HTTP_RESP_CONFLICT 409
#define HTTP_RESP_PRECOND_FAIL 412
#define HTTP_RESP_CONTENT_TOO_LONG 413
// HTTP_CODES 5XX Server Errors
#define HTTP_RESP_INTERNAL_SERVER_ERROR 500
+#define HTTP_RESP_INTERNAL_SERVER_ERROR_STR "Internal Server Error"
#define HTTP_RESP_BACKEND_FETCH_FAILED 503 // 503 is right
#define HTTP_RESP_SERVICE_UNAVAILABLE 503 // 503 is right
#define HTTP_RESP_GATEWAY_TIMEOUT 504
#define HTTP_RESP_BACKEND_RESPONSE_INVALID 591
+#define HTTP_METHOD_GET (1)
+#define HTTP_METHOD_POST (2)
+#define HTTP_METHOD_PUT (3)
+#define HTTP_METHOD_DELETE (4)
+
#endif /* NETDATA_HTTP_DEFS_H */
diff --git a/libnetdata/inlined.h b/libnetdata/inlined.h
index 2697b9a03..eb55f0fe7 100644
--- a/libnetdata/inlined.h
+++ b/libnetdata/inlined.h
@@ -209,7 +209,7 @@ static inline uint64_t str2uint64_hex(const char *src, char **endptr) {
const unsigned char *s = (const unsigned char *)src;
unsigned char c;
- while ((c = hex_value_from_ascii[*s]) != 255) {
+ while ((c = hex_value_from_ascii[toupper(*s)]) != 255) {
num = (num << 4) | c;
s++;
}
@@ -510,6 +510,27 @@ static inline int read_single_signed_number_file(const char *filename, long long
return 0;
}
+static inline int read_single_base64_or_hex_number_file(const char *filename, unsigned long long *result) {
+ char buffer[30 + 1];
+
+ int ret = read_file(filename, buffer, 30);
+ if(unlikely(ret)) {
+ *result = 0;
+ return ret;
+ }
+
+ buffer[30] = '\0';
+
+ if(likely(buffer[0])){
+ *result = str2ull_encoded(buffer);
+ return 0;
+ }
+ else {
+ *result = 0;
+ return -1;
+ }
+}
+
static inline int uuid_memcmp(const uuid_t *uu1, const uuid_t *uu2) {
return memcmp(uu1, uu2, sizeof(uuid_t));
}
diff --git a/libnetdata/libnetdata.c b/libnetdata/libnetdata.c
index 272ba8f17..26582ffe2 100644
--- a/libnetdata/libnetdata.c
+++ b/libnetdata/libnetdata.c
@@ -1628,7 +1628,7 @@ void recursive_config_double_dir_load(const char *user_path, const char *stock_p
// Returns the number of bytes read from the file if file_size is not NULL.
// The actual buffer has an extra byte set to zero (not included in the count).
-char *read_by_filename(char *filename, long *file_size)
+char *read_by_filename(const char *filename, long *file_size)
{
FILE *f = fopen(filename, "r");
if (!f)
@@ -1966,3 +1966,135 @@ int hash256_string(const unsigned char *string, size_t size, char *hash) {
EVP_MD_CTX_destroy(ctx);
return 1;
}
+
+// Returns true if a relative period was requested (and converted to an absolute one), false if the period was already absolute
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t *now_ptr, bool unittest_running) {
+ time_t now = now_realtime_sec() - 1;
+
+ if(now_ptr)
+ *now_ptr = now;
+
+ int absolute_period_requested = -1;
+ long long after_requested, before_requested;
+
+ before_requested = *before;
+ after_requested = *after;
+
+ // allow relative for before (smaller than API_RELATIVE_TIME_MAX)
+ if(ABS(before_requested) <= API_RELATIVE_TIME_MAX) {
+ // if the user asked for a positive relative time,
+ // flip it to a negative
+ if(before_requested > 0)
+ before_requested = -before_requested;
+
+ before_requested = now + before_requested;
+ absolute_period_requested = 0;
+ }
+
+ // allow relative for after (smaller than API_RELATIVE_TIME_MAX)
+ if(ABS(after_requested) <= API_RELATIVE_TIME_MAX) {
+ if(after_requested > 0)
+ after_requested = -after_requested;
+
+ // if the user didn't give an after, use the number of points
+ // to give a sane default
+ if(after_requested == 0)
+ after_requested = -600;
+
+ // since the query engine now returns inclusive timestamps
+ // it is awkward to return 6 points when after=-5 is given
+ // so for relative queries we add 1 second, to give
+ // more predictable results to users.
+ after_requested = before_requested + after_requested + 1;
+ absolute_period_requested = 0;
+ }
+
+ if(absolute_period_requested == -1)
+ absolute_period_requested = 1;
+
+ // check if the parameters are flipped
+ if(after_requested > before_requested) {
+ long long t = before_requested;
+ before_requested = after_requested;
+ after_requested = t;
+ }
+
+ // if the query requests future data
+ // shift the query back to be in the present time
+ // (this may also happen because of the rules above)
+ if(before_requested > now) {
+ long long delta = before_requested - now;
+ before_requested -= delta;
+ after_requested -= delta;
+ }
+
+ time_t absolute_minimum_time = now - (10 * 365 * 86400);
+ time_t absolute_maximum_time = now + (1 * 365 * 86400);
+
+ if (after_requested < absolute_minimum_time && !unittest_running)
+ after_requested = absolute_minimum_time;
+
+ if (after_requested > absolute_maximum_time && !unittest_running)
+ after_requested = absolute_maximum_time;
+
+ if (before_requested < absolute_minimum_time && !unittest_running)
+ before_requested = absolute_minimum_time;
+
+ if (before_requested > absolute_maximum_time && !unittest_running)
+ before_requested = absolute_maximum_time;
+
+ *before = before_requested;
+ *after = after_requested;
+
+ return (absolute_period_requested != 1);
+}
+
+int netdata_base64_decode(const char *encoded, char *decoded, size_t decoded_size) {
+ static const unsigned char base64_table[256] = {
+ ['A'] = 0, ['B'] = 1, ['C'] = 2, ['D'] = 3, ['E'] = 4, ['F'] = 5, ['G'] = 6, ['H'] = 7,
+ ['I'] = 8, ['J'] = 9, ['K'] = 10, ['L'] = 11, ['M'] = 12, ['N'] = 13, ['O'] = 14, ['P'] = 15,
+ ['Q'] = 16, ['R'] = 17, ['S'] = 18, ['T'] = 19, ['U'] = 20, ['V'] = 21, ['W'] = 22, ['X'] = 23,
+ ['Y'] = 24, ['Z'] = 25, ['a'] = 26, ['b'] = 27, ['c'] = 28, ['d'] = 29, ['e'] = 30, ['f'] = 31,
+ ['g'] = 32, ['h'] = 33, ['i'] = 34, ['j'] = 35, ['k'] = 36, ['l'] = 37, ['m'] = 38, ['n'] = 39,
+ ['o'] = 40, ['p'] = 41, ['q'] = 42, ['r'] = 43, ['s'] = 44, ['t'] = 45, ['u'] = 46, ['v'] = 47,
+ ['w'] = 48, ['x'] = 49, ['y'] = 50, ['z'] = 51, ['0'] = 52, ['1'] = 53, ['2'] = 54, ['3'] = 55,
+ ['4'] = 56, ['5'] = 57, ['6'] = 58, ['7'] = 59, ['8'] = 60, ['9'] = 61, ['+'] = 62, ['/'] = 63,
+ [0 ... '+' - 1] = 255,
+ ['+' + 1 ... '/' - 1] = 255,
+ ['9' + 1 ... 'A' - 1] = 255,
+ ['Z' + 1 ... 'a' - 1] = 255,
+ ['z' + 1 ... 255] = 255
+ };
+
+ size_t count = 0;
+ unsigned int tmp = 0;
+ int i, bit;
+
+ if (decoded_size < 1)
+ return 0; // Buffer size must be at least 1 for null termination
+
+ for (i = 0, bit = 0; encoded[i]; i++) {
+ unsigned char value = base64_table[(unsigned char)encoded[i]];
+ if (value > 63)
+ return -1; // Invalid character in input
+
+ tmp = tmp << 6 | value;
+ if (++bit == 4) {
+ if (count + 3 >= decoded_size) break; // Stop decoding if buffer is full
+ decoded[count++] = (tmp >> 16) & 0xFF;
+ decoded[count++] = (tmp >> 8) & 0xFF;
+ decoded[count++] = tmp & 0xFF;
+ tmp = 0;
+ bit = 0;
+ }
+ }
+
+ if (bit > 0 && count + 1 < decoded_size) {
+ tmp <<= 6 * (4 - bit);
+ if (bit > 2 && count + 1 < decoded_size) decoded[count++] = (tmp >> 16) & 0xFF;
+ if (bit > 3 && count + 1 < decoded_size) decoded[count++] = (tmp >> 8) & 0xFF;
+ }
+
+ decoded[count] = '\0'; // Null terminate the output string
+ return count;
+}
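Two notes on the helpers added above. rrdr_relative_window_to_absolute() turns a relative request such as after=-600, before=0 into a 600-second inclusive window ending at the present and returns true, while an already absolute pair is only clamped into range and returns false. netdata_base64_decode() maps '=' to 255 in its lookup table, so padded input is rejected with -1; the sketch below therefore uses an unpadded input whose length is a multiple of four (the literal is purely illustrative):

    char out[16];
    int len = netdata_base64_decode("Zm9vYmFy", out, sizeof(out));
    // len is 6 and out holds the NUL-terminated string "foobar";
    // len would be -1 if any character outside the base64 alphabet were found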
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
index 8b8c7206e..b8dedf515 100644
--- a/libnetdata/libnetdata.h
+++ b/libnetdata/libnetdata.h
@@ -578,7 +578,7 @@ void recursive_config_double_dir_load(
, void *data
, size_t depth
);
-char *read_by_filename(char *filename, long *file_size);
+char *read_by_filename(const char *filename, long *file_size);
char *find_and_replace(const char *src, const char *find, const char *replace, const char *where);
/* fix for alpine linux */
@@ -836,6 +836,8 @@ extern char *netdata_configured_host_prefix;
#include "yaml.h"
#include "http/http_defs.h"
#include "gorilla/gorilla.h"
+#include "facets/facets.h"
+#include "dyn_conf/dyn_conf.h"
// BEWARE: this exists in alarm-notify.sh
#define DEFAULT_CLOUD_BASE_URL "https://app.netdata.cloud"
@@ -978,6 +980,14 @@ typedef enum {
void timing_action(TIMING_ACTION action, TIMING_STEP step);
int hash256_string(const unsigned char *string, size_t size, char *hash);
+
+extern bool unittest_running;
+#define API_RELATIVE_TIME_MAX (3 * 365 * 86400)
+
+bool rrdr_relative_window_to_absolute(time_t *after, time_t *before, time_t *now_ptr, bool unittest_running);
+
+int netdata_base64_decode(const char *encoded, char *decoded, size_t decoded_size);
+
# ifdef __cplusplus
}
# endif
diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h
index 8331ecbbe..c4bd47360 100644
--- a/libnetdata/socket/socket.h
+++ b/libnetdata/socket/socket.h
@@ -11,27 +11,27 @@
typedef enum web_client_acl {
WEB_CLIENT_ACL_NONE = (0),
- WEB_CLIENT_ACL_NOCHECK = (0), // Don't check anything - this should work on all channels
- WEB_CLIENT_ACL_DASHBOARD = (1 << 0),
- WEB_CLIENT_ACL_REGISTRY = (1 << 1),
- WEB_CLIENT_ACL_BADGE = (1 << 2),
- WEB_CLIENT_ACL_MGMT = (1 << 3),
- WEB_CLIENT_ACL_STREAMING = (1 << 4),
- WEB_CLIENT_ACL_NETDATACONF = (1 << 5),
- WEB_CLIENT_ACL_SSL_OPTIONAL = (1 << 6),
- WEB_CLIENT_ACL_SSL_FORCE = (1 << 7),
- WEB_CLIENT_ACL_SSL_DEFAULT = (1 << 8),
- WEB_CLIENT_ACL_ACLK = (1 << 9),
- WEB_CLIENT_ACL_WEBRTC = (1 << 10),
- WEB_CLIENT_ACL_BEARER_OPTIONAL = (1 << 11), // allow unprotected access if bearer is not enabled in netdata
- WEB_CLIENT_ACL_BEARER_REQUIRED = (1 << 12), // allow access only if a valid bearer is used
+ WEB_CLIENT_ACL_NOCHECK = (1 << 0), // Don't check anything - this should work on all channels
+ WEB_CLIENT_ACL_DASHBOARD = (1 << 1),
+ WEB_CLIENT_ACL_REGISTRY = (1 << 2),
+ WEB_CLIENT_ACL_BADGE = (1 << 3),
+ WEB_CLIENT_ACL_MGMT = (1 << 4),
+ WEB_CLIENT_ACL_STREAMING = (1 << 5),
+ WEB_CLIENT_ACL_NETDATACONF = (1 << 6),
+ WEB_CLIENT_ACL_SSL_OPTIONAL = (1 << 7),
+ WEB_CLIENT_ACL_SSL_FORCE = (1 << 8),
+ WEB_CLIENT_ACL_SSL_DEFAULT = (1 << 9),
+ WEB_CLIENT_ACL_ACLK = (1 << 10),
+ WEB_CLIENT_ACL_WEBRTC = (1 << 11),
+ WEB_CLIENT_ACL_BEARER_OPTIONAL = (1 << 12), // allow unprotected access if bearer is not enabled in netdata
+ WEB_CLIENT_ACL_BEARER_REQUIRED = (1 << 13), // allow access only if a valid bearer is used
} WEB_CLIENT_ACL;
#define WEB_CLIENT_ACL_DASHBOARD_ACLK_WEBRTC (WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK | WEB_CLIENT_ACL_WEBRTC | WEB_CLIENT_ACL_BEARER_OPTIONAL)
#define WEB_CLIENT_ACL_ACLK_WEBRTC_DASHBOARD_WITH_BEARER (WEB_CLIENT_ACL_DASHBOARD | WEB_CLIENT_ACL_ACLK | WEB_CLIENT_ACL_WEBRTC | WEB_CLIENT_ACL_BEARER_REQUIRED)
#ifdef NETDATA_DEV_MODE
-#define ACL_DEV_OPEN_ACCESS WEB_CLIENT_ACL_DASHBOARD
+#define ACL_DEV_OPEN_ACCESS WEB_CLIENT_ACL_NOCHECK
#else
#define ACL_DEV_OPEN_ACCESS 0
#endif
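Beyond renumbering, the change above alters the meaning of WEB_CLIENT_ACL_NOCHECK: it previously aliased WEB_CLIENT_ACL_NONE (both 0), so a bitwise test against it could never succeed, whereas it is now a distinct flag (1 << 0) that can be set and tested like any other ACL bit. A minimal sketch of the difference (illustrative only):

    WEB_CLIENT_ACL acl = WEB_CLIENT_ACL_NOCHECK | WEB_CLIENT_ACL_DASHBOARD;
    if(acl & WEB_CLIENT_ACL_NOCHECK) {
        // taken with the new values; with the old ones this test was always false
    }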
diff --git a/libnetdata/url/url.c b/libnetdata/url/url.c
index 7a671946a..39366cbe8 100644
--- a/libnetdata/url/url.c
+++ b/libnetdata/url/url.c
@@ -243,7 +243,7 @@ inline bool url_is_request_complete(char *begin, char *end, size_t length, char
if(likely(strncmp(begin, "GET ", 4)) == 0) {
return strstr(end - 4, "\r\n\r\n");
}
- else if(unlikely(strncmp(begin, "POST ", 5) == 0)) {
+ else if(unlikely(strncmp(begin, "POST ", 5) == 0 || strncmp(begin, "PUT ", 4) == 0)) {
char *cl = strstr(begin, "Content-Length: ");
if(!cl) return false;
cl = &cl[16];
diff --git a/libnetdata/worker_utilization/worker_utilization.h b/libnetdata/worker_utilization/worker_utilization.h
index 6745a010b..cc3f82688 100644
--- a/libnetdata/worker_utilization/worker_utilization.h
+++ b/libnetdata/worker_utilization/worker_utilization.h
@@ -7,7 +7,7 @@
#define WORKER_UTILIZATION_MAX_JOB_TYPES 50
-typedef enum {
+typedef enum __attribute__((packed)) {
WORKER_METRIC_EMPTY = 0,
WORKER_METRIC_IDLE_BUSY = 1,
WORKER_METRIC_ABSOLUTE = 2,