diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-07-24 09:54:23 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-07-24 09:54:44 +0000 |
commit | 836b47cb7e99a977c5a23b059ca1d0b5065d310e (patch) | |
tree | 1604da8f482d02effa033c94a84be42bc0c848c3 /src/daemon/config | |
parent | Releasing debian version 1.44.3-2. (diff) | |
download | netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.tar.xz netdata-836b47cb7e99a977c5a23b059ca1d0b5065d310e.zip |
Merging upstream version 1.46.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/daemon/config')
-rw-r--r-- | src/daemon/config/README.md | 231 | ||||
-rw-r--r-- | src/daemon/config/dyncfg-echo.c | 175 | ||||
-rw-r--r-- | src/daemon/config/dyncfg-files.c | 264 | ||||
-rw-r--r-- | src/daemon/config/dyncfg-inline.c | 66 | ||||
-rw-r--r-- | src/daemon/config/dyncfg-intercept.c | 429 | ||||
-rw-r--r-- | src/daemon/config/dyncfg-internals.h | 145 | ||||
-rw-r--r-- | src/daemon/config/dyncfg-tree.c | 292 | ||||
-rw-r--r-- | src/daemon/config/dyncfg-unittest.c | 799 | ||||
-rw-r--r-- | src/daemon/config/dyncfg.c | 454 | ||||
-rw-r--r-- | src/daemon/config/dyncfg.h | 34 |
10 files changed, 2889 insertions, 0 deletions
diff --git a/src/daemon/config/README.md b/src/daemon/config/README.md new file mode 100644 index 000000000..c59f55620 --- /dev/null +++ b/src/daemon/config/README.md @@ -0,0 +1,231 @@ +<!-- +title: "Daemon configuration" +description: "The Netdata Agent's daemon is installed preconfigured to collect thousands of metrics every second, but is highly configurable for real-world workloads." +custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/daemon/config/README.md" +sidebar_label: "Daemon" +learn_status: "Published" +learn_rel_path: "Configuration" +learn_doc_purpose: "Explain the daemon options, the log files, the process scheduling, virtual memory, explain how the netdata.conf is used and backlink to the netdata.conf file reference" +--> + +# Daemon configuration + +<details> +<summary>The daemon configuration file is read from /etc/netdata/netdata.conf.</summary> + +Depending on your installation method, Netdata will have been installed either directly under `/`, or +under `/opt/netdata`. The paths mentioned here and in the documentation in general assume that your installation is +under `/`. If it is not, you will find the exact same paths under `/opt/netdata` as well. (i.e. `/etc/netdata` will +be `/opt/netdata/etc/netdata`). + +</details> + +This config file **is not needed by default**. Netdata works fine out of the box without it. But it does allow you to +adapt the general behavior of Netdata, in great detail. You can find all these settings, with their default values, by +accessing the URL `https://netdata.server.hostname:19999/netdata.conf`. For example check the configuration file +of [netdata.firehol.org](http://netdata.firehol.org/netdata.conf). HTTP access to this file is limited by default to +[private IPs](https://en.wikipedia.org/wiki/Private_network), via +the [web server access lists](/src/web/server/README.md#access-lists). + +`netdata.conf` has sections stated with `[section]`. You will see the following sections: + +1. 
`[global]` to [configure](#global-section-options) the [Netdata daemon](/src/daemon/README.md). +2. `[db]` to [configure](#db-section-options) the database of Netdata. +3. `[directories]` to [configure](#directories-section-options) the directories used by Netdata. +4. `[logs]` to [configure](#logs-section-options) the Netdata logging. +5. `[environment variables]` to [configure](#environment-variables-section-options) the environment variables used + by Netdata. +6. `[sqlite]` to [configure](#sqlite-section-options) the [Netdata daemon](/src/daemon/README.md) SQLite settings. +7. `[ml]` to configure settings for [machine learning](/src/ml/README.md). +8. `[health]` to [configure](#health-section-options) general settings for [health monitoring](/src/health/README.md). +9. `[web]` to [configure the web server](/src/web/server/README.md). +10. `[registry]` for the [Netdata registry](/src/registry/README.md). +11. `[global statistics]` for the [Netdata registry](/src/registry/README.md). +12. `[statsd]` for the general settings of the [stats.d.plugin](/src/collectors/statsd.plugin/README.md). +13. `[plugins]` to [configure](#plugins-section-options) which [collectors](/src/collectors/README.md) to use and PATH + settings. +14. `[plugin:NAME]` sections for each collector plugin, under the + comment [Per plugin configuration](#per-plugin-configuration). + +The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When +you check the running configuration by accessing the URL `/netdata.conf` on your Netdata server, Netdata will add a +comment on settings it does not currently use. 
+ +## Applying changes + +After `netdata.conf` has been modified, Netdata needs to be [restarted](/packaging/installer/README.md#maintaining-a-netdata-agent-installation) for +changes to apply: + +```bash +sudo systemctl restart netdata +``` + +If the above does not work, try the following: + +```bash +sudo killall netdata; sleep 10; sudo netdata +``` + +Please note that your data history will be lost if you have modified `history` parameter in section `[global]`. + +## Sections + +### [global] section options + +| setting | default | info | +|:----------------------------------:|:-------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| process scheduling policy | `keep` | See [Netdata process scheduling policy](/src/daemon/README.md#netdata-process-scheduling-policy) | +| OOM score | `0` | | +| glibc malloc arena max for plugins | `1` | See [Virtual memory](/src/daemon/README.md#virtual-memory). | +| glibc malloc arena max for Netdata | `1` | See [Virtual memory](/src/daemon/README.md#virtual-memory). | +| hostname | auto-detected | The hostname of the computer running Netdata. | +| host access prefix | empty | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43). | +| timezone | auto-detected | The timezone retrieved from the environment variable | +| run as user | `netdata` | The user Netdata will run as. 
| +| pthread stack size | auto-detected | | + +### [db] section options + +| setting | default | info | +|:---------------------------------------------:|:----------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size MB` and `dbengine disk space MB`. <br />`ram`: The round-robin database will be temporary and it will be lost when Netdata exits. <br />`alloc`: Similar to `ram`, but can significantly reduce memory usage, when combined with a low retention and does not support KSM. <br />`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. Not to be used together with streaming. | +| retention | `3600` | Used with `mode = ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/src/database/README.md) for more information. | +| storage tiers | `3` | The number of storage tiers you want to have in your dbengine. 
Check the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. | +| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. | +| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier. <br /> `N belongs to [1..4]` | +| dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). | +| dbengine multihost disk space MB | `256` | Same functionality as `dbengine disk space MB`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. This setting is only for _Tier 0_ metrics. | +| dbengine tier **`N`** multihost disk space MB | `256` | Same functionality as `dbengine multihost disk space MB`, but stores metrics of the **`N`** tier (both parent node and its children). Can be used in single-node environments as well. <br /> `N belongs to [1..4]` | +| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). These metrics stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/src/database/engine/README.md#tiering). | +| dbengine tier **`N`** update every iterations | `60` | The down sampling value of each tier from the previous one. For each Tier, the greater by one Tier has N (equal to 60 by default) less data points of any metric it collects. This setting can take values from `2` up to `255`. 
<br /> `N belongs to [1..4]` | +| dbengine tier **`N`** back fill | `New` | Specifies the strategy of recreating missing data on each Tier from the exact lower Tier. <br /> `New`: Sees the latest point on each Tier and saves new points to it only if the exact lower Tier has available points for its observation window (`dbengine tier N update every iterations` window). <br /> `none`: No back filling is applied. <br /> `N belongs to [1..4]` | +| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/src/database/README.md#ksm) | +| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](/src/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions | +| gap when lost iterations above | `1` | | +| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. | +| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. | + +> ### Info +> +>The multiplication of all the **enabled** tiers `dbengine tier N update every iterations` values must be less than `65535`. + +### [directories] section options + +| setting | default | info | +|:-------------------:|:------------------------------------------------------------------:|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| config | `/etc/netdata` | The directory configuration files are kept. 
| +| stock config | `/usr/lib/netdata/conf.d` | | +| log | `/var/log/netdata` | The directory in which the [log files](/src/daemon/README.md#log-files) are kept. | +| web | `/usr/share/netdata/web` | The directory the web static files are kept. | +| cache | `/var/cache/netdata` | The directory the memory database will be stored if and when Netdata exits. Netdata will re-read the database when it will start again, to continue from the same point. | +| lib | `/var/lib/netdata` | Contains the alert log and the Netdata instance GUID. | +| home | `/var/cache/netdata` | Contains the db files for the collected metrics. | +| lock | `/var/lib/netdata/lock` | Contains the data collectors lock files. | +| plugins | `"/usr/libexec/netdata/plugins.d" "/etc/netdata/custom-plugins.d"` | The directory plugin programs are kept. This setting supports multiple directories, space separated. If any directory path contains spaces, enclose it in single or double quotes. | +| health config | `/etc/netdata/health.d` | The directory containing the user alert configuration files, to override the stock configurations | +| stock health config | `/usr/lib/netdata/conf.d/health.d` | Contains the stock alert configuration files for each collector | +| registry | `/opt/netdata/var/lib/netdata/registry` | Contains the [registry](/src/registry/README.md) database and GUID that uniquely identifies each Netdata Agent | + +### [logs] section options + +| setting | default | info | +|:----------------------------------:|:-----------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| debug flags | `0x0000000000000000` | Bitmap of debug options to enable. 
For more information check [Tracing Options](/src/daemon/README.md#debugging). | +| debug | `/var/log/netdata/debug.log` | The filename to save debug information. This file will not be created if debugging is not enabled. You can also set it to `syslog` to send the debug messages to syslog, or `none` to disable this log. For more information check [Tracing Options](/src/daemon/README.md#debugging). | +| error | `/var/log/netdata/error.log` | The filename to save error messages for Netdata daemon and all plugins (`stderr` is sent here for all Netdata programs, including the plugins). You can also set it to `syslog` to send the errors to syslog, or `none` to disable this log. | +| access | `/var/log/netdata/access.log` | The filename to save the log of web clients accessing Netdata charts. You can also set it to `syslog` to send the access log to syslog, or `none` to disable this log. | +| facility | `daemon` | A facility keyword is used to specify the type of system that is logging the message. | +| errors flood protection period | `1200` | Length of period (in sec) during which the number of errors should not exceed the `errors to trigger flood protection`. | +| errors to trigger flood protection | `200` | Number of errors written to the log in `errors flood protection period` sec before flood protection is activated. | +| severity level | `info` | Controls which log messages are logged, with error being the most important. Supported values: `info` and `error`. 
| + +### [environment variables] section options + +| setting | default | info | +|:----------:|:-----------------:|:-----------------------------------------------------------| +| TZ | `:/etc/localtime` | Where to find the timezone | +| PATH | `auto-detected` | Specifies the directories to be searched to find a command | +| PYTHONPATH | | Used to set a custom python path | + +### [sqlite] section options + +| setting | default | info | +|:------------------:|:-------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| auto vacuum | `INCREMENTAL` | The [auto-vacuum status](https://www.sqlite.org/pragma.html#pragma_auto_vacuum) in the database | +| synchronous | `NORMAL` | The setting of the ["synchronous"](https://www.sqlite.org/pragma.html#pragma_synchronous) flag | +| journal mode | `WAL` | The [journal mode](https://www.sqlite.org/pragma.html#pragma_journal_mode) for databases | +| temp store | `MEMORY` | Used to determine where [temporary tables and indices are stored](https://www.sqlite.org/pragma.html#pragma_temp_store) | +| journal size limit | `16777216` | Used to set a new [limit in bytes for the database](https://www.sqlite.org/pragma.html#pragma_journal_size_limit) | +| cache size | `-2000` | Used to [suggest the maximum number of database disk pages](https://www.sqlite.org/pragma.html#pragma_cache_size) that SQLite will hold in memory at once per open database file | + +### [health] section options + +This section controls the general behavior of the health monitoring capabilities of Netdata. + +Specific alerts are configured in per-collector config files under the `health.d` directory. For more info, see [health +monitoring](/src/health/README.md). + +[Alert notifications](/src/health/notifications/README.md) are configured in `health_alarm_notify.conf`. 
+ +| setting | default | info | +|:----------------------------------------------:|:------------------------------------------------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| enabled | `yes` | Set to `no` to disable all alerts and notifications | +| in memory max health log entries | 1000 | Size of the alert history held in RAM | +| script to execute on alarm | `/usr/libexec/netdata/plugins.d/alarm-notify.sh` | The script that sends alert notifications. Note that in versions before 1.16, the plugins.d directory may be installed in a different location in certain OSs (e.g. under `/usr/lib/netdata`). | +| run at least every seconds | `10` | Controls how often all alert conditions should be evaluated. | +| postpone alarms during hibernation for seconds | `60` | Prevents false alerts. May need to be increased if you get alerts during hibernation. | +| health log history | `432000` | Specifies the history of alert events (in seconds) kept in the agent's sqlite database. | +| enabled alarms | * | Defines which alerts to load from both user and stock directories. This is a [simple pattern](/src/libnetdata/simple_pattern/README.md) list of alert or template names. Can be used to disable specific alerts. For example, `enabled alarms = !oom_kill *` will load all alerts except `oom_kill`. | + +### [web] section options + +Refer to the [web server documentation](/src/web/server/README.md) + +### [plugins] section options + +In this section you will see be a boolean (`yes`/`no`) option for each plugin (e.g. tc, cgroups, apps, proc etc.). 
Note +that the configuration options in this section for the orchestrator plugins `python.d` and `charts.d` control **all the +modules** written for that orchestrator. For instance, setting `python.d = no` means that all Python modules +under `collectors/python.d.plugin` will be disabled. + +Additionally, there will be the following options: + +| setting | default | info | +|:-------------------------------:|:---------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| enable running new plugins | `yes` | When set to `yes`, Netdata will enable detected plugins, even if they are not configured explicitly. Setting this to `no` will only enable plugins explicitly configured in this file with a `yes` | +| check for new plugins every | 60 | The time in seconds to check for new plugins in the plugins directory. This allows having other applications dynamically creating plugins for Netdata. | +| checks | `no` | This is a debugging plugin for the internal latency | + +### [registry] section options + +To understand what this section is and how it should be configured, please refer to +the [registry documentation](/src/registry/README.md). + +## Per-plugin configuration + +The configuration options for plugins appear in sections following the pattern `[plugin:NAME]`. + +### Internal plugins + +Most internal plugins will provide additional options. Check [Internal Plugins](/src/collectors/README.md) for more +information. + +Please note, that by default Netdata will enable monitoring metrics for disks, memory, and network only when they are +not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, +will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them +to appear though). 
Use `yes` instead of `auto` in plugin configuration sections to enable these charts permanently. You +can also set the `enable zero metrics` option to `yes` in the `[global]` section which enables charts with zero metrics +for all internal Netdata plugins. + +### External plugins + +External plugins will have only 2 options at `netdata.conf`: + +| setting | default | info | +|:---------------:|:--------------------------------------------:|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| update every | the value of `[global].update every` setting | The frequency in seconds the plugin should collect values. For more information check the [performance guide](/docs/netdata-agent/configuration/optimize-the-netdata-agents-performance.md). | +| command options | - | Additional command line options to pass to the plugin. | + +External plugins that need additional configuration may support a dedicated file in `/etc/netdata`. Check their +documentation. + diff --git a/src/daemon/config/dyncfg-echo.c b/src/daemon/config/dyncfg-echo.c new file mode 100644 index 000000000..95d40a025 --- /dev/null +++ b/src/daemon/config/dyncfg-echo.c @@ -0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "dyncfg-internals.h" +#include "dyncfg.h" + +// ---------------------------------------------------------------------------- +// echo is when we send requests to plugins without any caller +// it is used for: +// 1. the first enable/disable requests we send, and also +// 2. updates to stock or user configurations +// 3. 
saved dynamic jobs we need to add to templates + +struct dyncfg_echo { + const DICTIONARY_ITEM *item; + DYNCFG *df; // for additions this is the job, not the template + BUFFER *wb; + DYNCFG_CMDS cmd; + const char *cmd_str; +}; + +void dyncfg_echo_cb(BUFFER *wb __maybe_unused, int code __maybe_unused, void *result_cb_data) { + struct dyncfg_echo *e = result_cb_data; + DYNCFG *df = e->df; + + if(DYNCFG_RESP_SUCCESS(code)) { + // successful response + + if(e->cmd == DYNCFG_CMD_ADD) { + df->dyncfg.status = dyncfg_status_from_successful_response(code); + dyncfg_update_status_on_successful_add_or_update(df, code); + } + else if(e->cmd == DYNCFG_CMD_UPDATE) { + df->dyncfg.status = dyncfg_status_from_successful_response(code); + dyncfg_update_status_on_successful_add_or_update(df, code); + } + else if(e->cmd == DYNCFG_CMD_DISABLE) + df->dyncfg.status = df->current.status = DYNCFG_STATUS_DISABLED; + else if(e->cmd == DYNCFG_CMD_ENABLE) + df->dyncfg.status = df->current.status = dyncfg_status_from_successful_response(code); + } + else { + // failed response + + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: received response code %d on request to id '%s', cmd: %s", + code, dictionary_acquired_item_name(e->item), e->cmd_str); + + if(e->cmd == DYNCFG_CMD_UPDATE || e->cmd == DYNCFG_CMD_ADD) + e->df->dyncfg.plugin_rejected = true; + } + + buffer_free(e->wb); + dictionary_acquired_item_release(dyncfg_globals.nodes, e->item); + + e->wb = NULL; + e->df = NULL; + e->item = NULL; + freez((void *)e->cmd_str); + e->cmd_str = NULL; + freez(e); +} + +// ---------------------------------------------------------------------------- + +void dyncfg_echo(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id __maybe_unused, DYNCFG_CMDS cmd) { + RRDHOST *host = dyncfg_rrdhost(df); + if(!host) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot find host of configuration id '%s'", id); + return; + } + + if(!(df->cmds & cmd)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: attempted to echo a cmd that is not 
supported"); + return; + } + + const char *cmd_str = dyncfg_id2cmd_one(cmd); + if(!cmd_str) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: command given does not resolve to a known command"); + return; + } + + struct dyncfg_echo *e = callocz(1, sizeof(struct dyncfg_echo)); + e->item = dictionary_acquired_item_dup(dyncfg_globals.nodes, item); + e->wb = buffer_create(0, NULL); + e->df = df; + e->cmd = cmd; + e->cmd_str = strdupz(cmd_str); + + char buf[string_strlen(df->function) + strlen(e->cmd_str) + 20]; + snprintfz(buf, sizeof(buf), "%s %s", string2str(df->function), e->cmd_str); + + rrd_function_run( + host, e->wb, 10, + HTTP_ACCESS_ALL, buf, false, NULL, + dyncfg_echo_cb, e, + NULL, NULL, + NULL, NULL, + NULL, string2str(df->dyncfg.source)); +} + +// ---------------------------------------------------------------------------- + +void dyncfg_echo_update(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id) { + RRDHOST *host = dyncfg_rrdhost(df); + if(!host) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot find host of configuration id '%s'", id); + return; + } + + if(!df->dyncfg.payload) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: requested to send an update to '%s', but there is no payload", id); + return; + } + + struct dyncfg_echo *e = callocz(1, sizeof(struct dyncfg_echo)); + e->item = dictionary_acquired_item_dup(dyncfg_globals.nodes, item); + e->wb = buffer_create(0, NULL); + e->df = df; + e->cmd = DYNCFG_CMD_UPDATE; + e->cmd_str = strdupz("update"); + + char buf[string_strlen(df->function) + strlen(e->cmd_str) + 20]; + snprintfz(buf, sizeof(buf), "%s %s", string2str(df->function), e->cmd_str); + + rrd_function_run( + host, e->wb, 10, + HTTP_ACCESS_ALL, buf, false, NULL, + dyncfg_echo_cb, e, + NULL, NULL, + NULL, NULL, + df->dyncfg.payload, string2str(df->dyncfg.source)); +} + +// ---------------------------------------------------------------------------- + +static void dyncfg_echo_payload_add(const DICTIONARY_ITEM *item_template __maybe_unused, const 
DICTIONARY_ITEM *item_job, DYNCFG *df_template, DYNCFG *df_job, const char *id_template, const char *cmd) { + RRDHOST *host = dyncfg_rrdhost(df_template); + if(!host) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot find host of configuration id '%s'", id_template); + return; + } + + if(!df_job->dyncfg.payload) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: requested to send a '%s' to '%s', but there is no payload", + cmd, id_template); + return; + } + + struct dyncfg_echo *e = callocz(1, sizeof(struct dyncfg_echo)); + e->item = dictionary_acquired_item_dup(dyncfg_globals.nodes, item_job); + e->wb = buffer_create(0, NULL); + e->df = df_job; + e->cmd = DYNCFG_CMD_ADD; + e->cmd_str = strdupz(cmd); + + char buf[string_strlen(df_template->function) + strlen(cmd) + 20]; + snprintfz(buf, sizeof(buf), "%s %s", string2str(df_template->function), cmd); + + rrd_function_run( + host, e->wb, 10, + HTTP_ACCESS_ALL, buf, false, NULL, + dyncfg_echo_cb, e, + NULL, NULL, + NULL, NULL, + df_job->dyncfg.payload, string2str(df_job->dyncfg.source)); +} + +void dyncfg_echo_add(const DICTIONARY_ITEM *item_template, const DICTIONARY_ITEM *item_job, DYNCFG *df_template, DYNCFG *df_job, const char *template_id, const char *job_name) { + char buf[strlen(job_name) + 20]; + snprintfz(buf, sizeof(buf), "add %s", job_name); + dyncfg_echo_payload_add(item_template, item_job, df_template, df_job, template_id, buf); +} + diff --git a/src/daemon/config/dyncfg-files.c b/src/daemon/config/dyncfg-files.c new file mode 100644 index 000000000..81b56918f --- /dev/null +++ b/src/daemon/config/dyncfg-files.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "dyncfg-internals.h" +#include "dyncfg.h" + +void dyncfg_file_delete(const char *id) { + CLEAN_CHAR_P *escaped_id = dyncfg_escape_id_for_filename(id); + char filename[FILENAME_MAX]; + snprintfz(filename, sizeof(filename), "%s/%s.dyncfg", dyncfg_globals.dir, escaped_id); + unlink(filename); +} + +void dyncfg_file_save(const char 
*id, DYNCFG *df) { + CLEAN_CHAR_P *escaped_id = dyncfg_escape_id_for_filename(id); + char filename[FILENAME_MAX]; + snprintfz(filename, sizeof(filename), "%s/%s.dyncfg", dyncfg_globals.dir, escaped_id); + + FILE *fp = fopen(filename, "w"); + if(!fp) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot create file '%s'", filename); + return; + } + + df->dyncfg.modified_ut = now_realtime_usec(); + if(!df->dyncfg.created_ut) + df->dyncfg.created_ut = df->dyncfg.modified_ut; + + fprintf(fp, "version=%zu\n", DYNCFG_VERSION); + fprintf(fp, "id=%s\n", id); + + if(df->template) + fprintf(fp, "template=%s\n", string2str(df->template)); + + char uuid_str[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(df->host_uuid.uuid, uuid_str); + fprintf(fp, "host=%s\n", uuid_str); + + fprintf(fp, "path=%s\n", string2str(df->path)); + fprintf(fp, "type=%s\n", dyncfg_id2type(df->type)); + + fprintf(fp, "source_type=%s\n", dyncfg_id2source_type(df->dyncfg.source_type)); + fprintf(fp, "source=%s\n", string2str(df->dyncfg.source)); + + fprintf(fp, "created=%"PRIu64"\n", df->dyncfg.created_ut); + fprintf(fp, "modified=%"PRIu64"\n", df->dyncfg.modified_ut); + fprintf(fp, "sync=%s\n", df->sync ? "true" : "false"); + fprintf(fp, "user_disabled=%s\n", df->dyncfg.user_disabled ? 
"true" : "false"); + fprintf(fp, "saves=%"PRIu32"\n", ++df->dyncfg.saves); + + fprintf(fp, "cmds="); + dyncfg_cmds2fp(df->cmds, fp); + fprintf(fp, "\n"); + + if(df->dyncfg.payload && buffer_strlen(df->dyncfg.payload) > 0) { + fprintf(fp, "content_type=%s\n", content_type_id2string(df->dyncfg.payload->content_type)); + fprintf(fp, "content_length=%zu\n", buffer_strlen(df->dyncfg.payload)); + fprintf(fp, "---\n"); + fwrite(buffer_tostring(df->dyncfg.payload), 1, buffer_strlen(df->dyncfg.payload), fp); + } + + fclose(fp); +} + +void dyncfg_file_load(const char *d_name) { + char filename[PATH_MAX]; + snprintf(filename, sizeof(filename), "%s/%s", dyncfg_globals.dir, d_name); + + FILE *fp = fopen(filename, "r"); + if (!fp) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot open file '%s'", filename); + return; + } + + DYNCFG tmp = { 0 }; + + char line[PLUGINSD_LINE_MAX]; + CLEAN_CHAR_P *id = NULL; + + HTTP_CONTENT_TYPE content_type = CT_NONE; + size_t content_length = 0; + bool read_payload = false; + + while (fgets(line, sizeof(line), fp)) { + if(strcmp(line, "---\n") == 0) { + read_payload = true; + break; + } + + char *value = strchr(line, '='); + if(!value) continue; + + *value++ = '\0'; + + value = trim(value); + if(!value) continue; + + char *key = trim(line); + if(!key) continue; + + // Parse key-value pairs + if (strcmp(key, "version") == 0) { + size_t version = strtoull(value, NULL, 10); + + if(version > DYNCFG_VERSION) + nd_log(NDLS_DAEMON, NDLP_NOTICE, + "DYNCFG: configuration file '%s' has version %zu, which is newer than our version %zu", + filename, version, DYNCFG_VERSION); + + } else if (strcmp(key, "id") == 0) { + freez(id); + id = strdupz(value); + } else if (strcmp(key, "template") == 0) { + tmp.template = string_strdupz(value); + } else if (strcmp(key, "host") == 0) { + uuid_parse_flexi(value, tmp.host_uuid.uuid); + } else if (strcmp(key, "path") == 0) { + tmp.path = string_strdupz(value); + } else if (strcmp(key, "type") == 0) { + tmp.type = 
dyncfg_type2id(value);
        } else if (strcmp(key, "source_type") == 0) {
            tmp.dyncfg.source_type = dyncfg_source_type2id(value);
        } else if (strcmp(key, "source") == 0) {
            tmp.dyncfg.source = string_strdupz(value);
        } else if (strcmp(key, "created") == 0) {
            tmp.dyncfg.created_ut = strtoull(value, NULL, 10);
        } else if (strcmp(key, "modified") == 0) {
            tmp.dyncfg.modified_ut = strtoull(value, NULL, 10);
        } else if (strcmp(key, "sync") == 0) {
            tmp.sync = (strcmp(value, "true") == 0);
        } else if (strcmp(key, "user_disabled") == 0) {
            tmp.dyncfg.user_disabled = (strcmp(value, "true") == 0);
        } else if (strcmp(key, "saves") == 0) {
            tmp.dyncfg.saves = strtoull(value, NULL, 10);
        } else if (strcmp(key, "content_type") == 0) {
            content_type = content_type_string2id(value);
        } else if (strcmp(key, "content_length") == 0) {
            content_length = strtoull(value, NULL, 10);
        } else if (strcmp(key, "cmds") == 0) {
            tmp.cmds = dyncfg_cmds2id(value);
        }
    }

    // the payload (if any) follows the "---" separator line that terminated the
    // key=value header loop above; read_payload is only true in that case
    if (read_payload) {
        // Determine the actual size of the remaining file content
        long saved_position = ftell(fp); // Save current position
        fseek(fp, 0, SEEK_END);
        long total_size = ftell(fp); // Total size of the file
        size_t actual_size = total_size - saved_position; // Calculate remaining content size
        // NOTE(review): ftell/fseek return values are not checked here — a failure
        // would make actual_size bogus; TODO confirm this is acceptable for these
        // daemon-private files
        fseek(fp, saved_position, SEEK_SET); // Reset file pointer to the beginning of the payload

        // Use actual_size instead of content_length to handle the whole remaining file
        tmp.dyncfg.payload = buffer_create(actual_size, NULL);
        tmp.dyncfg.payload->content_type = content_type;

        buffer_need_bytes(tmp.dyncfg.payload, actual_size);
        tmp.dyncfg.payload->len = fread(tmp.dyncfg.payload->buffer, 1, actual_size, fp);

        // sanity check: the header-declared content_length should match what we read
        // NOTE(review): the comparison uses payload->len but the message prints
        // actual_size — these differ only when fread() returned short
        if (content_length != tmp.dyncfg.payload->len) {
            nd_log(NDLS_DAEMON, NDLP_WARNING,
                   "DYNCFG: content_length %zu does not match actual payload size %zu for file '%s'",
                   content_length, actual_size, filename);
        }
    }

    fclose(fp);

    // a file without an id cannot be attached to a configuration node
    if(!id) {
        nd_log(NDLS_DAEMON, NDLP_ERR,
+ "DYNCFG: configuration file '%s' does not include a unique id. Ignoring it.", + filename); + + dyncfg_cleanup(&tmp); + return; + } + + tmp.dyncfg.status = DYNCFG_STATUS_ORPHAN; + tmp.dyncfg.restart_required = false; + + dyncfg_set_current_from_dyncfg(&tmp); + + dictionary_set(dyncfg_globals.nodes, id, &tmp, sizeof(tmp)); + + // check if we need to rename the file + CLEAN_CHAR_P *fixed_id = dyncfg_escape_id_for_filename(id); + char fixed_filename[PATH_MAX]; + snprintf(fixed_filename, sizeof(fixed_filename), "%s/%s.dyncfg", dyncfg_globals.dir, fixed_id); + + if(strcmp(filename, fixed_filename) != 0) { + if(rename(filename, fixed_filename) != 0) + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: cannot rename file '%s' into '%s'. Saving a new configuraton may not overwrite the old one.", + filename, fixed_filename); + } +} + +void dyncfg_load_all(void) { + DIR *dir = opendir(dyncfg_globals.dir); + if (!dir) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot open directory '%s'", dyncfg_globals.dir); + return; + } + + struct dirent *entry; + while ((entry = readdir(dir)) != NULL) { + if ((entry->d_type == DT_REG || entry->d_type == DT_LNK) && strendswith(entry->d_name, ".dyncfg")) + dyncfg_file_load(entry->d_name); + } + + closedir(dir); +} + +// ---------------------------------------------------------------------------- +// schemas loading + +static bool dyncfg_read_file_to_buffer(const char *filename, BUFFER *dst) { + int fd = open(filename, O_RDONLY | O_CLOEXEC, 0666); + if(unlikely(fd == -1)) + return false; + + struct stat st = { 0 }; + if(fstat(fd, &st) != 0) { + close(fd); + return false; + } + + buffer_flush(dst); + buffer_need_bytes(dst, st.st_size + 1); // +1 for the terminating zero + + ssize_t r = read(fd, (char*)dst->buffer, st.st_size); + if(unlikely(r == -1)) { + close(fd); + return false; + } + dst->len = r; + dst->buffer[dst->len] = '\0'; + + close(fd); + return true; +} + +static bool dyncfg_get_schema_from(const char *dir, const char *id, BUFFER *dst) { + 
char filename[FILENAME_MAX + 1]; + + CLEAN_CHAR_P *escaped_id = dyncfg_escape_id_for_filename(id); + snprintfz(filename, sizeof(filename), "%s/schema.d/%s.json", dir, escaped_id); + if(dyncfg_read_file_to_buffer(filename, dst)) + return true; + + snprintfz(filename, sizeof(filename), "%s/schema.d/%s.json", dir, id); + if(dyncfg_read_file_to_buffer(filename, dst)) + return true; + + return false; +} + +bool dyncfg_get_schema(const char *id, BUFFER *dst) { + if(dyncfg_get_schema_from(netdata_configured_user_config_dir, id, dst)) + return true; + + if(dyncfg_get_schema_from(netdata_configured_stock_config_dir, id, dst)) + return true; + + return false; +} diff --git a/src/daemon/config/dyncfg-inline.c b/src/daemon/config/dyncfg-inline.c new file mode 100644 index 000000000..bed912e57 --- /dev/null +++ b/src/daemon/config/dyncfg-inline.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "dyncfg.h" + +static DICTIONARY *dyncfg_nodes = NULL; + +static int dyncfg_inline_callback(struct rrd_function_execute *rfe, void *data __maybe_unused) { + char tr[UUID_COMPACT_STR_LEN]; + uuid_unparse_lower_compact(*rfe->transaction, tr); + + bool cancelled = rfe->is_cancelled.cb ? 
rfe->is_cancelled.cb(rfe->is_cancelled.data) : false; + + int code; + if(cancelled) + code = HTTP_RESP_CLIENT_CLOSED_REQUEST; + else + code = dyncfg_node_find_and_call(dyncfg_nodes, tr, rfe->function, rfe->stop_monotonic_ut, &cancelled, + rfe->payload, rfe->user_access, rfe->source, rfe->result.wb); + + if(code == HTTP_RESP_CLIENT_CLOSED_REQUEST || (rfe->is_cancelled.cb && rfe->is_cancelled.cb(rfe->is_cancelled.data))) { + buffer_flush(rfe->result.wb); + code = HTTP_RESP_CLIENT_CLOSED_REQUEST; + } + + if(rfe->result.cb) + rfe->result.cb(rfe->result.wb, code, rfe->result.data); + + return code; +} + +bool dyncfg_add(RRDHOST *host, const char *id, const char *path, + DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, + DYNCFG_CMDS cmds, HTTP_ACCESS view_access, HTTP_ACCESS edit_access, + dyncfg_cb_t cb, void *data) { + + struct dyncfg_node tmp = { + .cmds = cmds, + .type = type, + .cb = cb, + .data = data, + }; + dictionary_set(dyncfg_nodes, id, &tmp, sizeof(tmp)); + + if(!dyncfg_add_low_level(host, id, path, status, type, source_type, source, cmds, + 0, 0, true, view_access, edit_access, + dyncfg_inline_callback, NULL)) { + dictionary_del(dyncfg_nodes, id); + return false; + } + + return true; +} + +void dyncfg_del(RRDHOST *host, const char *id) { + dictionary_del(dyncfg_nodes, id); + dyncfg_del_low_level(host, id); +} + +void dyncfg_status(RRDHOST *host, const char *id, DYNCFG_STATUS status) { + dyncfg_status_low_level(host, id, status); +} + +void dyncfg_init(bool load_saved) { + dyncfg_nodes = dyncfg_nodes_dictionary_create(); + dyncfg_init_low_level(load_saved); +} diff --git a/src/daemon/config/dyncfg-intercept.c b/src/daemon/config/dyncfg-intercept.c new file mode 100644 index 000000000..65f8383ed --- /dev/null +++ b/src/daemon/config/dyncfg-intercept.c @@ -0,0 +1,429 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "dyncfg-internals.h" +#include "dyncfg.h" + +// 
----------------------------------------------------------------------------
// we intercept the config function calls of the plugin

// Per-call context captured before forwarding a config function to the
// plugin; released in dyncfg_function_intercept_result_cb().
struct dyncfg_call {
    BUFFER *payload;
    char *function;
    char *id;
    char *add_name;
    char *source;
    DYNCFG_CMDS cmd;
    rrd_function_result_callback_t result_cb;     // the caller's original result callback
    void *result_cb_data;
    bool from_dyncfg_echo;                        // true when dyncfg itself replayed the command
};

// After a plugin accepted an ADD on a template, register the new job node
// "<template-id>:<add-name>", attach the payload/source to it, and persist it.
static void dyncfg_function_intercept_job_successfully_added(DYNCFG *df_template, int code, struct dyncfg_call *dc) {
    // the job id is "<template id>:<job name>"
    char id[strlen(dc->id) + 1 + strlen(dc->add_name) + 1];
    snprintfz(id, sizeof(id), "%s:%s", dc->id, dc->add_name);

    RRDHOST *host = dyncfg_rrdhost(df_template);
    if(!host) {
        nd_log(NDLS_DAEMON, NDLP_ERR,
               "DYNCFG: cannot add job '%s' because host is missing", id);
    }
    else {
        // the job inherits path/sync/access/callbacks from its template;
        // ADD is stripped from its commands, job-level commands are added
        const DICTIONARY_ITEM *item = dyncfg_add_internal(
            host,
            id,
            string2str(df_template->path),
            dyncfg_status_from_successful_response(code),
            DYNCFG_TYPE_JOB,
            DYNCFG_SOURCE_TYPE_DYNCFG,
            dc->source,
            (df_template->cmds & ~DYNCFG_CMD_ADD) | DYNCFG_CMD_GET | DYNCFG_CMD_UPDATE | DYNCFG_CMD_TEST |
                DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE | DYNCFG_CMD_REMOVE,
            0,
            0,
            df_template->sync,
            df_template->view_access,
            df_template->edit_access,
            df_template->execute_cb,
            df_template->execute_cb_data,
            false);

        // adding does not create df->dyncfg
        // we have to do it here

        DYNCFG *df = dictionary_acquired_item_value(item);
        SWAP(df->dyncfg.payload, dc->payload); // take ownership of the call payload
        dyncfg_set_dyncfg_source_from_txt(df, dc->source);
        df->dyncfg.user_disabled = false;
        df->dyncfg.source_type = DYNCFG_SOURCE_TYPE_DYNCFG;
        df->dyncfg.status = dyncfg_status_from_successful_response(code);

        dyncfg_file_save(id, df); // updates also the df->dyncfg timestamps
        dyncfg_update_status_on_successful_add_or_update(df, code);

        dictionary_acquired_item_release(dyncfg_globals.nodes, item);
    }
}

// After a plugin accepted an UPDATE, refresh the node's dyncfg state
// (status, source, payload) from the call context.
static void dyncfg_function_intercept_job_successfully_updated(DYNCFG *df, int code, struct dyncfg_call *dc) {
df->dyncfg.status = dyncfg_status_from_successful_response(code); + df->dyncfg.source_type = DYNCFG_SOURCE_TYPE_DYNCFG; + SWAP(df->dyncfg.payload, dc->payload); + dyncfg_set_dyncfg_source_from_txt(df, dc->source); + + dyncfg_update_status_on_successful_add_or_update(df, code); +} + +void dyncfg_function_intercept_result_cb(BUFFER *wb, int code, void *result_cb_data) { + struct dyncfg_call *dc = result_cb_data; + + bool called_from_dyncfg_echo = dc->from_dyncfg_echo; + + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item_advanced(dyncfg_globals.nodes, dc->id, -1); + if(item) { + DYNCFG *df = dictionary_acquired_item_value(item); + bool old_user_disabled = df->dyncfg.user_disabled; + bool save_required = false; + + if (!called_from_dyncfg_echo) { + // the command was sent by a user + + if (DYNCFG_RESP_SUCCESS(code)) { + if (dc->cmd == DYNCFG_CMD_ADD) { + dyncfg_function_intercept_job_successfully_added(df, code, dc); + } else if (dc->cmd == DYNCFG_CMD_UPDATE) { + dyncfg_function_intercept_job_successfully_updated(df, code, dc); + save_required = true; + } + else if (dc->cmd == DYNCFG_CMD_ENABLE) { + df->dyncfg.user_disabled = false; + } + else if (dc->cmd == DYNCFG_CMD_DISABLE) { + df->dyncfg.user_disabled = true; + } + else if (dc->cmd == DYNCFG_CMD_REMOVE) { + dyncfg_file_delete(dc->id); + dictionary_del(dyncfg_globals.nodes, dc->id); + } + + if (save_required || old_user_disabled != df->dyncfg.user_disabled) + dyncfg_file_save(dc->id, df); + } + else + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: plugin returned code %d to user initiated call: %s", code, dc->function); + } + else { + // the command was sent by dyncfg + // these are handled by the echo callback, we don't need to do anything here + ; + } + + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + } + + if(dc->result_cb) + dc->result_cb(wb, code, dc->result_cb_data); + + buffer_free(dc->payload); + freez(dc->function); + freez(dc->id); + freez(dc->source); + freez(dc->add_name); + 
freez(dc);
}

// ----------------------------------------------------------------------------

// Replay command 'c' (enable/disable/restart) on every job that belongs to
// template 'template_id', reporting progress via rfe->progress when available.
static void dyncfg_apply_action_on_all_template_jobs(struct rrd_function_execute *rfe, const char *template_id, DYNCFG_CMDS c) {
    STRING *template = string_strdupz(template_id);
    DYNCFG *df;

    // first pass: count the matching jobs, so progress can be reported as done/all
    size_t all = 0, done = 0;
    dfe_start_read(dyncfg_globals.nodes, df) {
        if(df->template == template && df->type == DYNCFG_TYPE_JOB)
            all++;
    }
    dfe_done(df);

    if(rfe->progress.cb)
        rfe->progress.cb(rfe->progress.data, done, all);

    // second pass: send the command to each job (reentrant: dyncfg_echo may touch the dictionary)
    dfe_start_reentrant(dyncfg_globals.nodes, df) {
        if(df->template == template && df->type == DYNCFG_TYPE_JOB) {
            DYNCFG_CMDS cmd_to_send_to_plugin = c;

            // enabling a template does not enable jobs the user disabled individually
            if(c == DYNCFG_CMD_ENABLE)
                cmd_to_send_to_plugin = df->dyncfg.user_disabled ? DYNCFG_CMD_DISABLE : DYNCFG_CMD_ENABLE;
            else if(c == DYNCFG_CMD_DISABLE)
                cmd_to_send_to_plugin = DYNCFG_CMD_DISABLE;

            dyncfg_echo(df_dfe.item, df, df_dfe.name, cmd_to_send_to_plugin);

            if(rfe->progress.cb)
                rfe->progress.cb(rfe->progress.data, ++done, all);
        }
    }
    dfe_done(df);

    string_freez(template);
}

// ----------------------------------------------------------------------------
// the callback for all config functions

// Reply 'msg' with HTTP code 'rc' and invoke the caller's result callback;
// used for validation failures before the plugin is ever called.
static int dyncfg_intercept_early_error(struct rrd_function_execute *rfe, int rc, const char *msg) {
    rc = dyncfg_default_response(rfe->result.wb, rc, msg);

    if(rfe->result.cb)
        rfe->result.cb(rfe->result.wb, rc, rfe->result.data);

    return rc;
}

// For a job id of the form "<template>:<name>", return the acquired template
// node, or NULL when there is no colon, no such node, or the node is not a
// template. The caller must release the returned item.
const DICTIONARY_ITEM *dyncfg_get_template_of_new_job(const char *job_id) {
    char id_copy[strlen(job_id) + 1];
    memcpy(id_copy, job_id, sizeof(id_copy));

    // strip the ":<name>" suffix to get the template id
    char *colon = strrchr(id_copy, ':');
    if(!colon) return NULL;

    *colon = '\0';
    const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id_copy);
    if(!item) return NULL;

    DYNCFG *df = dictionary_acquired_item_value(item);
    if(df->type != DYNCFG_TYPE_TEMPLATE) {
dictionary_acquired_item_release(dyncfg_globals.nodes, item); + return NULL; + } + + return item; +} + +int dyncfg_function_intercept_cb(struct rrd_function_execute *rfe, void *data __maybe_unused) { + + // IMPORTANT: this function MUST call the result_cb even on failures + + bool called_from_dyncfg_echo = rrd_function_has_this_original_result_callback(rfe->transaction, dyncfg_echo_cb); + bool has_payload = rfe->payload && buffer_strlen(rfe->payload) ? true : false; + bool make_the_call_to_plugin = true; + + int rc = HTTP_RESP_INTERNAL_SERVER_ERROR; + DYNCFG_CMDS cmd; + const DICTIONARY_ITEM *item = NULL; + + char buf[strlen(rfe->function) + 1]; + memcpy(buf, rfe->function, sizeof(buf)); + + char *words[20]; + size_t num_words = quoted_strings_splitter_pluginsd(buf, words, 20); + + size_t i = 0; + char *config = get_word(words, num_words, i++); + char *id = get_word(words, num_words, i++); + char *cmd_str = get_word(words, num_words, i++); + char *add_name = get_word(words, num_words, i++); + + if(!config || !*config || strcmp(config, PLUGINSD_FUNCTION_CONFIG) != 0) + return dyncfg_intercept_early_error( + rfe, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: this is not a dyncfg request"); + + cmd = dyncfg_cmds2id(cmd_str); + if(cmd == DYNCFG_CMD_NONE) + return dyncfg_intercept_early_error( + rfe, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: invalid command received"); + + if(cmd == DYNCFG_CMD_ADD || cmd == DYNCFG_CMD_TEST || cmd == DYNCFG_CMD_USERCONFIG) { + if(cmd == DYNCFG_CMD_TEST && (!add_name || !*add_name)) { + // backwards compatibility for TEST without a name + char *colon = strrchr(id, ':'); + if(colon) { + *colon = '\0'; + add_name = ++colon; + } + else + add_name = "test"; + } + + if(!add_name || !*add_name) + return dyncfg_intercept_early_error( + rfe, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: this action requires a name"); + + if(!called_from_dyncfg_echo) { + char nid[strlen(id) + strlen(add_name) + 2]; + snprintfz(nid, 
sizeof(nid), "%s:%s", id, add_name); + + if (cmd == DYNCFG_CMD_ADD && dictionary_get(dyncfg_globals.nodes, nid)) + return dyncfg_intercept_early_error( + rfe, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: a configuration with this name already exists"); + } + } + + if((cmd == DYNCFG_CMD_ADD || cmd == DYNCFG_CMD_UPDATE || cmd == DYNCFG_CMD_TEST || cmd == DYNCFG_CMD_USERCONFIG) && !has_payload) + return dyncfg_intercept_early_error( + rfe, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: this action requires a payload"); + + if((cmd != DYNCFG_CMD_ADD && cmd != DYNCFG_CMD_UPDATE && cmd != DYNCFG_CMD_TEST && cmd != DYNCFG_CMD_USERCONFIG) && has_payload) + return dyncfg_intercept_early_error( + rfe, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: this action does not require a payload"); + + item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id); + if(!item) { + if(cmd == DYNCFG_CMD_TEST || cmd == DYNCFG_CMD_USERCONFIG) { + // this may be a test on a new job + item = dyncfg_get_template_of_new_job(id); + } + + if(!item) + return dyncfg_intercept_early_error( + rfe, HTTP_RESP_NOT_FOUND, + "dyncfg functions intercept: id is not found"); + } + + DYNCFG *df = dictionary_acquired_item_value(item); + + // 1. 
check the permissions of the request + + switch(cmd) { + case DYNCFG_CMD_GET: + case DYNCFG_CMD_SCHEMA: + case DYNCFG_CMD_USERCONFIG: + if(!http_access_user_has_enough_access_level_for_endpoint(rfe->user_access, df->view_access)) { + make_the_call_to_plugin = false; + rc = dyncfg_default_response( + rfe->result.wb, HTTP_RESP_FORBIDDEN, + "dyncfg: you don't have enough view permissions to execute this command"); + } + break; + + case DYNCFG_CMD_ENABLE: + case DYNCFG_CMD_DISABLE: + case DYNCFG_CMD_ADD: + case DYNCFG_CMD_TEST: + case DYNCFG_CMD_UPDATE: + case DYNCFG_CMD_REMOVE: + case DYNCFG_CMD_RESTART: + if(!http_access_user_has_enough_access_level_for_endpoint(rfe->user_access, df->edit_access)) { + make_the_call_to_plugin = false; + rc = dyncfg_default_response( + rfe->result.wb, HTTP_RESP_FORBIDDEN, + "dyncfg: you don't have enough edit permissions to execute this command"); + } + break; + + default: { + make_the_call_to_plugin = false; + rc = dyncfg_default_response( + rfe->result.wb, HTTP_RESP_INTERNAL_SERVER_ERROR, + "dyncfg: permissions for this command are not set"); + } + break; + } + + // 2. 
validate the request parameters + + if(make_the_call_to_plugin) { + if (!(df->cmds & cmd)) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: this command is not supported by the configuration node: %s", rfe->function); + + make_the_call_to_plugin = false; + rc = dyncfg_default_response( + rfe->result.wb, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: this command is not supported by this configuration node"); + } + else if (cmd == DYNCFG_CMD_ADD) { + if (df->type != DYNCFG_TYPE_TEMPLATE) { + make_the_call_to_plugin = false; + rc = dyncfg_default_response( + rfe->result.wb, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: add command is only allowed in templates"); + + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: add command can only be applied on templates, not %s: %s", + dyncfg_id2type(df->type), rfe->function); + } + } + else if ( + cmd == DYNCFG_CMD_ENABLE && df->type == DYNCFG_TYPE_JOB && + dyncfg_is_user_disabled(string2str(df->template))) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: cannot enable a job of a disabled template: %s", + rfe->function); + + make_the_call_to_plugin = false; + rc = dyncfg_default_response( + rfe->result.wb, HTTP_RESP_BAD_REQUEST, + "dyncfg functions intercept: this job belongs to disabled template"); + } + } + + // 3. 
check if it is one of the commands we should execute + + if(make_the_call_to_plugin) { + if (cmd & (DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE | DYNCFG_CMD_RESTART) && df->type == DYNCFG_TYPE_TEMPLATE) { + if (!called_from_dyncfg_echo) { + bool old_user_disabled = df->dyncfg.user_disabled; + if (cmd == DYNCFG_CMD_ENABLE) + df->dyncfg.user_disabled = false; + else if (cmd == DYNCFG_CMD_DISABLE) + df->dyncfg.user_disabled = true; + + if (df->dyncfg.user_disabled != old_user_disabled) + dyncfg_file_save(id, df); + } + + dyncfg_apply_action_on_all_template_jobs(rfe, id, cmd); + + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_OK, "applied to all template job"); + make_the_call_to_plugin = false; + } + else if (cmd == DYNCFG_CMD_SCHEMA) { + bool loaded = false; + if (df->type == DYNCFG_TYPE_JOB) { + if (df->template) + loaded = dyncfg_get_schema(string2str(df->template), rfe->result.wb); + } else + loaded = dyncfg_get_schema(id, rfe->result.wb); + + if (loaded) { + rfe->result.wb->content_type = CT_APPLICATION_JSON; + rfe->result.wb->expires = now_realtime_sec(); + rc = HTTP_RESP_OK; + make_the_call_to_plugin = false; + } + } + } + + // 4. execute the command + + if(make_the_call_to_plugin) { + struct dyncfg_call *dc = callocz(1, sizeof(*dc)); + dc->function = strdupz(rfe->function); + dc->id = strdupz(id); + dc->source = rfe->source ? strdupz(rfe->source) : NULL; + dc->add_name = (add_name) ? 
strdupz(add_name) : NULL; + dc->cmd = cmd; + dc->result_cb = rfe->result.cb; + dc->result_cb_data = rfe->result.data; + dc->payload = buffer_dup(rfe->payload); + dc->from_dyncfg_echo = called_from_dyncfg_echo; + + rfe->result.cb = dyncfg_function_intercept_result_cb; + rfe->result.data = dc; + + rc = df->execute_cb(rfe, df->execute_cb_data); + } + else if(rfe->result.cb) + rfe->result.cb(rfe->result.wb, rc, rfe->result.data); + + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + return rc; +} + diff --git a/src/daemon/config/dyncfg-internals.h b/src/daemon/config/dyncfg-internals.h new file mode 100644 index 000000000..1722ae792 --- /dev/null +++ b/src/daemon/config/dyncfg-internals.h @@ -0,0 +1,145 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DYNCFG_INTERNALS_H +#define NETDATA_DYNCFG_INTERNALS_H + +#include "../common.h" +#include "database/rrd.h" +#include "database/rrdfunctions.h" +#include "database/rrdfunctions-internals.h" +#include "database/rrdcollector-internals.h" + +typedef struct dyncfg { + ND_UUID host_uuid; + STRING *function; + STRING *template; + STRING *path; + DYNCFG_CMDS cmds; + DYNCFG_TYPE type; + + HTTP_ACCESS view_access; + HTTP_ACCESS edit_access; + + struct { + DYNCFG_STATUS status; + DYNCFG_SOURCE_TYPE source_type; + STRING *source; + usec_t created_ut; + usec_t modified_ut; + } current; + + struct { + uint32_t saves; + bool restart_required; + bool plugin_rejected; + bool user_disabled; + DYNCFG_STATUS status; + DYNCFG_SOURCE_TYPE source_type; + STRING *source; + BUFFER *payload; + usec_t created_ut; + usec_t modified_ut; + } dyncfg; + + bool sync; + rrd_function_execute_cb_t execute_cb; + void *execute_cb_data; +} DYNCFG; + +struct dyncfg_globals { + const char *dir; + DICTIONARY *nodes; +}; + +extern struct dyncfg_globals dyncfg_globals; + +void dyncfg_load_all(void); +void dyncfg_file_load(const char *filename); +void dyncfg_file_save(const char *id, DYNCFG *df); +void dyncfg_file_delete(const char 
*id);

bool dyncfg_get_schema(const char *id, BUFFER *dst);

void dyncfg_echo_cb(BUFFER *wb, int code, void *result_cb_data);
void dyncfg_echo(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id, DYNCFG_CMDS cmd);
void dyncfg_echo_update(const DICTIONARY_ITEM *item, DYNCFG *df, const char *id);
void dyncfg_echo_add(const DICTIONARY_ITEM *item_template, const DICTIONARY_ITEM *item_job, DYNCFG *df_template, DYNCFG *df_job, const char *template_id, const char *job_name);

const DICTIONARY_ITEM *dyncfg_add_internal(RRDHOST *host, const char *id, const char *path,
                                           DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type,
                                           const char *source, DYNCFG_CMDS cmds,
                                           usec_t created_ut, usec_t modified_ut,
                                           bool sync, HTTP_ACCESS view_access, HTTP_ACCESS edit_access,
                                           rrd_function_execute_cb_t execute_cb, void *execute_cb_data,
                                           bool overwrite_cb);

int dyncfg_function_intercept_cb(struct rrd_function_execute *rfe, void *data);
void dyncfg_cleanup(DYNCFG *v);

const DICTIONARY_ITEM *dyncfg_get_template_of_new_job(const char *job_id);

bool dyncfg_is_user_disabled(const char *id);

RRDHOST *dyncfg_rrdhost_by_uuid(ND_UUID *uuid);
RRDHOST *dyncfg_rrdhost(DYNCFG *df);

// Copy df->dyncfg.source into df->current.source, releasing the old value.
static inline void dyncfg_copy_dyncfg_source_to_current(DYNCFG *df) {
    STRING *old = df->current.source;
    df->current.source = string_dup(df->dyncfg.source);
    string_freez(old);
}

// Replace df->dyncfg.source with a copy of the C string 'source',
// releasing the old value.
static inline void dyncfg_set_dyncfg_source_from_txt(DYNCFG *df, const char *source) {
    STRING *old = df->dyncfg.source;
    df->dyncfg.source = string_strdupz(source);
    string_freez(old);
}

// Promote the dyncfg (user-supplied) state to the current (live) state:
// status, source_type and source are copied; created_ut keeps the earliest
// timestamp, modified_ut keeps the latest.
static inline void dyncfg_set_current_from_dyncfg(DYNCFG *df) {
    df->current.status = df->dyncfg.status;
    df->current.source_type = df->dyncfg.source_type;

    dyncfg_copy_dyncfg_source_to_current(df);

    if(df->dyncfg.created_ut < df->current.created_ut)
        df->current.created_ut = df->dyncfg.created_ut;

    if(df->dyncfg.modified_ut > df->current.modified_ut)
        df->current.modified_ut =
df->dyncfg.modified_ut; +} + +static inline void dyncfg_update_status_on_successful_add_or_update(DYNCFG *df, int code) { + df->dyncfg.plugin_rejected = false; + + if (code == DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED) + df->dyncfg.restart_required = true; + else + df->dyncfg.restart_required = false; + + dyncfg_set_current_from_dyncfg(df); +} + +static inline DYNCFG_STATUS dyncfg_status_from_successful_response(int code) { + DYNCFG_STATUS status = DYNCFG_STATUS_ACCEPTED; + + switch(code) { + default: + case DYNCFG_RESP_ACCEPTED: + case DYNCFG_RESP_ACCEPTED_RESTART_REQUIRED: + status = DYNCFG_STATUS_ACCEPTED; + break; + + case DYNCFG_RESP_ACCEPTED_DISABLED: + status = DYNCFG_STATUS_DISABLED; + break; + + case DYNCFG_RESP_RUNNING: + status = DYNCFG_STATUS_RUNNING; + break; + + } + + return status; +} + +#endif //NETDATA_DYNCFG_INTERNALS_H diff --git a/src/daemon/config/dyncfg-tree.c b/src/daemon/config/dyncfg-tree.c new file mode 100644 index 000000000..77d031fa0 --- /dev/null +++ b/src/daemon/config/dyncfg-tree.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "dyncfg-internals.h" +#include "dyncfg.h" + +static int dyncfg_tree_compar(const void *a, const void *b) { + const DICTIONARY_ITEM *item1 = *(const DICTIONARY_ITEM **)a; + const DICTIONARY_ITEM *item2 = *(const DICTIONARY_ITEM **)b; + + DYNCFG *df1 = dictionary_acquired_item_value(item1); + DYNCFG *df2 = dictionary_acquired_item_value(item2); + + int rc = string_cmp(df1->path, df2->path); + if(rc == 0) + rc = strcmp(dictionary_acquired_item_name(item1), dictionary_acquired_item_name(item2)); + + return rc; +} + +static void dyncfg_to_json(DYNCFG *df, const char *id, BUFFER *wb) { + buffer_json_member_add_object(wb, id); + { + buffer_json_member_add_string(wb, "type", dyncfg_id2type(df->type)); + + if(df->type == DYNCFG_TYPE_JOB) + buffer_json_member_add_string(wb, "template", string2str(df->template)); + + buffer_json_member_add_string(wb, "status", 
dyncfg_id2status(df->current.status)); + dyncfg_cmds2json_array(df->current.status == DYNCFG_STATUS_ORPHAN ? DYNCFG_CMD_REMOVE : df->cmds, "cmds", wb); + buffer_json_member_add_object(wb, "access"); + { + http_access2buffer_json_array(wb, "view", df->view_access); + http_access2buffer_json_array(wb, "edit", df->edit_access); + } + buffer_json_object_close(wb); + buffer_json_member_add_string(wb, "source_type", dyncfg_id2source_type(df->current.source_type)); + buffer_json_member_add_string(wb, "source", string2str(df->current.source)); + buffer_json_member_add_boolean(wb, "sync", df->sync); + buffer_json_member_add_boolean(wb, "user_disabled", df->dyncfg.user_disabled); + buffer_json_member_add_boolean(wb, "restart_required", df->dyncfg.restart_required); + buffer_json_member_add_boolean(wb, "plugin_rejected", df->dyncfg.plugin_rejected); + buffer_json_member_add_object(wb, "payload"); + { + if (df->dyncfg.payload && buffer_strlen(df->dyncfg.payload)) { + buffer_json_member_add_boolean(wb, "available", true); + buffer_json_member_add_string(wb, "status", dyncfg_id2status(df->dyncfg.status)); + buffer_json_member_add_string(wb, "source_type", dyncfg_id2source_type(df->dyncfg.source_type)); + buffer_json_member_add_string(wb, "source", string2str(df->dyncfg.source)); + buffer_json_member_add_uint64(wb, "created_ut", df->dyncfg.created_ut); + buffer_json_member_add_uint64(wb, "modified_ut", df->dyncfg.modified_ut); + buffer_json_member_add_string(wb, "content_type", content_type_id2string(df->dyncfg.payload->content_type)); + buffer_json_member_add_uint64(wb, "content_length", df->dyncfg.payload->len); + } else + buffer_json_member_add_boolean(wb, "available", false); + } + buffer_json_object_close(wb); // payload + buffer_json_member_add_uint64(wb, "saves", df->dyncfg.saves); + buffer_json_member_add_uint64(wb, "created_ut", df->current.created_ut); + buffer_json_member_add_uint64(wb, "modified_ut", df->current.modified_ut); + } + buffer_json_object_close(wb); +} + 
+static void dyncfg_tree_for_host(RRDHOST *host, BUFFER *wb, const char *path, const char *id) { + size_t entries = dictionary_entries(dyncfg_globals.nodes); + size_t used = 0; + const DICTIONARY_ITEM *items[entries]; + size_t restart_required = 0, plugin_rejected = 0, status_incomplete = 0, status_failed = 0; + + STRING *template = NULL; + if(id && *id) + template = string_strdupz(id); + + ND_UUID host_uuid = uuid2UUID(host->host_uuid); + + size_t path_len = strlen(path); + DYNCFG *df; + dfe_start_read(dyncfg_globals.nodes, df) { + if(!UUIDeq(df->host_uuid, host_uuid)) + continue; + + if(strncmp(string2str(df->path), path, path_len) != 0) + continue; + + if(!rrd_function_available(host, string2str(df->function))) + df->current.status = DYNCFG_STATUS_ORPHAN; + + if((id && strcmp(id, df_dfe.name) != 0) && (template && df->template != template)) + continue; + + items[used++] = dictionary_acquired_item_dup(dyncfg_globals.nodes, df_dfe.item); + } + dfe_done(df); + + if(used > 1) + qsort(items, used, sizeof(const DICTIONARY_ITEM *), dyncfg_tree_compar); + + buffer_flush(wb); + buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_MINIFY); + + buffer_json_member_add_uint64(wb, "version", 1); + + buffer_json_member_add_object(wb, "tree"); + { + STRING *last_path = NULL; + for (size_t i = 0; i < used; i++) { + df = dictionary_acquired_item_value(items[i]); + if (df->path != last_path) { + last_path = df->path; + + if (i) + buffer_json_object_close(wb); + + buffer_json_member_add_object(wb, string2str(last_path)); + } + + dyncfg_to_json(df, dictionary_acquired_item_name(items[i]), wb); + + if (df->dyncfg.plugin_rejected) + plugin_rejected++; + + if(df->current.status != DYNCFG_STATUS_ORPHAN) { + if (df->dyncfg.restart_required) + restart_required++; + + if (df->current.status == DYNCFG_STATUS_FAILED) + status_failed++; + + if (df->current.status == DYNCFG_STATUS_INCOMPLETE) + status_incomplete++; + } + } + + if (used) + buffer_json_object_close(wb); + } + 
buffer_json_object_close(wb); // tree + + buffer_json_member_add_object(wb, "attention"); + { + buffer_json_member_add_boolean(wb, "degraded", restart_required + plugin_rejected + status_failed + status_incomplete > 0); + buffer_json_member_add_uint64(wb, "restart_required", restart_required); + buffer_json_member_add_uint64(wb, "plugin_rejected", plugin_rejected); + buffer_json_member_add_uint64(wb, "status_failed", status_failed); + buffer_json_member_add_uint64(wb, "status_incomplete", status_incomplete); + } + buffer_json_object_close(wb); // attention + + buffer_json_agents_v2(wb, NULL, 0, false, false); + + buffer_json_finalize(wb); + + for(size_t i = 0; i < used ;i++) + dictionary_acquired_item_release(dyncfg_globals.nodes, items[i]); +} + +static int dyncfg_config_execute_cb(struct rrd_function_execute *rfe, void *data) { + RRDHOST *host = data; + int code; + + char buf[strlen(rfe->function) + 1]; + memcpy(buf, rfe->function, sizeof(buf)); + + char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line + size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS); + + const char *config = get_word(words, num_words, 0); + const char *action = get_word(words, num_words, 1); + const char *path = get_word(words, num_words, 2); + const char *id = get_word(words, num_words, 3); + + if(!config || !*config || strcmp(config, PLUGINSD_FUNCTION_CONFIG) != 0) { + char *msg = "invalid function call, expected: config"; + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG TREE: function call '%s': %s", rfe->function, msg); + code = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + if(!action || !*action) { + char *msg = "invalid function call, expected: config tree"; + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG TREE: function call '%s': %s", rfe->function, msg); + code = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + if(strcmp(action, "tree") == 
0) { + if(!path || !*path) + path = "/"; + + if(!id || !*id) + id = NULL; + else if(!dyncfg_is_valid_id(id)) { + char *msg = "invalid id given"; + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG TREE: function call '%s': %s", rfe->function, msg); + code = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + code = HTTP_RESP_OK; + dyncfg_tree_for_host(host, rfe->result.wb, path, id); + } + else { + const char *name = id; + id = action; + action = path; + path = NULL; + + DYNCFG_CMDS cmd = dyncfg_cmds2id(action); + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id); + if(!item) { + item = dyncfg_get_template_of_new_job(id); + + if(item && (!name || !*name)) { + const char *n = dictionary_acquired_item_name(item); + if(strncmp(id, n, strlen(n)) == 0 && id[strlen(n)] == ':') + name = &id[strlen(n) + 1]; + } + } + + if(item) { + DYNCFG *df = dictionary_acquired_item_value(item); + + if(!rrd_function_available(host, string2str(df->function))) + df->current.status = DYNCFG_STATUS_ORPHAN; + + if(cmd == DYNCFG_CMD_REMOVE) { + bool delete = (df->current.status == DYNCFG_STATUS_ORPHAN); + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + item = NULL; + + if(delete) { + if(!http_access_user_has_enough_access_level_for_endpoint(rfe->user_access, df->edit_access)) { + code = dyncfg_default_response( + rfe->result.wb, HTTP_RESP_FORBIDDEN, + "dyncfg: you don't have enough edit permissions to execute this command"); + goto cleanup; + } + + dictionary_del(dyncfg_globals.nodes, id); + dyncfg_file_delete(id); + code = dyncfg_default_response(rfe->result.wb, 200, ""); + goto cleanup; + } + } + else if((cmd == DYNCFG_CMD_USERCONFIG || cmd == DYNCFG_CMD_TEST) && df->current.status != DYNCFG_STATUS_ORPHAN) { + const char *old_rfe_function = rfe->function; + char buf2[2048]; + snprintfz(buf2, sizeof(buf2), "config %s %s %s", dictionary_acquired_item_name(item), action, name?name:""); + rfe->function = buf2; + 
dictionary_acquired_item_release(dyncfg_globals.nodes, item); + item = NULL; + code = dyncfg_function_intercept_cb(rfe, data); + rfe->function = old_rfe_function; + return code; + } + + if(item) + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + } + + code = HTTP_RESP_NOT_FOUND; + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG: unknown config id '%s' in call: '%s'. " + "This can happen if the plugin that registered the dynamic configuration is not running now.", + id, rfe->function); + + rrd_call_function_error( + rfe->result.wb, + "unknown config id given", code); + } + +cleanup: + if(rfe->result.cb) + rfe->result.cb(rfe->result.wb, code, rfe->result.data); + + return code; +} + +// ---------------------------------------------------------------------------- +// this adds a 'config' function to all leaf nodes (localhost and virtual nodes) +// which is used to serve the tree and act as a catch-all for all config calls +// for which there is no id overloaded. + +void dyncfg_host_init(RRDHOST *host) { + // IMPORTANT: + // This function needs to be async, although it is internal. + // The reason is that it can call by itself another function that may or may not be internal (sync). 
+ + rrd_function_add(host, NULL, PLUGINSD_FUNCTION_CONFIG, 120, + 1000, "Dynamic configuration", "config", HTTP_ACCESS_ANONYMOUS_DATA, + false, dyncfg_config_execute_cb, host); +} diff --git a/src/daemon/config/dyncfg-unittest.c b/src/daemon/config/dyncfg-unittest.c new file mode 100644 index 000000000..775dc7cbd --- /dev/null +++ b/src/daemon/config/dyncfg-unittest.c @@ -0,0 +1,799 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "dyncfg-internals.h" +#include "dyncfg.h" + +// ---------------------------------------------------------------------------- +// unit test + +#define LINE_FILE_STR TOSTRING(__LINE__) "@" __FILE__ + +struct dyncfg_unittest { + bool enabled; + size_t errors; + + DICTIONARY *nodes; + + SPINLOCK spinlock; + struct dyncfg_unittest_action *queue; +} dyncfg_unittest_data = { 0 }; + +typedef struct { + bool enabled; + bool removed; + struct { + double dbl; + bool bln; + } value; +} TEST_CFG; + +typedef struct { + const char *id; + const char *source; + bool sync; + DYNCFG_TYPE type; + DYNCFG_CMDS cmds; + DYNCFG_SOURCE_TYPE source_type; + + TEST_CFG current; + TEST_CFG expected; + + bool received; + bool finished; + + size_t last_saves; + bool needs_save; +} TEST; + +struct dyncfg_unittest_action { + TEST *t; + BUFFER *result; + BUFFER *payload; + DYNCFG_CMDS cmd; + const char *add_name; + const char *source; + + rrd_function_result_callback_t result_cb; + void *result_cb_data; + + struct dyncfg_unittest_action *prev, *next; +}; + +static void dyncfg_unittest_register_error(const char *id, const char *msg) { + if(msg) + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: error on id '%s': %s", id ? 
id : "", msg); + + __atomic_add_fetch(&dyncfg_unittest_data.errors, 1, __ATOMIC_RELAXED); +} + +static int dyncfg_unittest_execute_cb(struct rrd_function_execute *rfe, void *data); + +bool dyncfg_unittest_parse_payload(BUFFER *payload, TEST *t, DYNCFG_CMDS cmd, const char *add_name, const char *source) { + CLEAN_JSON_OBJECT *jobj = json_tokener_parse(buffer_tostring(payload)); + if(!jobj) { + dyncfg_unittest_register_error(t->id, "cannot parse json payload"); + return false; + } + + struct json_object *json_double; + struct json_object *json_boolean; + + json_object_object_get_ex(jobj, "double", &json_double); + double value_double = json_object_get_double(json_double); + + json_object_object_get_ex(jobj, "boolean", &json_boolean); + int value_boolean = json_object_get_boolean(json_boolean); + + if(cmd == DYNCFG_CMD_UPDATE) { + t->current.value.dbl = value_double; + t->current.value.bln = value_boolean; + } + else if(cmd == DYNCFG_CMD_ADD) { + char buf[strlen(t->id) + strlen(add_name) + 20]; + snprintfz(buf, sizeof(buf), "%s:%s", t->id, add_name); + TEST tmp = { + .id = strdupz(buf), + .source = strdupz(source), + .cmds = (t->cmds & ~DYNCFG_CMD_ADD) | DYNCFG_CMD_GET | DYNCFG_CMD_REMOVE | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE | DYNCFG_CMD_TEST, + .sync = t->sync, + .type = DYNCFG_TYPE_JOB, + .source_type = DYNCFG_SOURCE_TYPE_DYNCFG, + .received = true, + .finished = true, + .current = + {.enabled = true, + .removed = false, + .value = + { + .dbl = value_double, + .bln = value_boolean, + }}, + .expected = { + .enabled = true, + .removed = false, + .value = { + .dbl = 3.14, + .bln = true, + } + }, + .needs_save = true, + }; + const DICTIONARY_ITEM *item = dictionary_set_and_acquire_item(dyncfg_unittest_data.nodes, buf, &tmp, sizeof(tmp)); + TEST *t2 = dictionary_acquired_item_value(item); + dictionary_acquired_item_release(dyncfg_unittest_data.nodes, item); + + dyncfg_add_low_level(localhost, t2->id, "/unittests", + DYNCFG_STATUS_RUNNING, 
t2->type, t2->source_type, t2->source, + t2->cmds, 0, 0, t2->sync, + HTTP_ACCESS_NONE, HTTP_ACCESS_NONE, + dyncfg_unittest_execute_cb, t2); + } + else { + dyncfg_unittest_register_error(t->id, "invalid command received to parse payload"); + return false; + } + + return true; +} + +static int dyncfg_unittest_action(struct dyncfg_unittest_action *a) { + TEST *t = a->t; + + int rc = HTTP_RESP_OK; + + if(a->cmd == DYNCFG_CMD_ENABLE) + t->current.enabled = true; + else if(a->cmd == DYNCFG_CMD_DISABLE) + t->current.enabled = false; + else if(a->cmd == DYNCFG_CMD_ADD || a->cmd == DYNCFG_CMD_UPDATE) + rc = dyncfg_unittest_parse_payload(a->payload, a->t, a->cmd, a->add_name, a->source) ? HTTP_RESP_OK : HTTP_RESP_BAD_REQUEST; + else if(a->cmd == DYNCFG_CMD_REMOVE) + t->current.removed = true; + else + rc = HTTP_RESP_BAD_REQUEST; + + dyncfg_default_response(a->result, rc, NULL); + + a->result_cb(a->result, rc, a->result_cb_data); + + buffer_free(a->payload); + freez((void *)a->add_name); + freez(a); + + __atomic_store_n(&t->finished, true, __ATOMIC_RELAXED); + + return rc; +} + +static void *dyncfg_unittest_thread_action(void *ptr) { + while(!nd_thread_signaled_to_cancel()) { + struct dyncfg_unittest_action *a = NULL; + spinlock_lock(&dyncfg_unittest_data.spinlock); + a = dyncfg_unittest_data.queue; + if(a) + DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(dyncfg_unittest_data.queue, a, prev, next); + spinlock_unlock(&dyncfg_unittest_data.spinlock); + + if(a) + dyncfg_unittest_action(a); + else + sleep_usec(10 * USEC_PER_MS); + } + + return ptr; +} + +static int dyncfg_unittest_execute_cb(struct rrd_function_execute *rfe, void *data) { + + int rc; + bool run_the_callback = true; + TEST *t = data; + + t->received = true; + + char buf[strlen(rfe->function) + 1]; + memcpy(buf, rfe->function, sizeof(buf)); + + char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line + size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS); + 
+ const char *config = get_word(words, num_words, 0); + const char *id = get_word(words, num_words, 1); + const char *action = get_word(words, num_words, 2); + const char *add_name = get_word(words, num_words, 3); + + if(!config || !*config || strcmp(config, PLUGINSD_FUNCTION_CONFIG) != 0) { + char *msg = "did not receive a config call"; + dyncfg_unittest_register_error(id, msg); + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + if(!id || !*id) { + char *msg = "did not receive an id"; + dyncfg_unittest_register_error(id, msg); + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + if(t->type != DYNCFG_TYPE_TEMPLATE && strcmp(t->id, id) != 0) { + char *msg = "id received is not the expected"; + dyncfg_unittest_register_error(id, msg); + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + if(!action || !*action) { + char *msg = "did not receive an action"; + dyncfg_unittest_register_error(id, msg); + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + DYNCFG_CMDS cmd = dyncfg_cmds2id(action); + if(cmd == DYNCFG_CMD_NONE) { + char *msg = "action received is not known"; + dyncfg_unittest_register_error(id, msg); + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + if(!(t->cmds & cmd)) { + char *msg = "received a command that is not supported"; + dyncfg_unittest_register_error(id, msg); + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + if(t->current.removed && cmd != DYNCFG_CMD_ADD) { + char *msg = "received a command for a removed entry"; + dyncfg_unittest_register_error(id, msg); + rc = dyncfg_default_response(rfe->result.wb, HTTP_RESP_BAD_REQUEST, msg); + goto cleanup; + } + + struct dyncfg_unittest_action *a = callocz(1, sizeof(*a)); + a->t = t; + a->add_name = add_name ? 
strdupz(add_name) : NULL; + a->source = rfe->source, + a->result = rfe->result.wb; + a->payload = buffer_dup(rfe->payload); + a->cmd = cmd; + a->result_cb = rfe->result.cb; + a->result_cb_data = rfe->result.data; + + run_the_callback = false; + + if(t->sync) + rc = dyncfg_unittest_action(a); + else { + spinlock_lock(&dyncfg_unittest_data.spinlock); + DOUBLE_LINKED_LIST_APPEND_ITEM_UNSAFE(dyncfg_unittest_data.queue, a, prev, next); + spinlock_unlock(&dyncfg_unittest_data.spinlock); + rc = HTTP_RESP_OK; + } + +cleanup: + if(run_the_callback) { + __atomic_store_n(&t->finished, true, __ATOMIC_RELAXED); + + if (rfe->result.cb) + rfe->result.cb(rfe->result.wb, rc, rfe->result.data); + } + + return rc; +} + +static bool dyncfg_unittest_check(TEST *t, DYNCFG_CMDS c, const char *cmd, bool received) { + size_t errors = 0; + + fprintf(stderr, "CHECK '%s' after cmd '%s'...", t->id, cmd); + + if(t->received != received) { + fprintf(stderr, "\n - received flag found '%s', expected '%s'", + t->received?"true":"false", + received?"true":"false"); + errors++; + goto cleanup; + } + + if(!received) + goto cleanup; + + usec_t give_up_ut = now_monotonic_usec() + 2 * USEC_PER_SEC; + while(!__atomic_load_n(&t->finished, __ATOMIC_RELAXED)) { + tinysleep(); + + if(now_monotonic_usec() > give_up_ut) { + fprintf(stderr, "\n - gave up waiting for the plugin to process this!"); + errors++; + goto cleanup; + } + } + + if(t->type != DYNCFG_TYPE_TEMPLATE && t->current.enabled != t->expected.enabled) { + fprintf(stderr, "\n - enabled flag found '%s', expected '%s'", + t->current.enabled?"true":"false", + t->expected.enabled?"true":"false"); + errors++; + } + if(t->current.removed != t->expected.removed) { + fprintf(stderr, "\n - removed flag found '%s', expected '%s'", + t->current.removed?"true":"false", + t->expected.removed?"true":"false"); + errors++; + } + if(t->current.value.bln != t->expected.value.bln) { + fprintf(stderr, "\n - boolean value found '%s', expected '%s'", + 
t->current.value.bln?"true":"false", + t->expected.value.bln?"true":"false"); + errors++; + } + if(t->current.value.dbl != t->expected.value.dbl) { + fprintf(stderr, "\n - double value found '%f', expected '%f'", + t->current.value.dbl, t->expected.value.dbl); + errors++; + } + + DYNCFG *df = dictionary_get(dyncfg_globals.nodes, t->id); + if(!df) { + fprintf(stderr, "\n - not found in DYNCFG nodes dictionary!"); + errors++; + } + else if(df->cmds != t->cmds) { + fprintf(stderr, "\n - has different cmds in DYNCFG nodes dictionary; found: "); + dyncfg_cmds2fp(df->cmds, stderr); + fprintf(stderr, ", expected: "); + dyncfg_cmds2fp(t->cmds, stderr); + fprintf(stderr, "\n"); + errors++; + } + else if(df->type == DYNCFG_TYPE_JOB && df->current.source_type == DYNCFG_SOURCE_TYPE_DYNCFG && !df->dyncfg.saves) { + fprintf(stderr, "\n - DYNCFG job has no saves!"); + errors++; + } + else if(df->type == DYNCFG_TYPE_JOB && df->current.source_type == DYNCFG_SOURCE_TYPE_DYNCFG && (!df->dyncfg.payload || !buffer_strlen(df->dyncfg.payload))) { + fprintf(stderr, "\n - DYNCFG job has no payload!"); + errors++; + } + else if(df->dyncfg.user_disabled && !df->dyncfg.saves) { + fprintf(stderr, "\n - DYNCFG disabled config has no saves!"); + errors++; + } + else if((c & (DYNCFG_CMD_ADD | DYNCFG_CMD_UPDATE)) && t->source && string_strcmp(df->current.source, t->source) != 0) { + fprintf(stderr, "\n - source does not match!"); + errors++; + } + else if((c & (DYNCFG_CMD_ADD | DYNCFG_CMD_UPDATE)) && df->current.source && !t->source) { + fprintf(stderr, "\n - there is a source but it shouldn't be any!"); + errors++; + } + else if(t->needs_save && df->dyncfg.saves <= t->last_saves) { + fprintf(stderr, "\n - should be saved, but it is not saved!"); + errors++; + } + else if(!t->needs_save && df->dyncfg.saves > t->last_saves) { + fprintf(stderr, "\n - should be not be saved, but it saved!"); + errors++; + } + +cleanup: + if(errors) { + fprintf(stderr, "\n >>> FAILED\n\n"); + 
dyncfg_unittest_register_error(NULL, NULL); + return false; + } + + fprintf(stderr, " OK\n"); + return true; +} + +static void dyncfg_unittest_reset(void) { + TEST *t; + dfe_start_read(dyncfg_unittest_data.nodes, t) { + t->received = t->finished = false; + t->needs_save = false; + + DYNCFG *df = dictionary_get(dyncfg_globals.nodes, t->id); + if(!df) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: cannot find id '%s'", t->id); + dyncfg_unittest_register_error(NULL, NULL); + } + else + t->last_saves = df->dyncfg.saves; + } + dfe_done(t); +} + +void should_be_saved(TEST *t, DYNCFG_CMDS c) { + DYNCFG *df; + + if(t->type == DYNCFG_TYPE_TEMPLATE) { + df = dictionary_get(dyncfg_globals.nodes, t->id); + t->current.enabled = !df->dyncfg.user_disabled; + } + + t->needs_save = + c == DYNCFG_CMD_UPDATE || + (t->current.enabled && c == DYNCFG_CMD_DISABLE) || + (!t->current.enabled && c == DYNCFG_CMD_ENABLE); +} + +static int dyncfg_unittest_run(const char *cmd, BUFFER *wb, const char *payload, const char *source) { + dyncfg_unittest_reset(); + + char buf[strlen(cmd) + 1]; + memcpy(buf, cmd, sizeof(buf)); + + char *words[MAX_FUNCTION_PARAMETERS]; // an array of pointers for the words in this line + size_t num_words = quoted_strings_splitter_pluginsd(buf, words, MAX_FUNCTION_PARAMETERS); + + // const char *config = get_word(words, num_words, 0); + const char *id = get_word(words, num_words, 1); + char *action = get_word(words, num_words, 2); + const char *add_name = get_word(words, num_words, 3); + + DYNCFG_CMDS c = dyncfg_cmds2id(action); + + TEST *t = dictionary_get(dyncfg_unittest_data.nodes, id); + if(!t) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: cannot find id '%s' from cmd: %s", id, cmd); + dyncfg_unittest_register_error(NULL, NULL); + return HTTP_RESP_NOT_FOUND; + } + + if(t->type == DYNCFG_TYPE_TEMPLATE) + t->received = t->finished = true; + + if(c == DYNCFG_CMD_DISABLE) + t->expected.enabled = false; + if(c == DYNCFG_CMD_ENABLE) + t->expected.enabled = 
true; + if(c == DYNCFG_CMD_UPDATE) + memset(&t->current.value, 0, sizeof(t->current.value)); + + if(c & (DYNCFG_CMD_UPDATE) || (c & (DYNCFG_CMD_DISABLE|DYNCFG_CMD_ENABLE) && t->type != DYNCFG_TYPE_TEMPLATE)) { + freez((void *)t->source); + t->source = strdupz(source); + } + + buffer_flush(wb); + + CLEAN_BUFFER *pld = NULL; + + if(payload) { + pld = buffer_create(1024, NULL); + buffer_strcat(pld, payload); + } + + should_be_saved(t, c); + + int rc = rrd_function_run(localhost, wb, 10, HTTP_ACCESS_ALL, cmd, + true, NULL, + NULL, NULL, + NULL, NULL, + NULL, NULL, + pld, source); + if(!DYNCFG_RESP_SUCCESS(rc)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: failed to run: %s; returned code %d", cmd, rc); + dyncfg_unittest_register_error(NULL, NULL); + } + + dyncfg_unittest_check(t, c, cmd, true); + + if(rc == HTTP_RESP_OK && t->type == DYNCFG_TYPE_TEMPLATE) { + if(c == DYNCFG_CMD_ADD) { + char buf2[strlen(id) + strlen(add_name) + 2]; + snprintfz(buf2, sizeof(buf2), "%s:%s", id, add_name); + TEST *tt = dictionary_get(dyncfg_unittest_data.nodes, buf2); + if (!tt) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG UNITTEST: failed to find newly added id '%s' of command: %s", + id, cmd); + dyncfg_unittest_register_error(NULL, NULL); + } + dyncfg_unittest_check(tt, c, cmd, true); + } + else { + STRING *template = string_strdupz(t->id); + DYNCFG *df; + dfe_start_read(dyncfg_globals.nodes, df) { + if(df->type == DYNCFG_TYPE_JOB && df->template == template) { + TEST *tt = dictionary_get(dyncfg_unittest_data.nodes, df_dfe.name); + if (!tt) { + nd_log(NDLS_DAEMON, NDLP_ERR, + "DYNCFG UNITTEST: failed to find id '%s' while running command: %s", df_dfe.name, cmd); + dyncfg_unittest_register_error(NULL, NULL); + } + else { + if(c == DYNCFG_CMD_DISABLE) + tt->expected.enabled = false; + if(c == DYNCFG_CMD_ENABLE) + tt->expected.enabled = true; + dyncfg_unittest_check(tt, c, cmd, true); + } + } + } + dfe_done(df); + string_freez(template); + } + } + + return rc; +} + +static void 
dyncfg_unittest_cleanup_files(void) { + char path[FILENAME_MAX]; + snprintfz(path, sizeof(path) - 1, "%s/%s", netdata_configured_varlib_dir, "config"); + + DIR *dir = opendir(path); + if (!dir) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG UNITTEST: cannot open directory '%s'", path); + return; + } + + struct dirent *entry; + char filename[FILENAME_MAX + sizeof(entry->d_name)]; + while ((entry = readdir(dir)) != NULL) { + if ((entry->d_type == DT_REG || entry->d_type == DT_LNK) && strstartswith(entry->d_name, "unittest:") && strendswith(entry->d_name, ".dyncfg")) { + snprintf(filename, sizeof(filename), "%s/%s", path, entry->d_name); + nd_log(NDLS_DAEMON, NDLP_INFO, "DYNCFG UNITTEST: deleting file '%s'", filename); + unlink(filename); + } + } + + closedir(dir); +} + +static TEST *dyncfg_unittest_add(TEST t) { + dyncfg_unittest_reset(); + + TEST *ret = dictionary_set(dyncfg_unittest_data.nodes, t.id, &t, sizeof(t)); + + if(!dyncfg_add_low_level(localhost, t.id, "/unittests", DYNCFG_STATUS_RUNNING, t.type, + t.source_type, t.source, + t.cmds, 0, 0, t.sync, + HTTP_ACCESS_NONE, HTTP_ACCESS_NONE, + dyncfg_unittest_execute_cb, ret)) { + dyncfg_unittest_register_error(t.id, "addition of job failed"); + } + + dyncfg_unittest_check(ret, DYNCFG_CMD_NONE, "plugin create", t.type != DYNCFG_TYPE_TEMPLATE); + + return ret; +} + +void dyncfg_unittest_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + TEST *v = value; + freez((void *)v->id); + freez((void *)v->source); +} + +int dyncfg_unittest(void) { + dyncfg_unittest_data.nodes = dictionary_create(DICT_OPTION_NONE); + dictionary_register_delete_callback(dyncfg_unittest_data.nodes, dyncfg_unittest_delete_cb, NULL); + + dyncfg_unittest_cleanup_files(); + rrd_functions_inflight_init(); + dyncfg_init(false); + + // ------------------------------------------------------------------------ + // create the thread for testing async communication + + ND_THREAD *thread = 
nd_thread_create("unittest", NETDATA_THREAD_OPTION_JOINABLE, dyncfg_unittest_thread_action, NULL); + + // ------------------------------------------------------------------------ + // single + + TEST *single1 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:sync:single1"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_SINGLE, + .cmds = DYNCFG_CMD_GET | DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_INTERNAL, + .sync = true, + .current = { + .enabled = true, + }, + .expected = { + .enabled = true, + } + }); (void)single1; + + TEST *single2 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:async:single2"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_SINGLE, + .cmds = DYNCFG_CMD_GET | DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_INTERNAL, + .sync = false, + .current = { + .enabled = true, + }, + .expected = { + .enabled = true, + } + }); (void)single2; + + // ------------------------------------------------------------------------ + // template + + TEST *template1 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:sync:template1"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_TEMPLATE, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_ADD | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_INTERNAL, + .sync = true, + }); (void)template1; + + TEST *template2 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:async:template2"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_TEMPLATE, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_ADD | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_INTERNAL, + .sync = false, + }); (void)template2; + + // ------------------------------------------------------------------------ + // job + + TEST *user1 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:sync:template1:user1"), + .source 
= strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_JOB, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_USER, + .sync = true, + .current = { + .enabled = true, + }, + .expected = { + .enabled = true, + } + }); (void)user1; + + TEST *user2 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:async:template2:user2"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_JOB, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_USER, + .sync = false, + .expected = { + .enabled = true, + } + }); (void)user2; + + // ------------------------------------------------------------------------ + + int rc; (void)rc; + BUFFER *wb = buffer_create(0, NULL); + + // ------------------------------------------------------------------------ + // dynamic job + + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 add dyn1", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 add dyn2", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 add dyn3", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 add dyn4", wb, "{\"double\":3.14,\"boolean\":true}", LINE_FILE_STR); + + // ------------------------------------------------------------------------ + // saving of user_disabled + + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:single1 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:single2 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:user1 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:user2 
disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn1 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn2 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn3 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn4 disable", wb, NULL, LINE_FILE_STR); + + // ------------------------------------------------------------------------ + // enabling + + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:single1 enable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:single2 enable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:user1 enable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:user2 enable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn1 enable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1:dyn2 enable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn3 enable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:dyn4 enable", wb, NULL, LINE_FILE_STR); + + // ------------------------------------------------------------------------ + // disabling template + + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 disable", wb, NULL, LINE_FILE_STR); + + // ------------------------------------------------------------------------ + // enabling template + + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 enable", 
wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 enable", wb, NULL, LINE_FILE_STR); + + // ------------------------------------------------------------------------ + // adding job on disabled template + + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 disable", wb, NULL, LINE_FILE_STR); + dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 disable", wb, NULL, LINE_FILE_STR); + + TEST *user3 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:sync:template1:user3"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_JOB, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_USER, + .sync = true, + .expected = { + .enabled = false, + } + }); (void)user3; + + TEST *user4 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:async:template2:user4"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_JOB, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_USER, + .sync = false, + .expected = { + .enabled = false, + } + }); (void)user4; + + TEST *user5 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:sync:template1:user5"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_JOB, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_USER, + .sync = true, + .expected = { + .enabled = false, + } + }); (void)user5; + + TEST *user6 = dyncfg_unittest_add((TEST){ + .id = strdupz("unittest:async:template2:user6"), + .source = strdupz(LINE_FILE_STR), + .type = DYNCFG_TYPE_JOB, + .cmds = DYNCFG_CMD_SCHEMA | DYNCFG_CMD_UPDATE | DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE, + .source_type = DYNCFG_SOURCE_TYPE_USER, + .sync = false, + .expected = { + .enabled = false, + } + }); (void)user6; + +// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " 
unittest:sync:template1:user5 disable", wb, NULL, LINE_FILE_STR); +// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2:user6 disable", wb, NULL, LINE_FILE_STR); + +// // ------------------------------------------------------------------------ +// // enable template with disabled jobs +// +// user3->expected.enabled = true; +// user5->expected.enabled = false; +// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:sync:template1 enable", wb, NULL, LINE_FILE_STR); +// +// user4->expected.enabled = true; +// user6->expected.enabled = false; +// dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " unittest:async:template2 enable", wb, NULL, LINE_FILE_STR); + + +// // ------------------------------------------------------------------------ +// +// rc = dyncfg_unittest_run(PLUGINSD_FUNCTION_CONFIG " tree", wb, NULL); +// if(rc == HTTP_RESP_OK) +// fprintf(stderr, "%s\n", buffer_tostring(wb)); + + nd_thread_signal_cancel(thread); + nd_thread_join(thread); + dyncfg_unittest_cleanup_files(); + dictionary_destroy(dyncfg_unittest_data.nodes); + buffer_free(wb); + return __atomic_load_n(&dyncfg_unittest_data.errors, __ATOMIC_RELAXED) > 0 ? 
1 : 0; +} diff --git a/src/daemon/config/dyncfg.c b/src/daemon/config/dyncfg.c new file mode 100644 index 000000000..2f484d1ed --- /dev/null +++ b/src/daemon/config/dyncfg.c @@ -0,0 +1,454 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#include "dyncfg-internals.h" +#include "dyncfg.h" + +struct dyncfg_globals dyncfg_globals = { 0 }; + +RRDHOST *dyncfg_rrdhost_by_uuid(ND_UUID *uuid) { + char uuid_str[UUID_STR_LEN]; + uuid_unparse_lower(uuid->uuid, uuid_str); + + RRDHOST *host = rrdhost_find_by_guid(uuid_str); + if(!host) + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: cannot find host with UUID '%s'", uuid_str); + + return host; +} + +RRDHOST *dyncfg_rrdhost(DYNCFG *df) { + return dyncfg_rrdhost_by_uuid(&df->host_uuid); +} + +void dyncfg_cleanup(DYNCFG *v) { + string_freez(v->dyncfg.source); + v->dyncfg.source = NULL; + + buffer_free(v->dyncfg.payload); + v->dyncfg.payload = NULL; + + string_freez(v->path); + v->path = NULL; + + string_freez(v->current.source); + v->current.source = NULL; + + string_freez(v->function); + v->function = NULL; + + string_freez(v->template); + v->template = NULL; +} + +static void dyncfg_normalize(DYNCFG *df) { + usec_t now_ut = now_realtime_usec(); + + if(!df->current.created_ut) + df->current.created_ut = now_ut; + + if(!df->current.modified_ut) + df->current.modified_ut = now_ut; +} + +static void dyncfg_delete_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + DYNCFG *df = value; + dyncfg_cleanup(df); +} + +static void dyncfg_insert_cb(const DICTIONARY_ITEM *item, void *value, void *data __maybe_unused) { + DYNCFG *df = value; + dyncfg_normalize(df); + + const char *id = dictionary_acquired_item_name(item); + char buf[strlen(id) + 20]; + snprintfz(buf, sizeof(buf), PLUGINSD_FUNCTION_CONFIG " %s", id); + df->function = string_strdupz(buf); + + if(df->type == DYNCFG_TYPE_JOB && !df->template) { + const char *last_colon = strrchr(id, ':'); + if(last_colon) + df->template = string_strndupz(id, 
last_colon - id); + else + nd_log(NDLS_DAEMON, NDLP_WARNING, + "DYNCFG: id '%s' is a job, but does not contain a colon to find the template", id); + } +} + +static void dyncfg_react_cb(const DICTIONARY_ITEM *item __maybe_unused, void *value, void *data __maybe_unused) { + DYNCFG *df = value; (void)df; + ; +} + +static bool dyncfg_conflict_cb(const DICTIONARY_ITEM *item __maybe_unused, void *old_value, void *new_value, void *data) { + bool *overwrite_cb_ptr = data; + bool overwrite_cb = (overwrite_cb_ptr && *overwrite_cb_ptr); + + DYNCFG *v = old_value; + DYNCFG *nv = new_value; + + size_t changes = 0; + + dyncfg_normalize(nv); + + if(!UUIDeq(v->host_uuid, nv->host_uuid)) { + SWAP(v->host_uuid, nv->host_uuid); + changes++; + } + + if(v->path != nv->path) { + SWAP(v->path, nv->path); + changes++; + } + + if(v->cmds != nv->cmds) { + SWAP(v->cmds, nv->cmds); + changes++; + } + + if(v->type != nv->type) { + SWAP(v->type, nv->type); + changes++; + } + + if(v->view_access != nv->view_access) { + SWAP(v->view_access, nv->view_access); + changes++; + } + + if(v->edit_access != nv->edit_access) { + SWAP(v->edit_access, nv->edit_access); + changes++; + } + + if(v->current.status != nv->current.status) { + SWAP(v->current.status, nv->current.status); + changes++; + } + + if (v->current.source_type != nv->current.source_type) { + SWAP(v->current.source_type, nv->current.source_type); + changes++; + } + + if (v->current.source != nv->current.source) { + SWAP(v->current.source, nv->current.source); + changes++; + } + + if(nv->current.created_ut < v->current.created_ut) { + SWAP(v->current.created_ut, nv->current.created_ut); + changes++; + } + + if(nv->current.modified_ut > v->current.modified_ut) { + SWAP(v->current.modified_ut, nv->current.modified_ut); + changes++; + } + + if(!v->execute_cb || (overwrite_cb && nv->execute_cb && (v->execute_cb != nv->execute_cb || v->execute_cb_data != nv->execute_cb_data))) { + v->sync = nv->sync, + v->execute_cb = nv->execute_cb; + 
v->execute_cb_data = nv->execute_cb_data; + changes++; + } + + dyncfg_cleanup(nv); + + return changes > 0; +} + +// ---------------------------------------------------------------------------- + +void dyncfg_init_low_level(bool load_saved) { + if(!dyncfg_globals.nodes) { + dyncfg_globals.nodes = dictionary_create_advanced(DICT_OPTION_FIXED_SIZE | DICT_OPTION_DONT_OVERWRITE_VALUE, NULL, sizeof(DYNCFG)); + dictionary_register_insert_callback(dyncfg_globals.nodes, dyncfg_insert_cb, NULL); + dictionary_register_react_callback(dyncfg_globals.nodes, dyncfg_react_cb, NULL); + dictionary_register_conflict_callback(dyncfg_globals.nodes, dyncfg_conflict_cb, NULL); + dictionary_register_delete_callback(dyncfg_globals.nodes, dyncfg_delete_cb, NULL); + + char path[PATH_MAX]; + snprintfz(path, sizeof(path), "%s/%s", netdata_configured_varlib_dir, "config"); + + if(mkdir(path, 0755) == -1) { + if(errno != EEXIST) + nd_log(NDLS_DAEMON, NDLP_CRIT, "DYNCFG: failed to create dynamic configuration directory '%s'", path); + } + + dyncfg_globals.dir = strdupz(path); + + if(load_saved) + dyncfg_load_all(); + } +} + +// ---------------------------------------------------------------------------- + +const DICTIONARY_ITEM *dyncfg_add_internal(RRDHOST *host, const char *id, const char *path, + DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, + const char *source, DYNCFG_CMDS cmds, + usec_t created_ut, usec_t modified_ut, + bool sync, HTTP_ACCESS view_access, HTTP_ACCESS edit_access, + rrd_function_execute_cb_t execute_cb, void *execute_cb_data, + bool overwrite_cb) { + DYNCFG tmp = { + .host_uuid = uuid2UUID(host->host_uuid), + .path = string_strdupz(path), + .cmds = cmds, + .type = type, + .view_access = view_access, + .edit_access = edit_access, + .current = { + .status = status, + .source_type = source_type, + .source = string_strdupz(source), + .created_ut = created_ut, + .modified_ut = modified_ut, + }, + .sync = sync, + .dyncfg = { 0 }, + .execute_cb = 
execute_cb, + .execute_cb_data = execute_cb_data, + }; + + return dictionary_set_and_acquire_item_advanced(dyncfg_globals.nodes, id, -1, &tmp, sizeof(tmp), &overwrite_cb); +} + +static void dyncfg_send_updates(const char *id) { + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item_advanced(dyncfg_globals.nodes, id, -1); + if(!item) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: asked to update plugin for configuration '%s', but it is not found.", id); + return; + } + + DYNCFG *df = dictionary_acquired_item_value(item); + + if(df->type == DYNCFG_TYPE_SINGLE || df->type == DYNCFG_TYPE_JOB) { + if (df->cmds & DYNCFG_CMD_UPDATE && df->dyncfg.source_type == DYNCFG_SOURCE_TYPE_DYNCFG && df->dyncfg.payload && buffer_strlen(df->dyncfg.payload)) + dyncfg_echo_update(item, df, id); + } + else if(df->type == DYNCFG_TYPE_TEMPLATE && (df->cmds & DYNCFG_CMD_ADD)) { + STRING *template = string_strdupz(id); + + size_t len = strlen(id); + DYNCFG *df_job; + dfe_start_reentrant(dyncfg_globals.nodes, df_job) { + const char *id_template = df_job_dfe.name; + if(df_job->type == DYNCFG_TYPE_JOB && // it is a job + df_job->current.source_type == DYNCFG_SOURCE_TYPE_DYNCFG && // it is dynamically configured + df_job->template == template && // it has the same template name + strncmp(id_template, id, len) == 0 && // the template name matches (redundant) + id_template[len] == ':' && // immediately after the template there is ':' + id_template[len + 1]) { // and there is something else after the ':' + dyncfg_echo_add(item, df_job_dfe.item, df, df_job, id, &id_template[len + 1]); + } + } + dfe_done(df_job); + + string_freez(template); + } + + dictionary_acquired_item_release(dyncfg_globals.nodes, item); +} + +bool dyncfg_is_user_disabled(const char *id) { + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id); + if(!item) + return false; + + DYNCFG *df = dictionary_acquired_item_value(item); + bool ret = df->dyncfg.user_disabled; + 
dictionary_acquired_item_release(dyncfg_globals.nodes, item); + return ret; +} + +bool dyncfg_job_has_registered_template(const char *id) { + char buf[strlen(id) + 1]; + memcpy(buf, id, sizeof(buf)); + char *colon = strrchr(buf, ':'); + if(!colon) + return false; + + *colon = '\0'; + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, buf); + if(!item) + return false; + + DYNCFG *df = dictionary_acquired_item_value(item); + bool ret = df->type == DYNCFG_TYPE_TEMPLATE; + + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + return ret; +} + +bool dyncfg_add_low_level(RRDHOST *host, const char *id, const char *path, + DYNCFG_STATUS status, DYNCFG_TYPE type, DYNCFG_SOURCE_TYPE source_type, const char *source, + DYNCFG_CMDS cmds, usec_t created_ut, usec_t modified_ut, bool sync, + HTTP_ACCESS view_access, HTTP_ACCESS edit_access, + rrd_function_execute_cb_t execute_cb, void *execute_cb_data) { + + if(view_access == HTTP_ACCESS_NONE) + view_access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_VIEW_AGENT_CONFIG; + + if(edit_access == HTTP_ACCESS_NONE) + edit_access = HTTP_ACCESS_SIGNED_ID | HTTP_ACCESS_SAME_SPACE | HTTP_ACCESS_EDIT_AGENT_CONFIG | HTTP_ACCESS_COMMERCIAL_SPACE; + + if(!dyncfg_is_valid_id(id)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id); + return false; + } + + if(type == DYNCFG_TYPE_JOB && !dyncfg_job_has_registered_template(id)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: job id '%s' does not have a registered template. 
Ignoring dynamic configuration for it.", id); + return false; + } + + DYNCFG_CMDS old_cmds = cmds; + + // all configurations support schema + cmds |= DYNCFG_CMD_SCHEMA; + + // if there is either enable or disable, both are supported + if(cmds & (DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE)) + cmds |= DYNCFG_CMD_ENABLE | DYNCFG_CMD_DISABLE; + + // add + if(type == DYNCFG_TYPE_TEMPLATE) { + // templates must always support "add" + cmds |= DYNCFG_CMD_ADD; + } + else { + // only templates can have "add" + cmds &= ~DYNCFG_CMD_ADD; + } + + // remove + if(source_type != DYNCFG_SOURCE_TYPE_DYNCFG || type != DYNCFG_TYPE_JOB) { + // remove is only available for dyncfg jobs + cmds &= ~DYNCFG_CMD_REMOVE; + } + + // data + if(type == DYNCFG_TYPE_TEMPLATE) { + // templates do not have data + cmds &= ~(DYNCFG_CMD_GET | DYNCFG_CMD_UPDATE); + } + + if(cmds != old_cmds) { + CLEAN_BUFFER *t = buffer_create(1024, NULL); + buffer_sprintf(t, "DYNCFG: id '%s' was declared with cmds: ", id); + dyncfg_cmds2buffer(old_cmds, t); + buffer_strcat(t, ", but they have sanitized to: "); + dyncfg_cmds2buffer(cmds, t); + nd_log(NDLS_DAEMON, NDLP_NOTICE, "%s", buffer_tostring(t)); + } + + const DICTIONARY_ITEM *item = dyncfg_add_internal(host, id, path, status, type, source_type, source, cmds, + created_ut, modified_ut, sync, view_access, edit_access, + execute_cb, execute_cb_data, true); + DYNCFG *df = dictionary_acquired_item_value(item); + +// if(df->source_type == DYNCFG_SOURCE_TYPE_DYNCFG && !df->saves) +// nd_log(NDLS_DAEMON, NDLP_WARNING, "DYNCFG: configuration '%s' is created with source type dyncfg, but we don't have a saved configuration for it", id); + + rrd_collector_started(); + rrd_function_add( + host, + NULL, + string2str(df->function), + 120, + 1000, + "Dynamic configuration", + "config", + (view_access & edit_access), + sync, + dyncfg_function_intercept_cb, + NULL); + + if(df->type != DYNCFG_TYPE_TEMPLATE && (df->cmds & (DYNCFG_CMD_ENABLE|DYNCFG_CMD_DISABLE))) { + DYNCFG_CMDS 
status_to_send_to_plugin = + (df->dyncfg.user_disabled || df->current.status == DYNCFG_STATUS_DISABLED) ? DYNCFG_CMD_DISABLE : DYNCFG_CMD_ENABLE; + + if (status_to_send_to_plugin == DYNCFG_CMD_ENABLE && dyncfg_is_user_disabled(string2str(df->template))) + status_to_send_to_plugin = DYNCFG_CMD_DISABLE; + + dyncfg_echo(item, df, id, status_to_send_to_plugin); + } + + if(!(df->current.source_type == DYNCFG_SOURCE_TYPE_DYNCFG && df->type == DYNCFG_TYPE_JOB)) + dyncfg_send_updates(id); + + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + + return true; +} + +void dyncfg_del_low_level(RRDHOST *host, const char *id) { + if(!dyncfg_is_valid_id(id)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id); + return; + } + + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id); + if(item) { + DYNCFG *df = dictionary_acquired_item_value(item); + rrd_function_del(host, NULL, string2str(df->function)); + + bool garbage_collect = false; + if(df->dyncfg.saves == 0) { + dictionary_del(dyncfg_globals.nodes, id); + garbage_collect = true; + } + + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + + if(garbage_collect) + dictionary_garbage_collect(dyncfg_globals.nodes); + } +} + +void dyncfg_status_low_level(RRDHOST *host __maybe_unused, const char *id, DYNCFG_STATUS status) { + if(!dyncfg_is_valid_id(id)) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: id '%s' is invalid. Ignoring dynamic configuration for it.", id); + return; + } + + if(status == DYNCFG_STATUS_NONE) { + nd_log(NDLS_DAEMON, NDLP_ERR, "DYNCFG: status provided to id '%s' is invalid. 
Ignoring it.", id); + return; + } + + const DICTIONARY_ITEM *item = dictionary_get_and_acquire_item(dyncfg_globals.nodes, id); + if(item) { + DYNCFG *df = dictionary_acquired_item_value(item); + df->current.status = status; + dictionary_acquired_item_release(dyncfg_globals.nodes, item); + } +} + +// ---------------------------------------------------------------------------- + +void dyncfg_add_streaming(BUFFER *wb) { + // when sending config functions to parents, we send only 1 function called 'config'; + // the parent will send the command to the child, and the child will validate it; + // this way the parent does not need to receive removals of config functions; + + buffer_sprintf(wb + , PLUGINSD_KEYWORD_FUNCTION " GLOBAL " PLUGINSD_FUNCTION_CONFIG " %d \"%s\" \"%s\" "HTTP_ACCESS_FORMAT" %d\n" + , 120 + , "Dynamic configuration" + , "config" + , (unsigned)HTTP_ACCESS_ANONYMOUS_DATA + , 1000 + ); +} + +bool dyncfg_available_for_rrdhost(RRDHOST *host) { + if(host == localhost || rrdhost_option_check(host, RRDHOST_OPTION_VIRTUAL_HOST)) + return true; + + return rrd_function_available(host, PLUGINSD_FUNCTION_CONFIG); +} + +// ---------------------------------------------------------------------------- + diff --git a/src/daemon/config/dyncfg.h b/src/daemon/config/dyncfg.h new file mode 100644 index 000000000..539eddbfb --- /dev/null +++ b/src/daemon/config/dyncfg.h @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-3.0-or-later + +#ifndef NETDATA_DYNCFG_H +#define NETDATA_DYNCFG_H + +#include "../common.h" +#include "database/rrd.h" +#include "database/rrdfunctions.h" + +void dyncfg_add_streaming(BUFFER *wb); +bool dyncfg_available_for_rrdhost(RRDHOST *host); +void dyncfg_host_init(RRDHOST *host); + +// low-level API used by plugins.d and high-level API +bool dyncfg_add_low_level(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, + DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, + usec_t created_ut, usec_t 
modified_ut, bool sync, + HTTP_ACCESS view_access, HTTP_ACCESS edit_access, + rrd_function_execute_cb_t execute_cb, void *execute_cb_data); +void dyncfg_del_low_level(RRDHOST *host, const char *id); +void dyncfg_status_low_level(RRDHOST *host, const char *id, DYNCFG_STATUS status); +void dyncfg_init_low_level(bool load_saved); + +// high-level API for internal modules +bool dyncfg_add(RRDHOST *host, const char *id, const char *path, DYNCFG_STATUS status, DYNCFG_TYPE type, + DYNCFG_SOURCE_TYPE source_type, const char *source, DYNCFG_CMDS cmds, + HTTP_ACCESS view_access, HTTP_ACCESS edit_access, + dyncfg_cb_t cb, void *data); +void dyncfg_del(RRDHOST *host, const char *id); +void dyncfg_status(RRDHOST *host, const char *id, DYNCFG_STATUS status); + +void dyncfg_init(bool load_saved); + +#endif //NETDATA_DYNCFG_H |