author     Daniel Baumann <daniel.baumann@progress-linux.org>  2022-08-12 07:26:17 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2022-08-12 07:26:17 +0000
commit     7877a98bd9c00db5e81dd2f8c734cba2bab20be7 (patch)
tree       d18b767250f7c7ced9b8abe2ece784ac1fe24d3e /daemon
parent     Releasing debian version 1.35.1-2. (diff)
Merging upstream version 1.36.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'daemon')
-rw-r--r--  daemon/README.md                     |   8
-rw-r--r--  daemon/analytics.c                   |  27
-rw-r--r--  daemon/buildinfo.c                   |  15
-rw-r--r--  daemon/commands.c                    |  12
-rw-r--r--  daemon/common.h                      |   3
-rw-r--r--  daemon/config/README.md              |  90
-rw-r--r--  daemon/get-kubernetes-labels.sh.in   |  17
-rw-r--r--  daemon/global_statistics.c           | 104
-rw-r--r--  daemon/main.c                        | 202
-rw-r--r--  daemon/static_threads.c              |  12
-rwxr-xr-x  daemon/system-info.sh                |  10
-rw-r--r--  daemon/unit_test.c                   | 581
-rw-r--r--  daemon/unit_test.h                   |   1
13 files changed, 752 insertions(+), 330 deletions(-)
diff --git a/daemon/README.md b/daemon/README.md
index 44abfa8e9..3ebb405b2 100644
--- a/daemon/README.md
+++ b/daemon/README.md
@@ -180,6 +180,14 @@ The command line options of the Netdata 1.10.0 version are the following:
-W set section option value
set netdata.conf option from the command line.
+ -W buildinfo Print the version, the configure options,
+ a list of optional features, and whether they
+ are enabled or not.
+
+ -W buildinfojson Print the version, the configure options,
+ a list of optional features, and whether they
+ are enabled or not, in JSON format.
+
-W simple-pattern pattern string
Check if string matches pattern and exit.
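
A minimal usage sketch for the two new switches documented above (assumes a standard install with `netdata` on the PATH; piping through `jq` is optional and only pretty-prints the JSON output):

    netdata -W buildinfo
    netdata -W buildinfojson | jq .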
diff --git a/daemon/analytics.c b/daemon/analytics.c
index 6c02561d0..370818b8a 100644
--- a/daemon/analytics.c
+++ b/daemon/analytics.c
@@ -7,7 +7,7 @@ struct analytics_data analytics_data;
extern void analytics_exporting_connectors (BUFFER *b);
extern void analytics_exporting_connectors_ssl (BUFFER *b);
extern void analytics_build_info (BUFFER *b);
-extern int aclk_connected, aclk_use_new_cloud_arch;
+extern int aclk_connected;
struct collector {
char *plugin;
@@ -382,7 +382,7 @@ void analytics_https(void)
BUFFER *b = buffer_create(30);
#ifdef ENABLE_HTTPS
analytics_exporting_connectors_ssl(b);
- buffer_strcat(b, netdata_client_ctx && localhost->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE && localhost->rrdpush_sender_connected == 1 ? "streaming|" : "|");
+ buffer_strcat(b, netdata_client_ctx && localhost->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE && __atomic_load_n(&localhost->rrdpush_sender_connected, __ATOMIC_SEQ_CST) ? "streaming|" : "|");
buffer_strcat(b, netdata_srv_ctx ? "web" : "");
#else
buffer_strcat(b, "||");
@@ -499,12 +499,7 @@ void analytics_aclk(void)
#ifdef ENABLE_ACLK
if (aclk_connected) {
analytics_set_data(&analytics_data.netdata_host_aclk_available, "true");
-#ifdef ENABLE_NEW_CLOUD_PROTOCOL
- if (aclk_use_new_cloud_arch)
- analytics_set_data_str(&analytics_data.netdata_host_aclk_protocol, "New");
- else
-#endif
- analytics_set_data_str(&analytics_data.netdata_host_aclk_protocol, "Legacy");
+ analytics_set_data_str(&analytics_data.netdata_host_aclk_protocol, "New");
}
else
#endif
@@ -546,7 +541,7 @@ void analytics_gather_mutable_meta_data(void)
analytics_set_data(
&analytics_data.netdata_config_is_parent, (localhost->next || configured_as_parent()) ? "true" : "false");
- char *claim_id = is_agent_claimed();
+ char *claim_id = get_agent_claimid();
analytics_set_data(&analytics_data.netdata_host_agent_claimed, claim_id ? "true" : "false");
freez(claim_id);
@@ -847,6 +842,20 @@ void set_global_environment()
setenv("HOME", verify_required_directory(netdata_configured_home_dir), 1);
setenv("NETDATA_HOST_PREFIX", netdata_configured_host_prefix, 1);
+ {
+ BUFFER *user_plugins_dirs = buffer_create(FILENAME_MAX);
+
+ for (size_t i = 1; i < PLUGINSD_MAX_DIRECTORIES && plugin_directories[i]; i++) {
+ if (i > 1)
+ buffer_strcat(user_plugins_dirs, " ");
+ buffer_strcat(user_plugins_dirs, plugin_directories[i]);
+ }
+
+ setenv("NETDATA_USER_PLUGINS_DIRS", buffer_tostring(user_plugins_dirs), 1);
+
+ buffer_free(user_plugins_dirs);
+ }
+
analytics_data.data_length = 0;
analytics_set_data(&analytics_data.netdata_config_stream_enabled, "null");
analytics_set_data(&analytics_data.netdata_config_memory_mode, "null");
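
A small sketch of how an external plugin script could consume the NETDATA_USER_PLUGINS_DIRS variable exported by the hunk above (the value is a space-separated list, as built by the loop; the script itself is hypothetical):

    #!/usr/bin/env bash
    # NETDATA_USER_PLUGINS_DIRS is set by the daemon to the user plugin directories,
    # joined with single spaces, so plain word-splitting is sufficient here.
    for dir in ${NETDATA_USER_PLUGINS_DIRS}; do
        echo "user plugin directory: ${dir}"
    done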
diff --git a/daemon/buildinfo.c b/daemon/buildinfo.c
index 86c586afc..0a64547af 100644
--- a/daemon/buildinfo.c
+++ b/daemon/buildinfo.c
@@ -20,12 +20,6 @@
#endif
#endif
-#ifdef ENABLE_NEW_CLOUD_PROTOCOL
-#define NEW_CLOUD_PROTO 1
-#else
-#define NEW_CLOUD_PROTO 0
-#endif
-
#ifdef ENABLE_DBENGINE
#define FEAT_DBENGINE 1
#else
@@ -273,7 +267,7 @@ void print_build_info(void) {
printf(" Native HTTPS: %s\n", FEAT_YES_NO(FEAT_NATIVE_HTTPS));
printf(" Netdata Cloud: %s %s\n", FEAT_YES_NO(FEAT_CLOUD), FEAT_CLOUD_MSG);
printf(" ACLK Next Generation: %s\n", FEAT_YES_NO(FEAT_CLOUD));
- printf(" ACLK-NG New Cloud Protocol: %s\n", FEAT_YES_NO(NEW_CLOUD_PROTO));
+ printf(" ACLK-NG New Cloud Protocol: %s\n", FEAT_YES_NO(1));
printf(" ACLK Legacy: %s\n", FEAT_YES_NO(0));
printf(" TLS Host Verification: %s\n", FEAT_YES_NO(FEAT_TLS_HOST_VERIFY));
printf(" Machine Learning: %s\n", FEAT_YES_NO(FEAT_ML));
@@ -325,7 +319,7 @@ void print_build_info_json(void) {
printf(" \"cloud-disabled\": false,\n");
#endif
printf(" \"aclk-ng\": %s,\n", FEAT_JSON_BOOL(FEAT_CLOUD));
- printf(" \"aclk-ng-new-cloud-proto\": %s,\n", FEAT_JSON_BOOL(NEW_CLOUD_PROTO));
+ printf(" \"aclk-ng-new-cloud-proto\": %s,\n", FEAT_JSON_BOOL(1));
printf(" \"aclk-legacy\": %s,\n", FEAT_JSON_BOOL(0));
printf(" \"tls-host-verify\": %s,\n", FEAT_JSON_BOOL(FEAT_TLS_HOST_VERIFY));
@@ -383,10 +377,7 @@ void analytics_build_info(BUFFER *b) {
add_to_bi(b, "Native HTTPS");
#endif
#ifdef ENABLE_ACLK
- add_to_bi(b, "Netdata Cloud|ACLK Next Generation");
-#endif
-#ifdef ENABLE_NEW_CLOUD_PROTOCOL
- add_to_bi(b, "New Cloud Protocol Support");
+ add_to_bi(b, "Netdata Cloud|ACLK Next Generation|New Cloud Protocol Support");
#endif
#if (FEAT_TLS_HOST_VERIFY!=0)
add_to_bi(b, "TLS Host Verification");
diff --git a/daemon/commands.c b/daemon/commands.c
index 6efc37c96..13d8dbd40 100644
--- a/daemon/commands.c
+++ b/daemon/commands.c
@@ -217,17 +217,7 @@ static cmd_status_t cmd_reload_labels_execute(char *args, char **message)
reload_host_labels();
BUFFER *wb = buffer_create(10);
-
- rrdhost_rdlock(localhost);
- netdata_rwlock_rdlock(&localhost->labels.labels_rwlock);
- struct label *l = localhost->labels.head;
- while (l != NULL) {
- buffer_sprintf(wb,"Label [source id=%s]: \"%s\" -> \"%s\"\n", translate_label_source(l->label_source), l->key, l->value);
- l = l->next;
- }
- netdata_rwlock_unlock(&localhost->labels.labels_rwlock);
- rrdhost_unlock(localhost);
-
+ rrdlabels_log_to_buffer(localhost->host_labels, wb);
(*message)=strdupz(buffer_tostring(wb));
buffer_free(wb);
diff --git a/daemon/common.h b/daemon/common.h
index da96e2ac1..2a45ffe70 100644
--- a/daemon/common.h
+++ b/daemon/common.h
@@ -84,9 +84,6 @@
#include "commands.h"
#include "analytics.h"
-// metric correlations
-#include "database/metric_correlations.h"
-
// global netdata daemon variables
extern char *netdata_configured_hostname;
extern char *netdata_configured_user_config_dir;
diff --git a/daemon/config/README.md b/daemon/config/README.md
index 72f688543..7b4d27ecf 100644
--- a/daemon/config/README.md
+++ b/daemon/config/README.md
@@ -26,20 +26,21 @@ the [web server access lists](/web/server/README.md#access-lists).
`netdata.conf` has sections stated with `[section]`. You will see the following sections:
1. `[global]` to [configure](#global-section-options) the [Netdata daemon](/daemon/README.md).
-2. `[directories]` to [configure](#directories-section-options) the directories used by Netdata.
-3. `[logs]` to [configure](#logs-section-options) the Netdata logging.
-4. `[environment variables]` to [configure](#environment-variables-section-options) the environment variables used
+2. `[db]` to [configure](#db-section-options) the database of Netdata.
+3. `[directories]` to [configure](#directories-section-options) the directories used by Netdata.
+4. `[logs]` to [configure](#logs-section-options) the Netdata logging.
+5. `[environment variables]` to [configure](#environment-variables-section-options) the environment variables used
Netdata.
-5. `[sqlite]` to [configure](#sqlite-section-options) the [Netdata daemon](/daemon/README.md) SQLite settings.
-6. `[ml]` to configure settings for [machine learning](/ml/README.md).
-7. `[health]` to [configure](#health-section-options) general settings for [health monitoring](/health/README.md).
-8. `[web]` to [configure the web server](/web/server/README.md).
-9. `[registry]` for the [Netdata registry](/registry/README.md).
-10. `[global statistics]` for the [Netdata registry](/registry/README.md).
-11. `[statsd]` for the general settings of the [stats.d.plugin](/collectors/statsd.plugin/README.md).
-12. `[plugins]` to [configure](#plugins-section-options) which [collectors](/collectors/README.md) to use and PATH
+6. `[sqlite]` to [configure](#sqlite-section-options) the [Netdata daemon](/daemon/README.md) SQLite settings.
+7. `[ml]` to configure settings for [machine learning](/ml/README.md).
+8. `[health]` to [configure](#health-section-options) general settings for [health monitoring](/health/README.md).
+9. `[web]` to [configure the web server](/web/server/README.md).
+10. `[registry]` for the [Netdata registry](/registry/README.md).
+11. `[global statistics]` for the [Netdata registry](/registry/README.md).
+12. `[statsd]` for the general settings of the [stats.d.plugin](/collectors/statsd.plugin/README.md).
+13. `[plugins]` to [configure](#plugins-section-options) which [collectors](/collectors/README.md) to use and PATH
settings.
-13. `[plugin:NAME]` sections for each collector plugin, under the
+14. `[plugin:NAME]` sections for each collector plugin, under the
comment [Per plugin configuration](#per-plugin-configuration).
The configuration file is a `name = value` dictionary. Netdata will not complain if you set options unknown to it. When
@@ -67,30 +68,47 @@ Please note that your data history will be lost if you have modified `history` p
### [global] section options
-| setting | default | info |
-|:-------------------------------------:|:------------------------------------------------------------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
-| process scheduling policy | `keep` | See [Netdata process scheduling policy](/daemon/README.md#netdata-process-scheduling-policy) |
-| OOM score | `0` | |
-| glibc malloc arena max for plugins | `1` | See [Virtual memory](/daemon/README.md#virtual-memory). |
-| glibc malloc arena max for Netdata | `1` | See [Virtual memory](/daemon/README.md#virtual-memory). |
-| hostname | auto-detected | The hostname of the computer running Netdata. |
-| history | `3996` | Used with `memory mode = save/map/ram/alloc`, not the default `memory mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. This setting can also be configured per chart. Check [Memory Requirements](/database/README.md) for more information. |
-| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](/docs/guides/configure/performance.md). |
-| memory mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `page cache size` and `dbengine disk space`. <br />`save`: Netdata will save its round robin database on exit and load it on startup. <br />`map`: Cache files will be updated in real-time. Not ideal for systems with high load or slow disks (check `man mmap`). <br />`ram`: The round-robin database will be temporary and it will be lost when Netdata exits. <br />`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. |
-| page cache size | 32 | Determines the amount of RAM in MiB that is dedicated to caching Netdata metric values. |
-| dbengine disk space | 256 | Determines the amount of disk space in MiB that is dedicated to storing Netdata metric values and all related metadata describing them. |
-| dbengine multihost disk space | 256 | Same functionality as `dbengine disk space`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. |
-| host access prefix | | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43). |
-| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/database/README.md#ksm) |
-| timezone | auto-detected | The timezone retrieved from the environment variable |
-| run as user | `netdata` | The user Netdata will run as. |
-| pthread stack size | auto-detected | |
-| cleanup obsolete charts after seconds | `3600` | See [monitoring ephemeral containers](/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions |
-| gap when lost iterations above | `1` | |
-| cleanup orphan hosts after seconds | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. |
-| delete obsolete charts files | `yes` | See [monitoring ephemeral containers](/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions |
-| delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. |
-| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
+| setting | default | info |
+|:-------------------------------------:|:-------------:|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| process scheduling policy | `keep` | See [Netdata process scheduling policy](/daemon/README.md#netdata-process-scheduling-policy) |
+| OOM score | `0` | |
+| glibc malloc arena max for plugins | `1` | See [Virtual memory](/daemon/README.md#virtual-memory). |
+| glibc malloc arena max for Netdata | `1` | See [Virtual memory](/daemon/README.md#virtual-memory). |
+| hostname | auto-detected | The hostname of the computer running Netdata. |
+| host access prefix | empty | This is used in docker environments where /proc, /sys, etc have to be accessed via another path. You may also have to set SYS_PTRACE capability on the docker for this work. Check [issue 43](https://github.com/netdata/netdata/issues/43). |
+| timezone | auto-detected | The timezone retrieved from the environment variable |
+| run as user | `netdata` | The user Netdata will run as. |
+| pthread stack size | auto-detected | |
+
+### [db] section options
+
+| setting | default | info |
+|:---------------------------------------------:|:----------:|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
+| mode | `dbengine` | `dbengine`: The default for long-term metrics storage with efficient RAM and disk usage. Can be extended with `dbengine page cache size MB` and `dbengine disk space MB`. <br />`save`: Netdata will save its round robin database on exit and load it on startup. <br />`map`: Cache files will be updated in real-time. Not ideal for systems with high load or slow disks (check `man mmap`). <br />`ram`: The round-robin database will be temporary and it will be lost when Netdata exits. <br />`none`: Disables the database at this host, and disables health monitoring entirely, as that requires a database of metrics. |
+| retention | `3600` | Used with `mode = save/map/ram/alloc`, not the default `mode = dbengine`. This number reflects the number of entries the `netdata` daemon will by default keep in memory for each chart dimension. Check [Memory Requirements](/database/README.md) for more information. |
+| storage tiers | `1` | The number of storage tiers you want to have in your dbengine. Check the tiering mechanism in the [dbengine's reference](/database/engine/README.md#tiering). You can have up to 5 tiers of data (including the _Tier 0_). This number ranges between 1 and 5. |
+| dbengine page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated to caching for _Tier 0_ Netdata metric values. |
+| dbengine tier **`N`** page cache size MB | `32` | Determines the amount of RAM in MiB that is dedicated for caching Netdata metric values of the **`N`** tier. <br /> `N belongs to [1..4]` ||
+ | dbengine disk space MB | `256` | Determines the amount of disk space in MiB that is dedicated to storing _Tier 0_ Netdata metric values and all related metadata describing them. This option is available **only for legacy configuration** (`Agent v1.23.2 and prior`). |
+| dbengine multihost disk space MB | `256` | Same functionality as `dbengine disk space MB`, but includes support for storing metrics streamed to a parent node by its children. Can be used in single-node environments as well. This setting is only for _Tier 0_ metrics. |
+| dbengine tier **`N`** multihost disk space MB | `256` | Same functionality as `dbengine multihost disk space MB`, but stores metrics of the **`N`** tier (both parent node and its children). Can be used in single-node environments as well. <br /> `N belongs to [1..4]` |
+| update every | `1` | The frequency in seconds, for data collection. For more information see the [performance guide](/docs/guides/configure/performance.md). These metrics stored as _Tier 0_ data. Explore the tiering mechanism in the [dbengine's reference](/database/engine/README.md#tiering). |
+| dbengine tier **`N`** update every iterations | `60` | The down sampling value of each tier from the previous one. For each Tier, the greater by one Tier has N (equal to 60 by default) less data points of any metric it collects. This setting can take values from `2` up to `255`. <br /> `N belongs to [1..4]` |
+| dbengine tier **`N`** back fill | `New` | Specifies the strategy of recreating missing data on each Tier from the exact lower Tier. <br /> `New`: Sees the latest point on each Tier and save new points to it only if the exact lower Tier has available points for it's observation window (`dbengine tier N update every iterations` window). <br /> `none`: No back filling is applied. <br /> `N belongs to [1..4]` |
+| memory deduplication (ksm) | `yes` | When set to `yes`, Netdata will offer its in-memory round robin database and the dbengine page cache to kernel same page merging (KSM) for deduplication. For more information check [Memory Deduplication - Kernel Same Page Merging - KSM](/database/README.md#ksm) |
+| cleanup obsolete charts after secs | `3600` | See [monitoring ephemeral containers](/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also sets the timeout for cleaning up obsolete dimensions |
+| gap when lost iterations above | `1` | |
+| cleanup orphan hosts after secs | `3600` | How long to wait until automatically removing from the DB a remote Netdata host (child) that is no longer sending data. |
+| delete obsolete charts files | `yes` | See [monitoring ephemeral containers](/collectors/cgroups.plugin/README.md#monitoring-ephemeral-containers), also affects the deletion of files for obsolete dimensions |
+| delete orphan hosts files | `yes` | Set to `no` to disable non-responsive host removal. |
+| enable zero metrics | `no` | Set to `yes` to show charts when all their metrics are zero. |
+
+:::info
+
+The multiplication of all the **enabled** tiers `dbengine tier N update every iterations` values must be less than `65535`.
+
+:::
+
### [directories] section options
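
An illustrative `netdata.conf` excerpt using the renamed `[db]` options documented above (values are examples only, not recommendations; the tier options follow the `dbengine tier N ...` naming from the table):

    [db]
        mode = dbengine
        update every = 1
        storage tiers = 3
        dbengine multihost disk space MB = 256
        dbengine tier 1 multihost disk space MB = 256
        dbengine tier 1 update every iterations = 60
        dbengine tier 2 multihost disk space MB = 256
        dbengine tier 2 update every iterations = 60

With these example values the product of the enabled tiers' `update every iterations` settings is 60 x 60 = 3600, well below the 65535 limit noted in the info box above.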
diff --git a/daemon/get-kubernetes-labels.sh.in b/daemon/get-kubernetes-labels.sh.in
index 7e11ba3dd..bc82c2aee 100644
--- a/daemon/get-kubernetes-labels.sh.in
+++ b/daemon/get-kubernetes-labels.sh.in
@@ -1,4 +1,5 @@
#!/usr/bin/env bash
+me="$(basename "${0}")"
# Checks if netdata is running in a kubernetes pod and fetches:
# - pod's labels
@@ -8,8 +9,8 @@ if [ -z "${KUBERNETES_SERVICE_HOST}" ] || [ -z "${KUBERNETES_PORT_443_TCP_PORT}"
exit 0
fi
-if ! command -v jq > /dev/null 2>&1; then
- echo "jq command not available. Please install jq to get host labels for kubernetes pods."
+if ! command -v jq >/dev/null 2>&1; then
+ echo >&2 "${me}: jq command not available. Please install jq to get host labels for kubernetes pods."
exit 1
fi
@@ -18,24 +19,24 @@ HEADER="Authorization: Bearer $TOKEN"
HOST="$KUBERNETES_SERVICE_HOST:$KUBERNETES_PORT_443_TCP_PORT"
URL="https://$HOST/api/v1/namespaces/$MY_POD_NAMESPACE/pods/$MY_POD_NAME"
-if ! POD_DATA=$(curl -sSk -H "$HEADER" "$URL" 2>&1); then
- echo "error on curl '${URL}': ${POD_DATA}."
+if ! POD_DATA=$(curl --fail -sSk -H "$HEADER" "$URL" 2>&1); then
+ echo >&2 "${me}: error on curl '${URL}': ${POD_DATA}."
exit 1
fi
URL="https://$HOST/api/v1/namespaces/kube-system"
-if ! KUBE_SYSTEM_NS_DATA=$(curl -sSk -H "$HEADER" "$URL" 2>&1); then
- echo "error on curl '${URL}': ${KUBE_SYSTEM_NS_DATA}."
+if ! KUBE_SYSTEM_NS_DATA=$(curl --fail -sSk -H "$HEADER" "$URL" 2>&1); then
+ echo >&2 "${me}: error on curl '${URL}': ${KUBE_SYSTEM_NS_DATA}."
exit 1
fi
if ! POD_LABELS=$(jq -r '.metadata.labels' <<< "$POD_DATA" | grep ':' | tr -d '," ' 2>&1); then
- echo "error on 'jq' parse pod data: ${POD_LABELS}."
+ echo >&2 "${me}: error on 'jq' parse pod data: ${POD_LABELS}."
exit 1
fi
if ! KUBE_SYSTEM_NS_UID=$(jq -r '.metadata.uid' <<< "$KUBE_SYSTEM_NS_DATA" 2>&1); then
- echo "error on 'jq' parse kube_system_ns: ${KUBE_SYSTEM_NS_UID}."
+ echo >&2 "${me}: error on 'jq' parse kube_system_ns: ${KUBE_SYSTEM_NS_UID}."
exit 1
fi
diff --git a/daemon/global_statistics.c b/daemon/global_statistics.c
index 98bf0bf9a..249369519 100644
--- a/daemon/global_statistics.c
+++ b/daemon/global_statistics.c
@@ -46,9 +46,9 @@ static struct global_statistics {
};
void rrdr_query_completed(uint64_t db_points_read, uint64_t result_points_generated) {
- __atomic_fetch_add(&global_statistics.rrdr_queries_made, 1, __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&global_statistics.rrdr_db_points_read, db_points_read, __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, result_points_generated, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.rrdr_queries_made, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global_statistics.rrdr_db_points_read, db_points_read, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, result_points_generated, __ATOMIC_RELAXED);
}
void finished_web_request_statistics(uint64_t dt,
@@ -58,45 +58,44 @@ void finished_web_request_statistics(uint64_t dt,
uint64_t compressed_content_size) {
uint64_t old_web_usec_max = global_statistics.web_usec_max;
while(dt > old_web_usec_max)
- __atomic_compare_exchange(&global_statistics.web_usec_max, &old_web_usec_max, &dt, 1, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-
- __atomic_fetch_add(&global_statistics.web_requests, 1, __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&global_statistics.web_usec, dt, __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&global_statistics.bytes_received, bytes_received, __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&global_statistics.bytes_sent, bytes_sent, __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&global_statistics.content_size, content_size, __ATOMIC_SEQ_CST);
- __atomic_fetch_add(&global_statistics.compressed_content_size, compressed_content_size, __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&global_statistics.web_usec_max, &old_web_usec_max, &dt, 1, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+
+ __atomic_fetch_add(&global_statistics.web_requests, 1, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global_statistics.web_usec, dt, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global_statistics.bytes_received, bytes_received, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global_statistics.bytes_sent, bytes_sent, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global_statistics.content_size, content_size, __ATOMIC_RELAXED);
+ __atomic_fetch_add(&global_statistics.compressed_content_size, compressed_content_size, __ATOMIC_RELAXED);
}
uint64_t web_client_connected(void) {
- __atomic_fetch_add(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST);
- return __atomic_fetch_add(&global_statistics.web_client_count, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_add(&global_statistics.connected_clients, 1, __ATOMIC_RELAXED);
+ return __atomic_fetch_add(&global_statistics.web_client_count, 1, __ATOMIC_RELAXED);
}
void web_client_disconnected(void) {
- __atomic_fetch_sub(&global_statistics.connected_clients, 1, __ATOMIC_SEQ_CST);
+ __atomic_fetch_sub(&global_statistics.connected_clients, 1, __ATOMIC_RELAXED);
}
static inline void global_statistics_copy(struct global_statistics *gs, uint8_t options) {
- gs->connected_clients = __atomic_fetch_add(&global_statistics.connected_clients, 0, __ATOMIC_SEQ_CST);
- gs->web_requests = __atomic_fetch_add(&global_statistics.web_requests, 0, __ATOMIC_SEQ_CST);
- gs->web_usec = __atomic_fetch_add(&global_statistics.web_usec, 0, __ATOMIC_SEQ_CST);
- gs->web_usec_max = __atomic_fetch_add(&global_statistics.web_usec_max, 0, __ATOMIC_SEQ_CST);
- gs->bytes_received = __atomic_fetch_add(&global_statistics.bytes_received, 0, __ATOMIC_SEQ_CST);
- gs->bytes_sent = __atomic_fetch_add(&global_statistics.bytes_sent, 0, __ATOMIC_SEQ_CST);
- gs->content_size = __atomic_fetch_add(&global_statistics.content_size, 0, __ATOMIC_SEQ_CST);
- gs->compressed_content_size = __atomic_fetch_add(&global_statistics.compressed_content_size, 0, __ATOMIC_SEQ_CST);
- gs->web_client_count = __atomic_fetch_add(&global_statistics.web_client_count, 0, __ATOMIC_SEQ_CST);
-
- gs->rrdr_queries_made = __atomic_fetch_add(&global_statistics.rrdr_queries_made, 0, __ATOMIC_SEQ_CST);
- gs->rrdr_db_points_read = __atomic_fetch_add(&global_statistics.rrdr_db_points_read, 0, __ATOMIC_SEQ_CST);
- gs->rrdr_result_points_generated = __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, 0, __ATOMIC_SEQ_CST);
+ gs->connected_clients = __atomic_fetch_add(&global_statistics.connected_clients, 0, __ATOMIC_RELAXED);
+ gs->web_requests = __atomic_fetch_add(&global_statistics.web_requests, 0, __ATOMIC_RELAXED);
+ gs->web_usec = __atomic_fetch_add(&global_statistics.web_usec, 0, __ATOMIC_RELAXED);
+ gs->web_usec_max = __atomic_fetch_add(&global_statistics.web_usec_max, 0, __ATOMIC_RELAXED);
+ gs->bytes_received = __atomic_fetch_add(&global_statistics.bytes_received, 0, __ATOMIC_RELAXED);
+ gs->bytes_sent = __atomic_fetch_add(&global_statistics.bytes_sent, 0, __ATOMIC_RELAXED);
+ gs->content_size = __atomic_fetch_add(&global_statistics.content_size, 0, __ATOMIC_RELAXED);
+ gs->compressed_content_size = __atomic_fetch_add(&global_statistics.compressed_content_size, 0, __ATOMIC_RELAXED);
+ gs->web_client_count = __atomic_fetch_add(&global_statistics.web_client_count, 0, __ATOMIC_RELAXED);
+
+ gs->rrdr_queries_made = __atomic_fetch_add(&global_statistics.rrdr_queries_made, 0, __ATOMIC_RELAXED);
+ gs->rrdr_db_points_read = __atomic_fetch_add(&global_statistics.rrdr_db_points_read, 0, __ATOMIC_RELAXED);
+ gs->rrdr_result_points_generated = __atomic_fetch_add(&global_statistics.rrdr_result_points_generated, 0, __ATOMIC_RELAXED);
if(options & GLOBAL_STATS_RESET_WEB_USEC_MAX) {
uint64_t n = 0;
- __atomic_compare_exchange(&global_statistics.web_usec_max, (uint64_t *) &gs->web_usec_max, &n, 1, __ATOMIC_SEQ_CST,
- __ATOMIC_SEQ_CST);
+ __atomic_compare_exchange(&global_statistics.web_usec_max, (uint64_t *) &gs->web_usec_max, &n, 1, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
}
}
@@ -452,21 +451,28 @@ static void dbengine_statistics_charts(void) {
RRDHOST *host;
unsigned long long stats_array[RRDENG_NR_STATS] = {0};
unsigned long long local_stats_array[RRDENG_NR_STATS];
- unsigned dbengine_contexts = 0, counted_multihost_db = 0, i;
+ unsigned dbengine_contexts = 0, counted_multihost_db[RRD_STORAGE_TIERS] = { 0 }, i;
rrdhost_foreach_read(host) {
if (host->rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE && !rrdhost_flag_check(host, RRDHOST_FLAG_ARCHIVED)) {
- if (&multidb_ctx == host->rrdeng_ctx) {
- if (counted_multihost_db)
- continue; /* Only count multi-host DB once */
- counted_multihost_db = 1;
- }
- ++dbengine_contexts;
- /* get localhost's DB engine's statistics */
- rrdeng_get_37_statistics(host->rrdeng_ctx, local_stats_array);
- for (i = 0; i < RRDENG_NR_STATS; ++i) {
- /* aggregate statistics across hosts */
- stats_array[i] += local_stats_array[i];
+
+ /* get localhost's DB engine's statistics for each tier */
+ for(int tier = 0; tier < storage_tiers ;tier++) {
+ if(!host->storage_instance[tier]) continue;
+
+ if(is_storage_engine_shared(host->storage_instance[tier])) {
+ if(counted_multihost_db[tier])
+ continue;
+ else
+ counted_multihost_db[tier] = 1;
+ }
+
+ ++dbengine_contexts;
+ rrdeng_get_37_statistics((struct rrdengine_instance *)host->storage_instance[tier], local_stats_array);
+ for (i = 0; i < RRDENG_NR_STATS; ++i) {
+ /* aggregate statistics across hosts */
+ stats_array[i] += local_stats_array[i];
+ }
}
}
}
@@ -796,7 +802,7 @@ static void dbengine_statistics_charts(void) {
static RRDDIM *rd_index_metadata = NULL;
static RRDDIM *rd_pages_metadata = NULL;
- collected_number cached_pages, pinned_pages, API_producers, populated_pages, cache_metadata, pages_on_disk,
+ collected_number API_producers, populated_pages, cache_metadata, pages_on_disk,
page_cache_descriptors, index_metadata, pages_metadata;
if (unlikely(!st_ram_usage)) {
@@ -827,13 +833,6 @@ static void dbengine_statistics_charts(void) {
populated_pages = (collected_number)stats_array[3];
page_cache_descriptors = (collected_number)stats_array[27];
- if (API_producers * 2 > populated_pages) {
- pinned_pages = API_producers;
- } else {
- pinned_pages = API_producers * 2;
- }
- cached_pages = populated_pages - pinned_pages;
-
cache_metadata = page_cache_descriptors * sizeof(struct page_cache_descr);
pages_metadata = pages_on_disk * sizeof(struct rrdeng_page_descr);
@@ -841,8 +840,8 @@ static void dbengine_statistics_charts(void) {
/* This is an empirical estimation for Judy array indexing and extent structures */
index_metadata = pages_on_disk * 58;
- rrddim_set_by_pointer(st_ram_usage, rd_cached, cached_pages);
- rrddim_set_by_pointer(st_ram_usage, rd_pinned, pinned_pages);
+ rrddim_set_by_pointer(st_ram_usage, rd_cached, populated_pages - API_producers);
+ rrddim_set_by_pointer(st_ram_usage, rd_pinned, API_producers);
rrddim_set_by_pointer(st_ram_usage, rd_cache_metadata, cache_metadata);
rrddim_set_by_pointer(st_ram_usage, rd_pages_metadata, pages_metadata);
rrddim_set_by_pointer(st_ram_usage, rd_index_metadata, index_metadata);
@@ -997,6 +996,7 @@ static struct worker_utilization all_workers_utilization[] = {
{ .name = "TC", .family = "workers plugin tc", .priority = 1000000 },
{ .name = "TIMEX", .family = "workers plugin timex", .priority = 1000000 },
{ .name = "IDLEJITTER", .family = "workers plugin idlejitter", .priority = 1000000 },
+ { .name = "RRDCONTEXT", .family = "workers aclk contexts", .priority = 1000000 },
// has to be terminated with a NULL
{ .name = NULL, .family = NULL }
@@ -1153,7 +1153,7 @@ static void workers_utilization_update_chart(struct worker_utilization *wu) {
if(wu->workers_cpu_registered == 0)
rrddim_set_by_pointer(wu->st_workers_cpu, wu->rd_workers_cpu_avg, 0);
else
- rrddim_set_by_pointer(wu->st_workers_cpu, wu->rd_workers_cpu_avg, (collected_number)( wu->workers_cpu_total * 10000ULL / (calculated_number)wu->workers_cpu_registered ));
+ rrddim_set_by_pointer(wu->st_workers_cpu, wu->rd_workers_cpu_avg, (collected_number)( wu->workers_cpu_total * 10000ULL / (NETDATA_DOUBLE)wu->workers_cpu_registered ));
rrdset_done(wu->st_workers_cpu);
}
diff --git a/daemon/main.c b/daemon/main.c
index e10d38b40..ada3c14f2 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -47,7 +47,7 @@ void netdata_cleanup_and_exit(int ret) {
// stop everything
info("EXIT: stopping static threads...");
-#ifdef ENABLE_NEW_CLOUD_PROTOCOL
+#ifdef ENABLE_ACLK
aclk_sync_exit_all();
#endif
cancel_main_threads();
@@ -55,13 +55,16 @@ void netdata_cleanup_and_exit(int ret) {
// free the database
info("EXIT: freeing database memory...");
#ifdef ENABLE_DBENGINE
- rrdeng_prepare_exit(&multidb_ctx);
+ for(int tier = 0; tier < storage_tiers ; tier++)
+ rrdeng_prepare_exit(multidb_ctx[tier]);
#endif
rrdhost_free_all();
#ifdef ENABLE_DBENGINE
- rrdeng_exit(&multidb_ctx);
+ for(int tier = 0; tier < storage_tiers ; tier++)
+ rrdeng_exit(multidb_ctx[tier]);
#endif
}
+ sql_close_context_database();
sql_close_database();
// unlink the pid
@@ -349,6 +352,12 @@ int help(int exitcode) {
#endif
" -W set section option value\n"
" set netdata.conf option from the command line.\n\n"
+ " -W buildinfo Print the version, the configure options,\n"
+ " a list of optional features, and whether they\n"
+ " are enabled or not.\n\n"
+ " -W buildinfojson Print the version, the configure options,\n"
+ " a list of optional features, and whether they\n"
+ " are enabled or not, in JSON format.\n\n"
" -W simple-pattern pattern string\n"
" Check if string matches pattern and exit.\n\n"
" -W \"claim -token=TOKEN -rooms=ROOM1,ROOM2\"\n"
@@ -393,6 +402,14 @@ static void log_init(void) {
snprintfz(filename, FILENAME_MAX, "%s/access.log", netdata_configured_log_dir);
stdaccess_filename = config_get(CONFIG_SECTION_LOGS, "access", filename);
+#ifdef ENABLE_ACLK
+ aclklog_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "conversation log", CONFIG_BOOLEAN_NO);
+ if (aclklog_enabled) {
+ snprintfz(filename, FILENAME_MAX, "%s/aclk.log", netdata_configured_log_dir);
+ aclklog_filename = config_get(CONFIG_SECTION_CLOUD, "conversation log file", filename);
+ }
+#endif
+
char deffacility[8];
snprintfz(deffacility,7,"%s","daemon");
facility_log = config_get(CONFIG_SECTION_LOGS, "facility", deffacility);
@@ -516,6 +533,64 @@ static void backwards_compatible_config() {
config_move(CONFIG_SECTION_STATSD, "enabled",
CONFIG_SECTION_PLUGINS, "statsd");
+
+ config_move(CONFIG_SECTION_GLOBAL, "memory mode",
+ CONFIG_SECTION_DB, "mode");
+
+ config_move(CONFIG_SECTION_GLOBAL, "history",
+ CONFIG_SECTION_DB, "retention");
+
+ config_move(CONFIG_SECTION_GLOBAL, "update every",
+ CONFIG_SECTION_DB, "update every");
+
+ config_move(CONFIG_SECTION_GLOBAL, "page cache size",
+ CONFIG_SECTION_DB, "dbengine page cache size MB");
+
+ config_move(CONFIG_SECTION_DB, "page cache size",
+ CONFIG_SECTION_DB, "dbengine page cache size MB");
+
+ config_move(CONFIG_SECTION_GLOBAL, "page cache uses malloc",
+ CONFIG_SECTION_DB, "dbengine page cache with malloc");
+
+ config_move(CONFIG_SECTION_DB, "page cache with malloc",
+ CONFIG_SECTION_DB, "dbengine page cache with malloc");
+
+ config_move(CONFIG_SECTION_GLOBAL, "dbengine disk space",
+ CONFIG_SECTION_DB, "dbengine disk space MB");
+
+ config_move(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space",
+ CONFIG_SECTION_DB, "dbengine multihost disk space MB");
+
+ config_move(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)",
+ CONFIG_SECTION_DB, "memory deduplication (ksm)");
+
+ config_move(CONFIG_SECTION_GLOBAL, "dbengine page fetch timeout",
+ CONFIG_SECTION_DB, "dbengine page fetch timeout secs");
+
+ config_move(CONFIG_SECTION_GLOBAL, "dbengine page fetch retries",
+ CONFIG_SECTION_DB, "dbengine page fetch retries");
+
+ config_move(CONFIG_SECTION_GLOBAL, "dbengine extent pages",
+ CONFIG_SECTION_DB, "dbengine pages per extent");
+
+ config_move(CONFIG_SECTION_GLOBAL, "cleanup obsolete charts after seconds",
+ CONFIG_SECTION_DB, "cleanup obsolete charts after secs");
+
+ config_move(CONFIG_SECTION_GLOBAL, "gap when lost iterations above",
+ CONFIG_SECTION_DB, "gap when lost iterations above");
+
+ config_move(CONFIG_SECTION_GLOBAL, "cleanup orphan hosts after seconds",
+ CONFIG_SECTION_DB, "cleanup orphan hosts after secs");
+
+ config_move(CONFIG_SECTION_GLOBAL, "delete obsolete charts files",
+ CONFIG_SECTION_DB, "delete obsolete charts files");
+
+ config_move(CONFIG_SECTION_GLOBAL, "delete orphan hosts files",
+ CONFIG_SECTION_DB, "delete orphan hosts files");
+
+ config_move(CONFIG_SECTION_GLOBAL, "enable zero metrics",
+ CONFIG_SECTION_DB, "enable zero metrics");
+
}
static void get_netdata_configured_variables() {
@@ -533,28 +608,40 @@ static void get_netdata_configured_variables() {
debug(D_OPTIONS, "hostname set to '%s'", netdata_configured_hostname);
// ------------------------------------------------------------------------
- // get default database size
-
- default_rrd_history_entries = (int) config_get_number(CONFIG_SECTION_GLOBAL, "history", align_entries_to_pagesize(default_rrd_memory_mode, RRD_DEFAULT_HISTORY_ENTRIES));
+ // get default database update frequency
- long h = align_entries_to_pagesize(default_rrd_memory_mode, default_rrd_history_entries);
- if(h != default_rrd_history_entries) {
- config_set_number(CONFIG_SECTION_GLOBAL, "history", h);
- default_rrd_history_entries = (int)h;
+ default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_DB, "update every", UPDATE_EVERY);
+ if(default_rrd_update_every < 1 || default_rrd_update_every > 600) {
+ error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY);
+ default_rrd_update_every = UPDATE_EVERY;
+ config_set_number(CONFIG_SECTION_DB, "update every", default_rrd_update_every);
}
- if(default_rrd_history_entries < 5 || default_rrd_history_entries > RRD_HISTORY_ENTRIES_MAX) {
- error("Invalid history entries %d given. Defaulting to %d.", default_rrd_history_entries, RRD_DEFAULT_HISTORY_ENTRIES);
- default_rrd_history_entries = RRD_DEFAULT_HISTORY_ENTRIES;
+ // ------------------------------------------------------------------------
+ // get default memory mode for the database
+
+ {
+ const char *mode = config_get(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode));
+ default_rrd_memory_mode = rrd_memory_mode_id(mode);
+ if(strcmp(mode, rrd_memory_mode_name(default_rrd_memory_mode)) != 0) {
+ error("Invalid memory mode '%s' given. Using '%s'", mode, rrd_memory_mode_name(default_rrd_memory_mode));
+ config_set(CONFIG_SECTION_DB, "mode", rrd_memory_mode_name(default_rrd_memory_mode));
+ }
}
// ------------------------------------------------------------------------
- // get default database update frequency
+ // get default database size
- default_rrd_update_every = (int) config_get_number(CONFIG_SECTION_GLOBAL, "update every", UPDATE_EVERY);
- if(default_rrd_update_every < 1 || default_rrd_update_every > 600) {
- error("Invalid data collection frequency (update every) %d given. Defaulting to %d.", default_rrd_update_every, UPDATE_EVERY_MAX);
- default_rrd_update_every = UPDATE_EVERY;
+ if(default_rrd_memory_mode != RRD_MEMORY_MODE_DBENGINE && default_rrd_memory_mode != RRD_MEMORY_MODE_NONE) {
+ default_rrd_history_entries = (int)config_get_number(
+ CONFIG_SECTION_DB, "retention",
+ align_entries_to_pagesize(default_rrd_memory_mode, RRD_DEFAULT_HISTORY_ENTRIES));
+
+ long h = align_entries_to_pagesize(default_rrd_memory_mode, default_rrd_history_entries);
+ if (h != default_rrd_history_entries) {
+ config_set_number(CONFIG_SECTION_DB, "retention", h);
+ default_rrd_history_entries = (int)h;
+ }
}
// ------------------------------------------------------------------------
@@ -576,38 +663,38 @@ static void get_netdata_configured_variables() {
netdata_configured_primary_plugins_dir = plugin_directories[PLUGINSD_STOCK_PLUGINS_DIRECTORY_PATH];
}
- // ------------------------------------------------------------------------
- // get default memory mode for the database
-
- default_rrd_memory_mode = rrd_memory_mode_id(config_get(CONFIG_SECTION_GLOBAL, "memory mode", rrd_memory_mode_name(default_rrd_memory_mode)));
#ifdef ENABLE_DBENGINE
// ------------------------------------------------------------------------
// get default Database Engine page cache size in MiB
- default_rrdeng_page_cache_mb = (int) config_get_number(CONFIG_SECTION_GLOBAL, "page cache size", default_rrdeng_page_cache_mb);
+ db_engine_use_malloc = config_get_boolean(CONFIG_SECTION_DB, "dbengine page cache with malloc", CONFIG_BOOLEAN_NO);
+ default_rrdeng_page_cache_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb);
if(default_rrdeng_page_cache_mb < RRDENG_MIN_PAGE_CACHE_SIZE_MB) {
error("Invalid page cache size %d given. Defaulting to %d.", default_rrdeng_page_cache_mb, RRDENG_MIN_PAGE_CACHE_SIZE_MB);
default_rrdeng_page_cache_mb = RRDENG_MIN_PAGE_CACHE_SIZE_MB;
+ config_set_number(CONFIG_SECTION_DB, "dbengine page cache size MB", default_rrdeng_page_cache_mb);
}
// ------------------------------------------------------------------------
// get default Database Engine disk space quota in MiB
- default_rrdeng_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_GLOBAL, "dbengine disk space", default_rrdeng_disk_quota_mb);
+ default_rrdeng_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine disk space MB", default_rrdeng_disk_quota_mb);
if(default_rrdeng_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) {
error("Invalid dbengine disk space %d given. Defaulting to %d.", default_rrdeng_disk_quota_mb, RRDENG_MIN_DISK_SPACE_MB);
default_rrdeng_disk_quota_mb = RRDENG_MIN_DISK_SPACE_MB;
+ config_set_number(CONFIG_SECTION_DB, "dbengine disk space MB", default_rrdeng_disk_quota_mb);
}
- default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_GLOBAL, "dbengine multihost disk space", compute_multidb_diskspace());
+ default_multidb_disk_quota_mb = (int) config_get_number(CONFIG_SECTION_DB, "dbengine multihost disk space MB", compute_multidb_diskspace());
if(default_multidb_disk_quota_mb < RRDENG_MIN_DISK_SPACE_MB) {
error("Invalid multidb disk space %d given. Defaulting to %d.", default_multidb_disk_quota_mb, default_rrdeng_disk_quota_mb);
default_multidb_disk_quota_mb = default_rrdeng_disk_quota_mb;
+ config_set_number(CONFIG_SECTION_DB, "dbengine multihost disk space MB", default_multidb_disk_quota_mb);
}
#else
if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE) {
- error_report("RRD_MEMORY_MODE_DBENGINE is not supported in this platform. The agent will use memory mode ram instead.");
- default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
+ error_report("RRD_MEMORY_MODE_DBENGINE is not supported in this platform. The agent will use db mode 'save' instead.");
+ default_rrd_memory_mode = RRD_MEMORY_MODE_SAVE;
}
#endif
// ------------------------------------------------------------------------
@@ -619,12 +706,40 @@ static void get_netdata_configured_variables() {
// get KSM settings
#ifdef MADV_MERGEABLE
- enable_ksm = config_get_boolean(CONFIG_SECTION_GLOBAL, "memory deduplication (ksm)", enable_ksm);
+ enable_ksm = config_get_boolean(CONFIG_SECTION_DB, "memory deduplication (ksm)", enable_ksm);
#endif
// --------------------------------------------------------------------
// metric correlations
+
enable_metric_correlations = config_get_boolean(CONFIG_SECTION_GLOBAL, "enable metric correlations", enable_metric_correlations);
+ default_metric_correlations_method = weights_string_to_method(config_get(
+ CONFIG_SECTION_GLOBAL, "metric correlations method",
+ weights_method_to_string(default_metric_correlations_method)));
+
+ // --------------------------------------------------------------------
+
+ rrdset_free_obsolete_time = config_get_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time);
+ // Current chart locking and invalidation scheme doesn't prevent Netdata from segmentation faults if a short
+ // cleanup delay is set. Extensive stress tests showed that 10 seconds is quite a safe delay. Look at
+ // https://github.com/netdata/netdata/pull/11222#issuecomment-868367920 for more information.
+ if (rrdset_free_obsolete_time < 10) {
+ rrdset_free_obsolete_time = 10;
+ info("The \"cleanup obsolete charts after seconds\" option was set to 10 seconds.");
+ config_set_number(CONFIG_SECTION_DB, "cleanup obsolete charts after secs", rrdset_free_obsolete_time);
+ }
+
+ gap_when_lost_iterations_above = (int)config_get_number(CONFIG_SECTION_DB, "gap when lost iterations above", gap_when_lost_iterations_above);
+ if (gap_when_lost_iterations_above < 1) {
+ gap_when_lost_iterations_above = 1;
+ config_set_number(CONFIG_SECTION_DB, "gap when lost iterations above", gap_when_lost_iterations_above);
+ }
+
+ // --------------------------------------------------------------------
+ // rrdcontext
+
+ rrdcontext_enabled = config_get_boolean(CONFIG_SECTION_CLOUD, "rrdcontexts", rrdcontext_enabled);
+
// --------------------------------------------------------------------
// get various system parameters
@@ -867,13 +982,16 @@ int main(int argc, char **argv) {
return 1;
if (unit_test_str2ld())
return 1;
-
+ if (unit_test_bitmap256())
+ return 1;
// No call to load the config file on this code-path
post_conf_load(&user);
get_netdata_configured_variables();
+ rrdcontext_enabled = CONFIG_BOOLEAN_NO;
default_rrd_update_every = 1;
default_rrd_memory_mode = RRD_MEMORY_MODE_RAM;
default_health_enabled = 0;
+ storage_tiers = 1;
registry_init();
if(rrd_init("unittest", NULL)) {
fprintf(stderr, "rrd_init failed for unittest\n");
@@ -886,6 +1004,12 @@ int main(int argc, char **argv) {
if(test_dbengine()) return 1;
#endif
if(test_sqlite()) return 1;
+ if (dictionary_unittest(10000))
+ return 1;
+ if (rrdlabels_unittest())
+ return 1;
+ if (ctx_unittest())
+ return 1;
fprintf(stderr, "\n\nALL TESTS PASSED\n\n");
return 0;
}
@@ -895,9 +1019,18 @@ int main(int argc, char **argv) {
}
#endif
#ifdef ENABLE_DBENGINE
+ else if(strcmp(optarg, "mctest") == 0) {
+ return mc_unittest();
+ }
+ else if(strcmp(optarg, "ctxtest") == 0) {
+ return ctx_unittest();
+ }
else if(strcmp(optarg, "dicttest") == 0) {
return dictionary_unittest(10000);
}
+ else if(strcmp(optarg, "rrdlabelstest") == 0) {
+ return rrdlabels_unittest();
+ }
else if(strncmp(optarg, createdataset_string, strlen(createdataset_string)) == 0) {
optarg += strlen(createdataset_string);
unsigned history_seconds = strtoul(optarg, NULL, 0);
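
A usage sketch for the self-test entry points added in the hunk above (assuming they are dispatched through the existing `-W` switch like the other test options, and a dbengine-enabled build):

    netdata -W dicttest
    netdata -W rrdlabelstest
    netdata -W mctest
    netdata -W ctxtest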
@@ -1214,25 +1347,31 @@ int main(int argc, char **argv) {
// --------------------------------------------------------------------
// get log filenames and settings
+
log_init();
error_log_limit_unlimited();
+
// initialize the log files
open_all_log_files();
get_system_timezone();
+
// --------------------------------------------------------------------
// get the certificate and start security
+
#ifdef ENABLE_HTTPS
security_init();
#endif
// --------------------------------------------------------------------
// This is the safest place to start the SILENCERS structure
+
set_silencers_filename();
health_initialize_global_silencers();
// --------------------------------------------------------------------
// Initialize ML configuration
+
ml_init();
// --------------------------------------------------------------------
@@ -1240,9 +1379,11 @@ int main(int argc, char **argv) {
// block signals while initializing threads.
// this causes the threads to block signals.
+
signals_block();
// setup the signals we want to use
+
signals_init();
// setup threads configs
@@ -1271,6 +1412,7 @@ int main(int argc, char **argv) {
if(web_server_mode != WEB_SERVER_MODE_NONE)
api_listen_sockets_setup();
+
}
#ifdef NETDATA_INTERNAL_CHECKS
@@ -1354,7 +1496,7 @@ int main(int argc, char **argv) {
web_server_config_options();
- netdata_zero_metrics_enabled = config_get_boolean_ondemand(CONFIG_SECTION_GLOBAL, "enable zero metrics", CONFIG_BOOLEAN_NO);
+ netdata_zero_metrics_enabled = config_get_boolean_ondemand(CONFIG_SECTION_DB, "enable zero metrics", CONFIG_BOOLEAN_NO);
set_late_global_environment();
diff --git a/daemon/static_threads.c b/daemon/static_threads.c
index c07473bd6..96e279906 100644
--- a/daemon/static_threads.c
+++ b/daemon/static_threads.c
@@ -123,7 +123,7 @@ const struct netdata_static_thread static_threads_common[] = {
.start_routine = socket_listen_main_static_threaded
},
-#if defined(ENABLE_ACLK) || defined(ACLK_NG)
+#ifdef ENABLE_ACLK
{
.name = "ACLK_Main",
.config_section = NULL,
@@ -135,6 +135,16 @@ const struct netdata_static_thread static_threads_common[] = {
},
#endif
+ {
+ .name = "rrdcontext",
+ .config_section = NULL,
+ .config_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = rrdcontext_main
+ },
+
{NULL, NULL, NULL, 0, NULL, NULL, NULL}
};
diff --git a/daemon/system-info.sh b/daemon/system-info.sh
index 12553e3da..101ccb0bf 100755
--- a/daemon/system-info.sh
+++ b/daemon/system-info.sh
@@ -44,9 +44,11 @@ if [ -z "${VIRTUALIZATION}" ]; then
[ -n "$VIRTUALIZATION" ] && VIRT_DETECTION="dmidecode"
fi
- if [ -z "${VIRTUALIZATION}" ] && [ "${KERNEL_NAME}" = "FreeBSD" ]; then
- VIRTUALIZATION=$(sysctl kern.vm_guest 2>/dev/null | cut -d: -f 2 | awk '{$1=$1};1')
- [ -n "$VIRTUALIZATION" ] && VIRT_DETECTION="sysctl"
+ if [ -z "${VIRTUALIZATION}" ] || [ "$VIRTUALIZATION" = "unknown" ]; then
+ if [ "${KERNEL_NAME}" = "FreeBSD" ]; then
+ VIRTUALIZATION=$(sysctl kern.vm_guest 2>/dev/null | cut -d: -f 2 | awk '{$1=$1};1')
+ [ -n "$VIRTUALIZATION" ] && VIRT_DETECTION="sysctl"
+ fi
fi
if [ -z "${VIRTUALIZATION}" ]; then
@@ -336,7 +338,7 @@ if [ "${KERNEL_NAME}" = FreeBSD ]; then
TOTAL_RAM="$(sysctl -n hw.physmem)"
elif [ "${KERNEL_NAME}" = Darwin ]; then
RAM_DETECTION="sysctl"
- TOTAL_RAM="$(sysctl -n hw.physmem)"
+ TOTAL_RAM="$(sysctl -n hw.memsize)"
elif [ -r /proc/meminfo ]; then
RAM_DETECTION="procfs"
TOTAL_RAM="$(grep -F MemTotal /proc/meminfo | cut -f 2 -d ':' | sed -e 's/^[[:space:]]*//' -e 's/[[:space:]]*$//' | cut -f 1 -d ' ')"
diff --git a/daemon/unit_test.c b/daemon/unit_test.c
index 35f8613a2..8ba251b9a 100644
--- a/daemon/unit_test.c
+++ b/daemon/unit_test.c
@@ -4,7 +4,7 @@
static int check_number_printing(void) {
struct {
- calculated_number n;
+ NETDATA_DOUBLE n;
const char *correct;
} values[] = {
{ .n = 0, .correct = "0" },
@@ -22,8 +22,8 @@ static int check_number_printing(void) {
char netdata[50], system[50];
int i, failed = 0;
for(i = 0; values[i].correct ; i++) {
- print_calculated_number(netdata, values[i].n);
- snprintfz(system, 49, "%0.12" LONG_DOUBLE_MODIFIER, (LONG_DOUBLE)values[i].n);
+ print_netdata_double(netdata, values[i].n);
+ snprintfz(system, 49, "%0.12" NETDATA_DOUBLE_MODIFIER, (NETDATA_DOUBLE)values[i].n);
int ok = 1;
if(strcmp(netdata, values[i].correct) != 0) {
@@ -95,35 +95,36 @@ static int check_rrdcalc_comparisons(void) {
return 0;
}
-int check_storage_number(calculated_number n, int debug) {
+int check_storage_number(NETDATA_DOUBLE n, int debug) {
char buffer[100];
uint32_t flags = SN_DEFAULT_FLAGS;
storage_number s = pack_storage_number(n, flags);
- calculated_number d = unpack_storage_number(s);
+ NETDATA_DOUBLE d = unpack_storage_number(s);
if(!does_storage_number_exist(s)) {
- fprintf(stderr, "Exists flags missing for number " CALCULATED_NUMBER_FORMAT "!\n", n);
+ fprintf(stderr, "Exists flags missing for number " NETDATA_DOUBLE_FORMAT "!\n", n);
return 5;
}
- calculated_number ddiff = d - n;
- calculated_number dcdiff = ddiff * 100.0 / n;
+ NETDATA_DOUBLE ddiff = d - n;
+ NETDATA_DOUBLE dcdiff = ddiff * 100.0 / n;
if(dcdiff < 0) dcdiff = -dcdiff;
- size_t len = (size_t)print_calculated_number(buffer, d);
- calculated_number p = str2ld(buffer, NULL);
- calculated_number pdiff = n - p;
- calculated_number pcdiff = pdiff * 100.0 / n;
+ size_t len = (size_t)print_netdata_double(buffer, d);
+ NETDATA_DOUBLE p = str2ndd(buffer, NULL);
+ NETDATA_DOUBLE pdiff = n - p;
+ NETDATA_DOUBLE pcdiff = pdiff * 100.0 / n;
if(pcdiff < 0) pcdiff = -pcdiff;
if(debug) {
fprintf(stderr,
- CALCULATED_NUMBER_FORMAT " original\n"
- CALCULATED_NUMBER_FORMAT " packed and unpacked, (stored as 0x%08X, diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n"
- "%s printed after unpacked (%zu bytes)\n"
- CALCULATED_NUMBER_FORMAT " re-parsed from printed (diff " CALCULATED_NUMBER_FORMAT ", " CALCULATED_NUMBER_FORMAT "%%)\n\n",
+ NETDATA_DOUBLE_FORMAT
+ " original\n" NETDATA_DOUBLE_FORMAT " packed and unpacked, (stored as 0x%08X, diff " NETDATA_DOUBLE_FORMAT
+ ", " NETDATA_DOUBLE_FORMAT "%%)\n"
+ "%s printed after unpacked (%zu bytes)\n" NETDATA_DOUBLE_FORMAT
+ " re-parsed from printed (diff " NETDATA_DOUBLE_FORMAT ", " NETDATA_DOUBLE_FORMAT "%%)\n\n",
n,
d, s, ddiff, dcdiff,
buffer, len,
@@ -132,10 +133,11 @@ int check_storage_number(calculated_number n, int debug) {
if(len != strlen(buffer)) fprintf(stderr, "ERROR: printed number %s is reported to have length %zu but it has %zu\n", buffer, len, strlen(buffer));
if(dcdiff > ACCURACY_LOSS_ACCEPTED_PERCENT)
- fprintf(stderr, "WARNING: packing number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, dcdiff);
+ fprintf(stderr, "WARNING: packing number " NETDATA_DOUBLE_FORMAT " has accuracy loss " NETDATA_DOUBLE_FORMAT " %%\n", n, dcdiff);
if(pcdiff > ACCURACY_LOSS_ACCEPTED_PERCENT)
- fprintf(stderr, "WARNING: re-parsing the packed, unpacked and printed number " CALCULATED_NUMBER_FORMAT " has accuracy loss " CALCULATED_NUMBER_FORMAT " %%\n", n, pcdiff);
+ fprintf(stderr, "WARNING: re-parsing the packed, unpacked and printed number " NETDATA_DOUBLE_FORMAT
+ " has accuracy loss " NETDATA_DOUBLE_FORMAT " %%\n", n, pcdiff);
}
if(len != strlen(buffer)) return 1;
@@ -144,8 +146,8 @@ int check_storage_number(calculated_number n, int debug) {
return 0;
}
-calculated_number storage_number_min(calculated_number n) {
- calculated_number r = 1, last;
+NETDATA_DOUBLE storage_number_min(NETDATA_DOUBLE n) {
+ NETDATA_DOUBLE r = 1, last;
do {
last = n;
@@ -159,12 +161,12 @@ calculated_number storage_number_min(calculated_number n) {
void benchmark_storage_number(int loop, int multiplier) {
int i, j;
- calculated_number n, d;
+ NETDATA_DOUBLE n, d;
storage_number s;
unsigned long long user, system, total, mine, their;
- calculated_number storage_number_positive_min = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW);
- calculated_number storage_number_positive_max = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MAX_RAW);
+ NETDATA_DOUBLE storage_number_positive_min = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW);
+ NETDATA_DOUBLE storage_number_positive_max = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MAX_RAW);
char buffer[100];
@@ -174,25 +176,25 @@ void benchmark_storage_number(int loop, int multiplier) {
// ------------------------------------------------------------------------
- fprintf(stderr, "SYSTEM LONG DOUBLE SIZE: %zu bytes\n", sizeof(calculated_number));
+ fprintf(stderr, "SYSTEM LONG DOUBLE SIZE: %zu bytes\n", sizeof(NETDATA_DOUBLE));
fprintf(stderr, "NETDATA FLOATING POINT SIZE: %zu bytes\n", sizeof(storage_number));
- mine = (calculated_number)sizeof(storage_number) * (calculated_number)loop;
- their = (calculated_number)sizeof(calculated_number) * (calculated_number)loop;
+ mine = (NETDATA_DOUBLE)sizeof(storage_number) * (NETDATA_DOUBLE)loop;
+ their = (NETDATA_DOUBLE)sizeof(NETDATA_DOUBLE) * (NETDATA_DOUBLE)loop;
if(mine > their) {
- fprintf(stderr, "\nNETDATA NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES MORE MEMORY. Sorry!\n", (LONG_DOUBLE)(mine / their));
+ fprintf(stderr, "\nNETDATA NEEDS %0.2" NETDATA_DOUBLE_MODIFIER " TIMES MORE MEMORY. Sorry!\n", (NETDATA_DOUBLE)(mine / their));
}
else {
- fprintf(stderr, "\nNETDATA INTERNAL FLOATING POINT ARITHMETICS NEEDS %0.2" LONG_DOUBLE_MODIFIER " TIMES LESS MEMORY.\n", (LONG_DOUBLE)(their / mine));
+ fprintf(stderr, "\nNETDATA INTERNAL FLOATING POINT ARITHMETICS NEEDS %0.2" NETDATA_DOUBLE_MODIFIER " TIMES LESS MEMORY.\n", (NETDATA_DOUBLE)(their / mine));
}
fprintf(stderr, "\nNETDATA FLOATING POINT\n");
- fprintf(stderr, "MIN POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW));
- fprintf(stderr, "MAX POSITIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_POSITIVE_MAX_RAW));
- fprintf(stderr, "MIN NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MIN_RAW));
- fprintf(stderr, "MAX NEGATIVE VALUE " CALCULATED_NUMBER_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MAX_RAW));
- fprintf(stderr, "Maximum accuracy loss accepted: " CALCULATED_NUMBER_FORMAT "%%\n\n\n", (calculated_number)ACCURACY_LOSS_ACCEPTED_PERCENT);
+ fprintf(stderr, "MIN POSITIVE VALUE " NETDATA_DOUBLE_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW));
+ fprintf(stderr, "MAX POSITIVE VALUE " NETDATA_DOUBLE_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_POSITIVE_MAX_RAW));
+ fprintf(stderr, "MIN NEGATIVE VALUE " NETDATA_DOUBLE_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MIN_RAW));
+ fprintf(stderr, "MAX NEGATIVE VALUE " NETDATA_DOUBLE_FORMAT "\n", unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MAX_RAW));
+ fprintf(stderr, "Maximum accuracy loss accepted: " NETDATA_DOUBLE_FORMAT "%%\n\n\n", (NETDATA_DOUBLE)ACCURACY_LOSS_ACCEPTED_PERCENT);
// ------------------------------------------------------------------------
@@ -207,7 +209,7 @@ void benchmark_storage_number(int loop, int multiplier) {
n *= multiplier;
if(n > storage_number_positive_max) n = storage_number_positive_min;
- print_calculated_number(buffer, n);
+ print_netdata_double(buffer, n);
}
}
@@ -217,7 +219,8 @@ void benchmark_storage_number(int loop, int multiplier) {
total = user + system;
mine = total;
- fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0));
+ fprintf(stderr, "user %0.5" NETDATA_DOUBLE_MODIFIER ", system %0.5" NETDATA_DOUBLE_MODIFIER
+ ", total %0.5" NETDATA_DOUBLE_MODIFIER "\n", (NETDATA_DOUBLE)(user / 1000000.0), (NETDATA_DOUBLE)(system / 1000000.0), (NETDATA_DOUBLE)(total / 1000000.0));
// ------------------------------------------------------------------------
@@ -231,7 +234,7 @@ void benchmark_storage_number(int loop, int multiplier) {
for(i = 0; i < loop ;i++) {
n *= multiplier;
if(n > storage_number_positive_max) n = storage_number_positive_min;
- snprintfz(buffer, 100, CALCULATED_NUMBER_FORMAT, n);
+ snprintfz(buffer, 100, NETDATA_DOUBLE_FORMAT, n);
}
}
@@ -241,13 +244,14 @@ void benchmark_storage_number(int loop, int multiplier) {
total = user + system;
their = total;
- fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0));
+ fprintf(stderr, "user %0.5" NETDATA_DOUBLE_MODIFIER ", system %0.5" NETDATA_DOUBLE_MODIFIER
+ ", total %0.5" NETDATA_DOUBLE_MODIFIER "\n", (NETDATA_DOUBLE)(user / 1000000.0), (NETDATA_DOUBLE)(system / 1000000.0), (NETDATA_DOUBLE)(total / 1000000.0));
if(mine > total) {
- fprintf(stderr, "NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0));
+ fprintf(stderr, "NETDATA CODE IS SLOWER %0.2" NETDATA_DOUBLE_MODIFIER " %%\n", (NETDATA_DOUBLE)(mine * 100.0 / their - 100.0));
}
else {
- fprintf(stderr, "NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0));
+ fprintf(stderr, "NETDATA CODE IS F A S T E R %0.2" NETDATA_DOUBLE_MODIFIER " %%\n", (NETDATA_DOUBLE)(their * 100.0 / mine - 100.0));
}
// ------------------------------------------------------------------------
@@ -265,7 +269,7 @@ void benchmark_storage_number(int loop, int multiplier) {
s = pack_storage_number(n, SN_DEFAULT_FLAGS);
d = unpack_storage_number(s);
- print_calculated_number(buffer, d);
+ print_netdata_double(buffer, d);
}
}
@@ -275,13 +279,14 @@ void benchmark_storage_number(int loop, int multiplier) {
total = user + system;
mine = total;
- fprintf(stderr, "user %0.5" LONG_DOUBLE_MODIFIER ", system %0.5" LONG_DOUBLE_MODIFIER ", total %0.5" LONG_DOUBLE_MODIFIER "\n", (LONG_DOUBLE)(user / 1000000.0), (LONG_DOUBLE)(system / 1000000.0), (LONG_DOUBLE)(total / 1000000.0));
+ fprintf(stderr, "user %0.5" NETDATA_DOUBLE_MODIFIER ", system %0.5" NETDATA_DOUBLE_MODIFIER
+ ", total %0.5" NETDATA_DOUBLE_MODIFIER "\n", (NETDATA_DOUBLE)(user / 1000000.0), (NETDATA_DOUBLE)(system / 1000000.0), (NETDATA_DOUBLE)(total / 1000000.0));
if(mine > their) {
- fprintf(stderr, "WITH PACKING UNPACKING NETDATA CODE IS SLOWER %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(mine * 100.0 / their - 100.0));
+ fprintf(stderr, "WITH PACKING UNPACKING NETDATA CODE IS SLOWER %0.2" NETDATA_DOUBLE_MODIFIER " %%\n", (NETDATA_DOUBLE)(mine * 100.0 / their - 100.0));
}
else {
- fprintf(stderr, "EVEN WITH PACKING AND UNPACKING, NETDATA CODE IS F A S T E R %0.2" LONG_DOUBLE_MODIFIER " %%\n", (LONG_DOUBLE)(their * 100.0 / mine - 100.0));
+ fprintf(stderr, "EVEN WITH PACKING AND UNPACKING, NETDATA CODE IS F A S T E R %0.2" NETDATA_DOUBLE_MODIFIER " %%\n", (NETDATA_DOUBLE)(their * 100.0 / mine - 100.0));
}
// ------------------------------------------------------------------------
@@ -290,13 +295,13 @@ void benchmark_storage_number(int loop, int multiplier) {
static int check_storage_number_exists() {
uint32_t flags = SN_DEFAULT_FLAGS;
- calculated_number n = 0.0;
+ NETDATA_DOUBLE n = 0.0;
storage_number s = pack_storage_number(n, flags);
- calculated_number d = unpack_storage_number(s);
+ NETDATA_DOUBLE d = unpack_storage_number(s);
if(n != d) {
- fprintf(stderr, "Wrong number returned. Expected " CALCULATED_NUMBER_FORMAT ", returned " CALCULATED_NUMBER_FORMAT "!\n", n, d);
+ fprintf(stderr, "Wrong number returned. Expected " NETDATA_DOUBLE_FORMAT ", returned " NETDATA_DOUBLE_FORMAT "!\n", n, d);
return 1;
}
@@ -306,10 +311,10 @@ static int check_storage_number_exists() {
int unit_test_storage() {
if(check_storage_number_exists()) return 0;
- calculated_number storage_number_positive_min = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW);
- calculated_number storage_number_negative_max = unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MAX_RAW);
+ NETDATA_DOUBLE storage_number_positive_min = unpack_storage_number(STORAGE_NUMBER_POSITIVE_MIN_RAW);
+ NETDATA_DOUBLE storage_number_negative_max = unpack_storage_number(STORAGE_NUMBER_NEGATIVE_MAX_RAW);
- calculated_number c, a = 0;
+ NETDATA_DOUBLE c, a = 0;
int i, j, g, r = 0;
for(g = -1; g <= 1 ; g++) {
@@ -343,23 +348,26 @@ int unit_test_str2ld() {
int i;
for(i = 0; values[i] ; i++) {
char *e_mine = "hello", *e_sys = "world";
- LONG_DOUBLE mine = str2ld(values[i], &e_mine);
- LONG_DOUBLE sys = strtold(values[i], &e_sys);
+ NETDATA_DOUBLE mine = str2ndd(values[i], &e_mine);
+ NETDATA_DOUBLE sys = strtondd(values[i], &e_sys);
if(isnan(mine)) {
if(!isnan(sys)) {
- fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys);
+ fprintf(stderr, "Value '%s' is parsed as %" NETDATA_DOUBLE_MODIFIER
+ ", but system believes it is %" NETDATA_DOUBLE_MODIFIER ".\n", values[i], mine, sys);
return -1;
}
}
else if(isinf(mine)) {
if(!isinf(sys)) {
- fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys);
+ fprintf(stderr, "Value '%s' is parsed as %" NETDATA_DOUBLE_MODIFIER
+ ", but system believes it is %" NETDATA_DOUBLE_MODIFIER ".\n", values[i], mine, sys);
return -1;
}
}
else if(mine != sys && ABS(mine-sys) > 0.000001) {
- fprintf(stderr, "Value '%s' is parsed as %" LONG_DOUBLE_MODIFIER ", but system believes it is %" LONG_DOUBLE_MODIFIER ", delta %" LONG_DOUBLE_MODIFIER ".\n", values[i], mine, sys, sys-mine);
+ fprintf(stderr, "Value '%s' is parsed as %" NETDATA_DOUBLE_MODIFIER
+ ", but system believes it is %" NETDATA_DOUBLE_MODIFIER ", delta %" NETDATA_DOUBLE_MODIFIER ".\n", values[i], mine, sys, sys-mine);
return -1;
}
@@ -368,7 +376,8 @@ int unit_test_str2ld() {
return -1;
}
- fprintf(stderr, "str2ld() parsed value '%s' exactly the same way with strtold(), returned %" LONG_DOUBLE_MODIFIER " vs %" LONG_DOUBLE_MODIFIER "\n", values[i], mine, sys);
+ fprintf(stderr, "str2ndd() parsed value '%s' exactly the same way with strtold(), returned %" NETDATA_DOUBLE_MODIFIER
+ " vs %" NETDATA_DOUBLE_MODIFIER "\n", values[i], mine, sys);
}
return 0;
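
A condensed sketch (not part of the diff) of the comparison unit_test_str2ld() performs with the renamed parser: str2ndd() must agree with the system parser (strtondd(), which presumably resolves to strtold() or strtod() depending on how NETDATA_DOUBLE is built) for NaN, infinities, and finite values within a small tolerance.

    // Compare netdata's parser with the libc one for a single input string.
    // Returns 0 when they agree in the same sense the unit test above accepts.
    static int compare_parsers(const char *value) {
        char *e_mine = NULL, *e_sys = NULL;
        NETDATA_DOUBLE mine = str2ndd(value, &e_mine);
        NETDATA_DOUBLE sys  = strtondd(value, &e_sys);

        if(isnan(mine)) return isnan(sys) ? 0 : -1;
        if(isinf(mine)) return isinf(sys) ? 0 : -1;

        // accept tiny rounding differences, as the test above does
        return (mine == sys || ABS(mine - sys) <= 0.000001) ? 0 : -1;
    }
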
@@ -461,10 +470,10 @@ struct test {
unsigned long feed_entries;
unsigned long result_entries;
struct feed_values *feed;
- calculated_number *results;
+ NETDATA_DOUBLE *results;
collected_number *feed2;
- calculated_number *results2;
+ NETDATA_DOUBLE *results2;
};
// --------------------------------------------------------------------------------------------------------------------
@@ -484,7 +493,7 @@ struct feed_values test1_feed[] = {
{ 1000000, 100 },
};
-calculated_number test1_results[] = {
+NETDATA_DOUBLE test1_results[] = {
20, 30, 40, 50, 60, 70, 80, 90, 100
};
@@ -520,7 +529,7 @@ struct feed_values test2_feed[] = {
{ 1000000, 100 },
};
-calculated_number test2_results[] = {
+NETDATA_DOUBLE test2_results[] = {
20, 30, 40, 50, 60, 70, 80, 90, 100
};
@@ -555,7 +564,7 @@ struct feed_values test3_feed[] = {
{ 1000000, 100 },
};
-calculated_number test3_results[] = {
+NETDATA_DOUBLE test3_results[] = {
10, 10, 10, 10, 10, 10, 10, 10, 10
};
@@ -590,7 +599,7 @@ struct feed_values test4_feed[] = {
{ 1000000, 100 },
};
-calculated_number test4_results[] = {
+NETDATA_DOUBLE test4_results[] = {
10, 10, 10, 10, 10, 10, 10, 10, 10
};
@@ -625,7 +634,7 @@ struct feed_values test5_feed[] = {
{ 1000000, 0x00000000FFFFFFFFULL / 15 * 0 },
};
-calculated_number test5_results[] = {
+NETDATA_DOUBLE test5_results[] = {
0x00000000FFFFFFFFULL / 15 * 7,
0x00000000FFFFFFFFULL / 15 * 7,
0x00000000FFFFFFFFULL / 15,
@@ -668,7 +677,7 @@ struct feed_values test5b_feed[] = {
{ 1000000, 0xFFFFFFFFFFFFFFFFULL / 15 * 0 },
};
-calculated_number test5b_results[] = {
+NETDATA_DOUBLE test5b_results[] = {
0xFFFFFFFFFFFFFFFFULL / 15 * 7,
0xFFFFFFFFFFFFFFFFULL / 15 * 7,
0xFFFFFFFFFFFFFFFFULL / 15,
@@ -717,7 +726,7 @@ struct feed_values test6_feed[] = {
{ 250000, 16000 },
};
-calculated_number test6_results[] = {
+NETDATA_DOUBLE test6_results[] = {
4000, 4000, 4000, 4000
};
@@ -752,7 +761,7 @@ struct feed_values test7_feed[] = {
{ 2000000, 10000 },
};
-calculated_number test7_results[] = {
+NETDATA_DOUBLE test7_results[] = {
500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500
};
@@ -783,7 +792,7 @@ struct feed_values test8_feed[] = {
{ 2000000, 6000 },
};
-calculated_number test8_results[] = {
+NETDATA_DOUBLE test8_results[] = {
1250, 2000, 2250, 3000, 3250, 4000, 4250, 5000, 5250, 6000
};
@@ -824,7 +833,7 @@ struct feed_values test9_feed[] = {
{ 250000, 16000 },
};
-calculated_number test9_results[] = {
+NETDATA_DOUBLE test9_results[] = {
4000, 8000, 12000, 16000
};
@@ -859,7 +868,7 @@ struct feed_values test10_feed[] = {
{ 1000000, 6900 + 1000 },
};
-calculated_number test10_results[] = {
+NETDATA_DOUBLE test10_results[] = {
1000, 1000, 1000, 1000, 1000, 1000, 1000
};
@@ -898,11 +907,11 @@ collected_number test11_feed2[] = {
10, 20, 30, 40, 50, 60, 70, 80, 90, 100
};
-calculated_number test11_results[] = {
+NETDATA_DOUBLE test11_results[] = {
50, 50, 50, 50, 50, 50, 50, 50, 50
};
-calculated_number test11_results2[] = {
+NETDATA_DOUBLE test11_results2[] = {
50, 50, 50, 50, 50, 50, 50, 50, 50
};
@@ -941,11 +950,11 @@ collected_number test12_feed2[] = {
10*3, 20*3, 30*3, 40*3, 50*3, 60*3, 70*3, 80*3, 90*3, 100*3
};
-calculated_number test12_results[] = {
+NETDATA_DOUBLE test12_results[] = {
25, 25, 25, 25, 25, 25, 25, 25, 25
};
-calculated_number test12_results2[] = {
+NETDATA_DOUBLE test12_results2[] = {
75, 75, 75, 75, 75, 75, 75, 75, 75
};
@@ -980,7 +989,7 @@ struct feed_values test13_feed[] = {
{ 1000000, 6900 + 1000 },
};
-calculated_number test13_results[] = {
+NETDATA_DOUBLE test13_results[] = {
83.3333300, 100, 100, 100, 100, 100, 100
};
@@ -1015,7 +1024,7 @@ struct feed_values test14_feed[] = {
{ 29942000, 0x0153987f888982d0ULL },
};
-calculated_number test14_results[] = {
+NETDATA_DOUBLE test14_results[] = {
23.1383300, 21.8515600, 21.8804600, 21.7788000, 22.0112200, 22.4386100, 22.0906100, 21.9150800
};
@@ -1047,7 +1056,7 @@ struct feed_values test14b_feed[] = {
{ 29942000, 13573000 + 29969000 + 29958000 + 30054000 + 34952000 + 25046000 + 29947000 + 30054000 + 29942000 },
};
-calculated_number test14b_results[] = {
+NETDATA_DOUBLE test14b_results[] = {
1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000
};
@@ -1079,7 +1088,7 @@ struct feed_values test14c_feed[] = {
{ 30000000, 29000000 + 1000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 + 30000000 },
};
-calculated_number test14c_results[] = {
+NETDATA_DOUBLE test14c_results[] = {
1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000, 1000000
};
@@ -1118,11 +1127,11 @@ collected_number test15_feed2[] = {
178825286, 178825286, 178825286, 178825286, 178825498, 178825498, 179165652, 179202964, 179203282, 179204130
};
-calculated_number test15_results[] = {
+NETDATA_DOUBLE test15_results[] = {
5857.4080000, 5898.4540000, 5891.6590000, 5806.3160000, 5914.2640000, 3202.2630000, 5589.6560000, 5822.5260000, 5911.7520000
};
-calculated_number test15_results2[] = {
+NETDATA_DOUBLE test15_results2[] = {
0.0000000, 0.0000000, 0.0024944, 1.6324779, 0.0212777, 2655.1890000, 290.5387000, 5.6733610, 6.5960220
};
@@ -1173,12 +1182,13 @@ int run_test(struct test *test)
if(c) {
time_now += test->feed[c].microseconds;
- fprintf(stderr, " > %s: feeding position %lu, after %0.3f seconds (%0.3f seconds from start), delta " CALCULATED_NUMBER_FORMAT ", rate " CALCULATED_NUMBER_FORMAT "\n",
+ fprintf(stderr, " > %s: feeding position %lu, after %0.3f seconds (%0.3f seconds from start), delta " NETDATA_DOUBLE_FORMAT
+ ", rate " NETDATA_DOUBLE_FORMAT "\n",
test->name, c+1,
(float)test->feed[c].microseconds / 1000000.0,
(float)time_now / 1000000.0,
- ((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor,
- (((calculated_number)test->feed[c].value - (calculated_number)last) * (calculated_number)test->multiplier / (calculated_number)test->divisor) / (calculated_number)test->feed[c].microseconds * (calculated_number)1000000);
+ ((NETDATA_DOUBLE)test->feed[c].value - (NETDATA_DOUBLE)last) * (NETDATA_DOUBLE)test->multiplier / (NETDATA_DOUBLE)test->divisor,
+ (((NETDATA_DOUBLE)test->feed[c].value - (NETDATA_DOUBLE)last) * (NETDATA_DOUBLE)test->multiplier / (NETDATA_DOUBLE)test->divisor) / (NETDATA_DOUBLE)test->feed[c].microseconds * (NETDATA_DOUBLE)1000000);
// rrdset_next_usec_unfiltered(st, test->feed[c].microseconds);
st->usec_since_last_update = test->feed[c].microseconds;
@@ -1216,10 +1226,11 @@ int run_test(struct test *test)
unsigned long max = (st->counter < test->result_entries)?st->counter:test->result_entries;
for(c = 0 ; c < max ; c++) {
- calculated_number v = unpack_storage_number(rd->values[c]);
- calculated_number n = unpack_storage_number(pack_storage_number(test->results[c], SN_DEFAULT_FLAGS));
- int same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0;
- fprintf(stderr, " %s/%s: checking position %lu (at %"PRId64" secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n",
+ NETDATA_DOUBLE v = unpack_storage_number(rd->db[c]);
+ NETDATA_DOUBLE n = unpack_storage_number(pack_storage_number(test->results[c], SN_DEFAULT_FLAGS));
+ int same = (roundndd(v * 10000000.0) == roundndd(n * 10000000.0))?1:0;
+ fprintf(stderr, " %s/%s: checking position %lu (at %"PRId64" secs), expecting value " NETDATA_DOUBLE_FORMAT
+ ", found " NETDATA_DOUBLE_FORMAT ", %s\n",
test->name, rd->name, c+1,
(int64_t)((rrdset_first_entry_t(st) + c * st->update_every) - time_start),
n, v, (same)?"OK":"### E R R O R ###");
@@ -1227,10 +1238,11 @@ int run_test(struct test *test)
if(!same) errors++;
if(rd2) {
- v = unpack_storage_number(rd2->values[c]);
+ v = unpack_storage_number(rd2->db[c]);
n = test->results2[c];
- same = (calculated_number_round(v * 10000000.0) == calculated_number_round(n * 10000000.0))?1:0;
- fprintf(stderr, " %s/%s: checking position %lu (at %"PRId64" secs), expecting value " CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", %s\n",
+ same = (roundndd(v * 10000000.0) == roundndd(n * 10000000.0))?1:0;
+ fprintf(stderr, " %s/%s: checking position %lu (at %"PRId64" secs), expecting value " NETDATA_DOUBLE_FORMAT
+ ", found " NETDATA_DOUBLE_FORMAT ", %s\n",
test->name, rd2->name, c+1,
(int64_t)((rrdset_first_entry_t(st) + c * st->update_every) - time_start),
n, v, (same)?"OK":"### E R R O R ###");
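
The values-array rename (rd->values to rd->db) and the switch from calculated_number_round() to roundndd() boil down to the comparison below; a sketch only, using just the names visible in this hunk.

    // Does the value stored in slot 'c' match the expected result, after the
    // same pack/unpack rounding the stored value went through?
    static int slot_matches(RRDDIM *rd, long c, NETDATA_DOUBLE expected) {
        NETDATA_DOUBLE v = unpack_storage_number(rd->db[c]);
        NETDATA_DOUBLE n = unpack_storage_number(pack_storage_number(expected, SN_DEFAULT_FLAGS));
        return roundndd(v * 10000000.0) == roundndd(n * 10000000.0);
    }
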
@@ -1242,6 +1254,8 @@ int run_test(struct test *test)
}
static int test_variable_renames(void) {
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
+
fprintf(stderr, "Creating chart\n");
RRDSET *st = rrdset_create_localhost("chart", "ID", NULL, "family", "context", "Unit Testing", "a value", "unittest", NULL, 1, 1, RRDSET_TYPE_LINE);
fprintf(stderr, "Created chart with id '%s', name '%s'\n", st->id, st->name);
@@ -1326,6 +1340,7 @@ int check_strdupz_path_subpath() {
int run_all_mockup_tests(void)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
if(check_strdupz_path_subpath())
return 1;
@@ -1399,6 +1414,7 @@ int run_all_mockup_tests(void)
int unit_test(long delay, long shift)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
static int repeat = 0;
repeat++;
@@ -1466,14 +1482,14 @@ int unit_test(long delay, long shift)
int ret = 0;
storage_number sn;
- calculated_number cn, v;
+ NETDATA_DOUBLE cn, v;
for(c = 0 ; c < st->counter ; c++) {
fprintf(stderr, "\nPOSITION: c = %lu, EXPECTED VALUE %lu\n", c, (oincrement + c * increment + increment * (1000000 - shift) / 1000000 )* 10);
for(rd = st->dimensions ; rd ; rd = rd->next) {
- sn = rd->values[c];
+ sn = rd->db[c];
cn = unpack_storage_number(sn);
- fprintf(stderr, "\t %s " CALCULATED_NUMBER_FORMAT " (PACKED AS " STORAGE_NUMBER_FORMAT ") -> ", rd->id, cn, sn);
+ fprintf(stderr, "\t %s " NETDATA_DOUBLE_FORMAT " (PACKED AS " STORAGE_NUMBER_FORMAT ") -> ", rd->id, cn, sn);
if(rd == rdabs) v =
( oincrement
@@ -1488,7 +1504,7 @@ int unit_test(long delay, long shift)
if(v == cn) fprintf(stderr, "passed.\n");
else {
- fprintf(stderr, "ERROR! (expected " CALCULATED_NUMBER_FORMAT ")\n", v);
+ fprintf(stderr, "ERROR! (expected " NETDATA_DOUBLE_FORMAT ")\n", v);
ret = 1;
}
}
@@ -1501,6 +1517,7 @@ int unit_test(long delay, long shift)
}
int test_sqlite(void) {
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
sqlite3 *db_meta;
fprintf(stderr, "Testing SQLIte\n");
@@ -1527,10 +1544,194 @@ int test_sqlite(void) {
fprintf(stderr,"Failed to test SQLite: Update with LIMIT failed\n");
return 1;
}
+
+ BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE);
+ char *uuid_str = "0000_000";
+
+ buffer_sprintf(sql, TABLE_ACLK_CHART, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ buffer_flush(sql);
+ if (rc != SQLITE_OK)
+ goto error;
+
+ buffer_sprintf(sql, TABLE_ACLK_CHART_PAYLOAD, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ buffer_flush(sql);
+ if (rc != SQLITE_OK)
+ goto error;
+
+ buffer_sprintf(sql, TABLE_ACLK_CHART_LATEST, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ if (rc != SQLITE_OK)
+ goto error;
+ buffer_flush(sql);
+
+ buffer_sprintf(sql, INDEX_ACLK_CHART, uuid_str, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ if (rc != SQLITE_OK)
+ goto error;
+ buffer_flush(sql);
+
+ buffer_sprintf(sql, INDEX_ACLK_CHART_LATEST, uuid_str, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ if (rc != SQLITE_OK)
+ goto error;
+ buffer_flush(sql);
+
+ buffer_sprintf(sql, TRIGGER_ACLK_CHART_PAYLOAD, uuid_str, uuid_str, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ if (rc != SQLITE_OK)
+ goto error;
+ buffer_flush(sql);
+
+ buffer_sprintf(sql, TABLE_ACLK_ALERT, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ if (rc != SQLITE_OK)
+ goto error;
+ buffer_flush(sql);
+
+ buffer_sprintf(sql, INDEX_ACLK_ALERT, uuid_str, uuid_str);
+ rc = sqlite3_exec(db_meta, buffer_tostring(sql), 0, 0, NULL);
+ if (rc != SQLITE_OK)
+ goto error;
+ buffer_flush(sql);
+
+ buffer_free(sql);
fprintf(stderr,"SQLite is OK\n");
return 0;
+error:
+ fprintf(stderr,"SQLite statement failed: %s\n", buffer_tostring(sql));
+ buffer_free(sql);
+ fprintf(stderr,"SQLite tests failed\n");
+ return 1;
}
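
The new SQLite checks all follow one pattern: render a DDL template into a BUFFER, execute it with sqlite3_exec(), flush the buffer and move on, or jump to the error label. A condensed sketch of that pattern; EXAMPLE_TABLE is a hypothetical stand-in for the TABLE_ACLK_* / INDEX_ACLK_* / TRIGGER_ACLK_* templates used above.

    // Hypothetical template; the real code uses the ACLK schema macros above.
    #define EXAMPLE_TABLE "CREATE TABLE IF NOT EXISTS t_%s (id INTEGER PRIMARY KEY);"

    static int exec_one_ddl(sqlite3 *db, const char *uuid_str) {
        BUFFER *sql = buffer_create(ACLK_SYNC_QUERY_SIZE);

        buffer_sprintf(sql, EXAMPLE_TABLE, uuid_str);        // render the template
        int rc = sqlite3_exec(db, buffer_tostring(sql), 0, 0, NULL);
        buffer_flush(sql);                                   // reuse the buffer for the next statement

        buffer_free(sql);
        return (rc == SQLITE_OK) ? 0 : 1;
    }
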
+int unit_test_bitmap256(void) {
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
+
+ BITMAP256 test_bitmap = {0};
+
+ bitmap256_set_bit(&test_bitmap, 0, 1);
+ bitmap256_set_bit(&test_bitmap, 64, 1);
+ bitmap256_set_bit(&test_bitmap, 128, 1);
+ bitmap256_set_bit(&test_bitmap, 192, 1);
+    if (test_bitmap.data[0] == 1)
+        fprintf(stderr, "%s() BIT 0 is OK\n", __FUNCTION__ );
+    if (test_bitmap.data[1] == 1)
+        fprintf(stderr, "%s() BIT 64 is OK\n", __FUNCTION__ );
+    if (test_bitmap.data[2] == 1)
+        fprintf(stderr, "%s() BIT 128 is OK\n", __FUNCTION__ );
+    if (test_bitmap.data[3] == 1)
+        fprintf(stderr, "%s() BIT 192 is OK\n", __FUNCTION__ );
+
+ uint8_t i=0;
+ int j = 0;
+ do {
+ bitmap256_set_bit(&test_bitmap, i++, 1);
+ j++;
+ } while (j < 256);
+
+ if (test_bitmap.data[0] == 0xffffffffffffffff)
+ fprintf(stderr, "%s() INDEX 0 is fully set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 0 is %lx expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
+ return 1;
+ }
+
+ if (test_bitmap.data[1] == 0xffffffffffffffff)
+ fprintf(stderr, "%s() INDEX 1 is fully set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 1 is %lx expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
+ return 1;
+ }
+
+ if (test_bitmap.data[2] == 0xffffffffffffffff)
+ fprintf(stderr, "%s() INDEX 2 is fully set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 2 is %lx expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
+ return 1;
+ }
+
+ if (test_bitmap.data[3] == 0xffffffffffffffff)
+ fprintf(stderr, "%s() INDEX 3 is fully set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 3 is %lx expected 0xffffffffffffffff\n", __FUNCTION__, test_bitmap.data[0]);
+ return 1;
+ }
+
+ i = 0;
+ j = 0;
+ do {
+ bitmap256_set_bit(&test_bitmap, i++, 0);
+ j++;
+ } while (j < 256);
+
+ if (test_bitmap.data[0] == 0)
+ fprintf(stderr, "%s() INDEX 0 is reset OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 0 is not reset FAILED\n", __FUNCTION__);
+ return 1;
+ }
+ if (test_bitmap.data[1] == 0)
+ fprintf(stderr, "%s() INDEX 1 is reset OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 1 is not reset FAILED\n", __FUNCTION__);
+ return 1;
+ }
+
+ if (test_bitmap.data[2] == 0)
+ fprintf(stderr, "%s() INDEX 2 is reset OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 2 is not reset FAILED\n", __FUNCTION__);
+ return 1;
+ }
+
+ if (test_bitmap.data[3] == 0)
+ fprintf(stderr, "%s() INDEX 3 is reset OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 3 is not reset FAILED\n", __FUNCTION__);
+ return 1;
+ }
+
+ i=0;
+ j = 0;
+ do {
+ bitmap256_set_bit(&test_bitmap, i, 1);
+ i += 4;
+ j += 4;
+ } while (j < 256);
+
+ if (test_bitmap.data[0] == 0x1111111111111111)
+ fprintf(stderr, "%s() INDEX 0 is 0x1111111111111111 set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 0 is %lx expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[0]);
+ return 1;
+ }
+
+ if (test_bitmap.data[1] == 0x1111111111111111)
+ fprintf(stderr, "%s() INDEX 1 is 0x1111111111111111 set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 1 is %lx expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[1]);
+ return 1;
+ }
+
+ if (test_bitmap.data[2] == 0x1111111111111111)
+ fprintf(stderr, "%s() INDEX 2 is 0x1111111111111111 set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 2 is %lx expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[2]);
+ return 1;
+ }
+
+ if (test_bitmap.data[3] == 0x1111111111111111)
+ fprintf(stderr, "%s() INDEX 3 is 0x1111111111111111 set OK\n", __FUNCTION__);
+ else {
+ fprintf(stderr, "%s() INDEX 3 is %lx expected 0x1111111111111111\n", __FUNCTION__, test_bitmap.data[3]);
+ return 1;
+ }
+
+ fprintf(stderr, "%s() tests passed\n", __FUNCTION__);
+ return 0;
+}
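
For reference, a small usage sketch of the bitmap exercised above. Judging from the test, BITMAP256 keeps 256 bits in four 64-bit words (data[0]..data[3], bit N at data[N / 64], position N % 64) and bitmap256_set_bit(bitmap, index, value) sets or clears a single bit; the read helper below is illustrative, not part of the API.

    // Illustrative read helper, based on the layout the test above implies.
    static int bit_is_set(const BITMAP256 *bmp, uint8_t idx) {
        return (int)((bmp->data[idx / 64] >> (idx % 64)) & 1);
    }

    static void bitmap256_example(void) {
        BITMAP256 bmp = {0};

        bitmap256_set_bit(&bmp, 200, 1);                     // set bit 200
        fprintf(stderr, "bit 200 set: %d\n", bit_is_set(&bmp, 200));

        bitmap256_set_bit(&bmp, 200, 0);                     // clear it again
        fprintf(stderr, "bit 200 set: %d\n", bit_is_set(&bmp, 200));
    }
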
#ifdef ENABLE_DBENGINE
static inline void rrddim_set_by_pointer_fake_time(RRDDIM *rd, collected_number value, time_t now)
@@ -1571,6 +1772,7 @@ static RRDHOST *dbengine_rrdhost_find_or_create(char *name)
, default_rrdpush_api_key
, default_rrdpush_send_charts_matching
, NULL
+ , 0
);
}
@@ -1590,6 +1792,7 @@ static const int QUERY_BATCH = 4096;
static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
int update_every)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
int i, j;
char name[101];
@@ -1628,7 +1831,7 @@ static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDI
    // Flush pages for subsequent real values
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0; j < DIMS; ++j) {
- rrdeng_store_metric_flush_current_page(rd[i][j]);
+ rrdeng_store_metric_flush_current_page((rd[i][j])->tiers[0]->db_collection_handle);
}
}
}
@@ -1637,6 +1840,7 @@ static void test_dbengine_create_charts(RRDHOST *host, RRDSET *st[CHARTS], RRDDI
static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
int current_region, time_t time_start)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
time_t time_now;
int i, j, c, update_every;
collected_number next;
@@ -1672,13 +1876,14 @@ static time_t test_dbengine_create_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS
static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
int current_region, time_t time_start)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
uint8_t same;
- time_t time_now, time_retrieved;
+ time_t time_now, time_retrieved, end_time;
int i, j, k, c, errors, update_every;
collected_number last;
- calculated_number value, expected;
- storage_number n;
+ NETDATA_DOUBLE value, expected;
struct rrddim_query_handle handle;
+ size_t value_errors = 0, time_errors = 0;
update_every = REGION_UPDATE_EVERY[current_region];
errors = 0;
@@ -1688,32 +1893,45 @@ static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DI
time_now = time_start + (c + 1) * update_every;
for (i = 0 ; i < CHARTS ; ++i) {
for (j = 0; j < DIMS; ++j) {
- rd[i][j]->state->query_ops.init(rd[i][j], &handle, time_now, time_now + QUERY_BATCH * update_every);
+ rd[i][j]->tiers[0]->query_ops.init(rd[i][j]->tiers[0]->db_metric_handle, &handle, time_now, time_now + QUERY_BATCH * update_every, TIER_QUERY_FETCH_SUM);
for (k = 0; k < QUERY_BATCH; ++k) {
last = ((collected_number)i * DIMS) * REGION_POINTS[current_region] +
j * REGION_POINTS[current_region] + c + k;
- expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_DEFAULT_FLAGS));
+ expected = unpack_storage_number(pack_storage_number((NETDATA_DOUBLE)last, SN_DEFAULT_FLAGS));
- n = rd[i][j]->state->query_ops.next_metric(&handle, &time_retrieved);
- value = unpack_storage_number(n);
+ STORAGE_POINT sp = rd[i][j]->tiers[0]->query_ops.next_metric(&handle);
+ value = sp.sum;
+ time_retrieved = sp.start_time;
+ end_time = sp.end_time;
- same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ same = (roundndd(value) == roundndd(expected)) ? 1 : 0;
if(!same) {
- fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
- CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ if(!value_errors)
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value " NETDATA_DOUBLE_FORMAT
+ ", found " NETDATA_DOUBLE_FORMAT ", ### E R R O R ###\n",
st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, expected, value);
+ value_errors++;
errors++;
}
- if(time_retrieved != time_now + k * update_every) {
- fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found timestamp %lu ### E R R O R ###\n",
+ if(end_time != time_now + k * update_every) {
+ if(!time_errors)
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found timestamp %lu ### E R R O R ###\n",
st[i]->name, rd[i][j]->name, (unsigned long)time_now + k * update_every, (unsigned long)time_retrieved);
+ time_errors++;
errors++;
}
}
- rd[i][j]->state->query_ops.finalize(&handle);
+ rd[i][j]->tiers[0]->query_ops.finalize(&handle);
}
}
}
+
+ if(value_errors)
+ fprintf(stderr, "%zu value errors encountered\n", value_errors);
+
+ if(time_errors)
+ fprintf(stderr, "%zu time errors encountered\n", time_errors);
+
return errors;
}
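
The metric checks above switch from the old rd->state->query_ops / storage_number iteration to the per-tier API, where next_metric() returns a STORAGE_POINT carrying the aggregated value and its time range. A minimal sketch of that loop, using only the names that appear in this hunk:

    // Walk tier 0 of one dimension between 'after' and 'before' and print
    // each point's time range and sum.
    static void dump_tier0(RRDDIM *rd, time_t after, time_t before) {
        struct rrddim_query_handle handle;

        rd->tiers[0]->query_ops.init(rd->tiers[0]->db_metric_handle, &handle,
                                     after, before, TIER_QUERY_FETCH_SUM);

        while(!rd->tiers[0]->query_ops.is_finished(&handle)) {
            STORAGE_POINT sp = rd->tiers[0]->query_ops.next_metric(&handle);

            if(!netdata_double_isnumber(sp.sum))
                continue;                                    // gap in the data

            fprintf(stderr, "[%ld .. %ld] " NETDATA_DOUBLE_FORMAT "\n",
                    (long)sp.start_time, (long)sp.end_time, sp.sum);
        }

        rd->tiers[0]->query_ops.finalize(&handle);
    }
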
@@ -1721,49 +1939,55 @@ static int test_dbengine_check_metrics(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DI
static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS],
int current_region, time_t time_start, time_t time_end)
{
+ int update_every = REGION_UPDATE_EVERY[current_region];
+ fprintf(stderr, "%s() running on region %d, start time %ld, end time %ld, update every %d...\n", __FUNCTION__, current_region, time_start, time_end, update_every);
uint8_t same;
time_t time_now, time_retrieved;
- int i, j, errors, update_every;
+ int i, j, errors, value_errors = 0, time_errors = 0;
long c;
collected_number last;
- calculated_number value, expected;
+ NETDATA_DOUBLE value, expected;
errors = 0;
- update_every = REGION_UPDATE_EVERY[current_region];
long points = (time_end - time_start) / update_every;
for (i = 0 ; i < CHARTS ; ++i) {
ONEWAYALLOC *owa = onewayalloc_create(0);
- RRDR *r = rrd2rrdr(owa, st[i], points, time_start + update_every, time_end, RRDR_GROUPING_AVERAGE, 0, 0, NULL, NULL, 0);
+ RRDR *r = rrd2rrdr(owa, st[i], points, time_start, time_end,
+ RRDR_GROUPING_AVERAGE, 0, RRDR_OPTION_NATURAL_POINTS,
+ NULL, NULL, NULL, 0, 0);
+
if (!r) {
- fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
+ fprintf(stderr, " DB-engine unittest %s: empty RRDR on region %d ### E R R O R ###\n", st[i]->name, current_region);
return ++errors;
} else {
assert(r->st == st[i]);
for (c = 0; c != rrdr_rows(r) ; ++c) {
RRDDIM *d;
- time_now = time_start + (c + 2) * update_every;
+ time_now = time_start + (c + 1) * update_every;
time_retrieved = r->t[c];
// for each dimension
for (j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
- calculated_number *cn = &r->v[ c * r->d ];
+ NETDATA_DOUBLE *cn = &r->v[ c * r->d ];
value = cn[j];
assert(rd[i][j] == d);
- last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c + 1;
- expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_DEFAULT_FLAGS));
+ last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c;
+ expected = unpack_storage_number(pack_storage_number((NETDATA_DOUBLE)last, SN_DEFAULT_FLAGS));
- same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ same = (roundndd(value) == roundndd(expected)) ? 1 : 0;
if(!same) {
- fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
- CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ if(value_errors < 20)
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value " NETDATA_DOUBLE_FORMAT
+ ", RRDR found " NETDATA_DOUBLE_FORMAT ", ### E R R O R ###\n",
st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
- errors++;
+ value_errors++;
}
if(time_retrieved != time_now) {
- fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ if(time_errors < 20)
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
- errors++;
+ time_errors++;
}
}
}
@@ -1771,12 +1995,20 @@ static int test_dbengine_check_rrdr(RRDSET *st[CHARTS], RRDDIM *rd[CHARTS][DIMS]
}
onewayalloc_destroy(owa);
}
- return errors;
+
+ if(value_errors)
+ fprintf(stderr, "%d value errors encountered\n", value_errors);
+
+ if(time_errors)
+ fprintf(stderr, "%d time errors encountered\n", time_errors);
+
+ return errors + value_errors + time_errors;
}
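
rrd2rrdr() now takes the natural-points option plus extra trailing arguments (passed as NULL/0 at these call sites). A condensed sketch of issuing such a query and walking the resulting RRDR rows, mirroring test_dbengine_check_rrdr() above:

    // Average one chart over [after, before] and print the first dimension
    // of every returned row.
    static void query_chart_average(RRDSET *st, time_t after, time_t before, int update_every) {
        long points = (before - after) / update_every;

        ONEWAYALLOC *owa = onewayalloc_create(0);
        RRDR *r = rrd2rrdr(owa, st, points, after, before,
                           RRDR_GROUPING_AVERAGE, 0, RRDR_OPTION_NATURAL_POINTS,
                           NULL, NULL, NULL, 0, 0);

        if(r) {
            for(long c = 0; c != rrdr_rows(r); c++) {
                NETDATA_DOUBLE *cn = &r->v[ c * r->d ];      // one row, r->d values wide
                fprintf(stderr, "t=%ld value=" NETDATA_DOUBLE_FORMAT "\n", (long)r->t[c], cn[0]);
            }
        }

        onewayalloc_destroy(owa);    // as in the test above, the RRDR is released with the arena
    }
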
int test_dbengine(void)
{
- int i, j, errors, update_every, current_region;
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
+ int i, j, errors, value_errors = 0, time_errors = 0, update_every, current_region;
RRDHOST *host = NULL;
RRDSET *st[CHARTS];
RRDDIM *rd[CHARTS][DIMS];
@@ -1809,7 +2041,7 @@ int test_dbengine(void)
for (i = 0 ; i < CHARTS ; ++i) {
st[i]->update_every = update_every;
for (j = 0; j < DIMS; ++j) {
- rrdeng_store_metric_flush_current_page(rd[i][j]);
+ rrdeng_store_metric_flush_current_page((rd[i][j])->tiers[0]->db_collection_handle);
}
}
@@ -1828,7 +2060,7 @@ int test_dbengine(void)
for (i = 0 ; i < CHARTS ; ++i) {
st[i]->update_every = update_every;
for (j = 0; j < DIMS; ++j) {
- rrdeng_store_metric_flush_current_page(rd[i][j]);
+ rrdeng_store_metric_flush_current_page((rd[i][j])->tiers[0]->db_collection_handle);
}
}
@@ -1854,7 +2086,9 @@ int test_dbengine(void)
long point_offset = (time_start[current_region] - time_start[0]) / update_every;
for (i = 0 ; i < CHARTS ; ++i) {
ONEWAYALLOC *owa = onewayalloc_create(0);
- RRDR *r = rrd2rrdr(owa, st[i], points, time_start[0] + update_every, time_end[REGIONS - 1], RRDR_GROUPING_AVERAGE, 0, 0, NULL, NULL, 0);
+ RRDR *r = rrd2rrdr(owa, st[i], points, time_start[0] + update_every,
+ time_end[REGIONS - 1], RRDR_GROUPING_AVERAGE, 0,
+ RRDR_OPTION_NATURAL_POINTS, NULL, NULL, NULL, 0, 0);
if (!r) {
fprintf(stderr, " DB-engine unittest %s: empty RRDR ### E R R O R ###\n", st[i]->name);
++errors;
@@ -1870,24 +2104,26 @@ int test_dbengine(void)
// for each dimension
for(j = 0, d = r->st->dimensions ; d && j < r->d ; ++j, d = d->next) {
- calculated_number *cn = &r->v[ c * r->d ];
- calculated_number value = cn[j];
+ NETDATA_DOUBLE *cn = &r->v[ c * r->d ];
+ NETDATA_DOUBLE value = cn[j];
assert(rd[i][j] == d);
collected_number last = i * DIMS * REGION_POINTS[current_region] + j * REGION_POINTS[current_region] + c - point_offset + 1;
- calculated_number expected = unpack_storage_number(pack_storage_number((calculated_number)last, SN_DEFAULT_FLAGS));
+ NETDATA_DOUBLE expected = unpack_storage_number(pack_storage_number((NETDATA_DOUBLE)last, SN_DEFAULT_FLAGS));
- uint8_t same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ uint8_t same = (roundndd(value) == roundndd(expected)) ? 1 : 0;
if(!same) {
- fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value "
- CALCULATED_NUMBER_FORMAT ", RRDR found " CALCULATED_NUMBER_FORMAT ", ### E R R O R ###\n",
+ if(!value_errors)
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, expecting value " NETDATA_DOUBLE_FORMAT
+ ", RRDR found " NETDATA_DOUBLE_FORMAT ", ### E R R O R ###\n",
st[i]->name, rd[i][j]->name, (unsigned long)time_now, expected, value);
- errors++;
+ value_errors++;
}
if(time_retrieved != time_now) {
- fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
+ if(!time_errors)
+ fprintf(stderr, " DB-engine unittest %s/%s: at %lu secs, found RRDR timestamp %lu ### E R R O R ###\n",
st[i]->name, rd[i][j]->name, (unsigned long)time_now, (unsigned long)time_retrieved);
- errors++;
+ time_errors++;
}
}
}
@@ -1897,12 +2133,12 @@ int test_dbengine(void)
}
error_out:
rrd_wrlock();
- rrdeng_prepare_exit(host->rrdeng_ctx);
+ rrdeng_prepare_exit((struct rrdengine_instance *)host->storage_instance[0]);
rrdhost_delete_charts(host);
- rrdeng_exit(host->rrdeng_ctx);
+ rrdeng_exit((struct rrdengine_instance *)host->storage_instance[0]);
rrd_unlock();
- return errors;
+ return errors + value_errors + time_errors;
}
struct dbengine_chart_thread {
@@ -1937,6 +2173,7 @@ collected_number generate_dbengine_chart_value(int chart_i, int dim_i, time_t ti
static void generate_dbengine_chart(void *arg)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
struct dbengine_chart_thread *thread_info = (struct dbengine_chart_thread *)arg;
RRDHOST *host = thread_info->host;
char *chartname = thread_info->chartname;
@@ -1983,12 +2220,13 @@ static void generate_dbengine_chart(void *arg)
thread_info->time_max = time_current;
}
for (j = 0; j < DSET_DIMS; ++j) {
- rrdeng_store_metric_finalize(rd[j]);
+ rrdeng_store_metric_finalize((rd[j])->tiers[0]->db_collection_handle);
}
}
void generate_dbengine_dataset(unsigned history_seconds)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
const int DSET_CHARTS = 16;
const int DSET_DIMS = 128;
const uint64_t EXPECTED_COMPRESSION_RATIO = 20;
@@ -2042,7 +2280,7 @@ void generate_dbengine_dataset(unsigned history_seconds)
}
freez(thread_info);
rrd_wrlock();
- rrdhost_free(host);
+ rrdhost_free(host, 1);
rrd_unlock();
}
@@ -2063,6 +2301,7 @@ struct dbengine_query_thread {
static void query_dbengine_chart(void *arg)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
struct dbengine_query_thread *thread_info = (struct dbengine_query_thread *)arg;
const int DSET_CHARTS = thread_info->dset_charts;
const int DSET_DIMS = thread_info->dset_dims;
@@ -2071,11 +2310,11 @@ static void query_dbengine_chart(void *arg)
RRDSET *st;
RRDDIM *rd;
uint8_t same;
- time_t time_now, time_retrieved;
+ time_t time_now, time_retrieved, end_time;
collected_number generatedv;
- calculated_number value, expected;
- storage_number n;
+ NETDATA_DOUBLE value, expected;
struct rrddim_query_handle handle;
+ size_t value_errors = 0, time_errors = 0;
do {
// pick a chart and dimension
@@ -2101,60 +2340,74 @@ static void query_dbengine_chart(void *arg)
time_before = MIN(time_after + duration, time_max); /* up to 1 hour queries */
}
- rd->state->query_ops.init(rd, &handle, time_after, time_before);
+ rd->tiers[0]->query_ops.init(rd->tiers[0]->db_metric_handle, &handle, time_after, time_before, TIER_QUERY_FETCH_SUM);
++thread_info->queries_nr;
for (time_now = time_after ; time_now <= time_before ; time_now += update_every) {
generatedv = generate_dbengine_chart_value(i, j, time_now);
- expected = unpack_storage_number(pack_storage_number((calculated_number) generatedv, SN_DEFAULT_FLAGS));
+ expected = unpack_storage_number(pack_storage_number((NETDATA_DOUBLE) generatedv, SN_DEFAULT_FLAGS));
- if (unlikely(rd->state->query_ops.is_finished(&handle))) {
+ if (unlikely(rd->tiers[0]->query_ops.is_finished(&handle))) {
if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
- fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value "
- CALCULATED_NUMBER_FORMAT ", found data gap, ### E R R O R ###\n",
+ fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value " NETDATA_DOUBLE_FORMAT
+ ", found data gap, ### E R R O R ###\n",
st->name, rd->name, (unsigned long) time_now, expected);
++thread_info->errors;
}
break;
}
- n = rd->state->query_ops.next_metric(&handle, &time_retrieved);
- if (SN_EMPTY_SLOT == n) {
+
+ STORAGE_POINT sp = rd->tiers[0]->query_ops.next_metric(&handle);
+ value = sp.sum;
+ time_retrieved = sp.start_time;
+ end_time = sp.end_time;
+
+ if (!netdata_double_isnumber(value)) {
if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
- fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value "
- CALCULATED_NUMBER_FORMAT ", found data gap, ### E R R O R ###\n",
+ fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value " NETDATA_DOUBLE_FORMAT
+ ", found data gap, ### E R R O R ###\n",
st->name, rd->name, (unsigned long) time_now, expected);
++thread_info->errors;
}
break;
}
++thread_info->queried_metrics_nr;
- value = unpack_storage_number(n);
- same = (calculated_number_round(value) == calculated_number_round(expected)) ? 1 : 0;
+ same = (roundndd(value) == roundndd(expected)) ? 1 : 0;
if (!same) {
if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
- fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value "
- CALCULATED_NUMBER_FORMAT ", found " CALCULATED_NUMBER_FORMAT
- ", ### E R R O R ###\n",
+ if(!value_errors)
+ fprintf(stderr, " DB-engine stresstest %s/%s: at %lu secs, expecting value " NETDATA_DOUBLE_FORMAT
+ ", found " NETDATA_DOUBLE_FORMAT ", ### E R R O R ###\n",
st->name, rd->name, (unsigned long) time_now, expected, value);
- ++thread_info->errors;
+ value_errors++;
+ thread_info->errors++;
}
}
- if (time_retrieved != time_now) {
+ if (end_time != time_now) {
if (!thread_info->delete_old_data) { /* data validation only when we don't delete */
- fprintf(stderr,
+ if(!time_errors)
+ fprintf(stderr,
" DB-engine stresstest %s/%s: at %lu secs, found timestamp %lu ### E R R O R ###\n",
st->name, rd->name, (unsigned long) time_now, (unsigned long) time_retrieved);
- ++thread_info->errors;
+ time_errors++;
+ thread_info->errors++;
}
}
}
- rd->state->query_ops.finalize(&handle);
+ rd->tiers[0]->query_ops.finalize(&handle);
} while(!thread_info->done);
+
+ if(value_errors)
+ fprintf(stderr, "%zu value errors encountered\n", value_errors);
+
+ if(time_errors)
+ fprintf(stderr, "%zu time errors encountered\n", time_errors);
}
void dbengine_stress_test(unsigned TEST_DURATION_SEC, unsigned DSET_CHARTS, unsigned QUERY_THREADS,
unsigned RAMP_UP_SECONDS, unsigned PAGE_CACHE_MB, unsigned DISK_SPACE_MB)
{
+ fprintf(stderr, "%s() running...\n", __FUNCTION__ );
const unsigned DSET_DIMS = 128;
const uint64_t EXPECTED_COMPRESSION_RATIO = 20;
const unsigned HISTORY_SECONDS = 3600 * 24 * 365 * 50; /* 50 year of history */
@@ -2289,9 +2542,9 @@ void dbengine_stress_test(unsigned TEST_DURATION_SEC, unsigned DSET_CHARTS, unsi
}
freez(query_threads);
rrd_wrlock();
- rrdeng_prepare_exit(host->rrdeng_ctx);
+ rrdeng_prepare_exit((struct rrdengine_instance *)host->storage_instance[0]);
rrdhost_delete_charts(host);
- rrdeng_exit(host->rrdeng_ctx);
+ rrdeng_exit((struct rrdengine_instance *)host->storage_instance[0]);
rrd_unlock();
}
diff --git a/daemon/unit_test.h b/daemon/unit_test.h
index 6a7a966c3..2d2533afe 100644
--- a/daemon/unit_test.h
+++ b/daemon/unit_test.h
@@ -10,6 +10,7 @@ extern int unit_test_str2ld(void);
extern int unit_test_buffer(void);
extern int unit_test_static_threads(void);
extern int test_sqlite(void);
+extern int unit_test_bitmap256(void);
#ifdef ENABLE_DBENGINE
extern int test_dbengine(void);
extern void generate_dbengine_dataset(unsigned history_seconds);