-rw-r--r--  .github/CODEOWNERS | 6
-rw-r--r--  .github/codeql/c-cpp-config.yml | 2
-rw-r--r--  .github/data/distros.yml | 13
-rw-r--r--  .github/labeler.yml | 4
-rwxr-xr-x  .github/scripts/pkg-test.sh | 20
-rw-r--r--  .github/workflows/build.yml | 4
-rw-r--r--  .github/workflows/codeql.yml | 1
-rw-r--r--  .gitignore | 15
-rw-r--r--  .gitmodules | 4
-rw-r--r--  CHANGELOG.md | 136
-rw-r--r--  CMakeLists.txt | 51
-rw-r--r--  Makefile.am | 179
-rw-r--r--  aclk/aclk_capas.c | 4
-rw-r--r--  aclk/aclk_otp.c | 10
-rw-r--r--  aclk/aclk_query.c | 12
-rw-r--r--  aclk/aclk_tx_msgs.c | 12
-rw-r--r--  aclk/aclk_tx_msgs.h | 2
-rw-r--r--  aclk/https_client.c | 2
-rw-r--r--  aclk/schema-wrappers/alarm_stream.cc | 4
-rw-r--r--  aclk/schema-wrappers/alarm_stream.h | 3
-rw-r--r--  cli/cli.c | 107
-rw-r--r--  collectors/Makefile.am | 1
-rw-r--r--  collectors/all.h | 10
-rw-r--r--  collectors/apps.plugin/apps_groups.conf | 1
-rw-r--r--  collectors/charts.d.plugin/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/ap/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/apcupsd/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/libreswan/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/nut/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/opensips/README.md | 2
-rw-r--r--  collectors/charts.d.plugin/sensors/README.md | 6
-rw-r--r--  collectors/debugfs.plugin/Makefile.am | 9
-rw-r--r--  collectors/debugfs.plugin/README.md | 65
-rw-r--r--  collectors/debugfs.plugin/debugfs_extfrag.c | 123
-rw-r--r--  collectors/debugfs.plugin/debugfs_plugin.c | 246
-rw-r--r--  collectors/debugfs.plugin/debugfs_plugin.h | 16
-rw-r--r--  collectors/debugfs.plugin/debugfs_zswap.c | 437
-rw-r--r--  collectors/debugfs.plugin/metrics.csv | 12
-rw-r--r--  collectors/diskspace.plugin/README.md | 2
-rw-r--r--  collectors/ebpf.plugin/README.md | 44
-rw-r--r--  collectors/ebpf.plugin/ebpf.c | 200
-rw-r--r--  collectors/ebpf.plugin/ebpf.d.conf | 7
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/cachestat.conf | 3
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/dcstat.conf | 3
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/fd.conf | 3
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/network.conf | 4
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/process.conf | 7
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/shm.conf | 3
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/swap.conf | 3
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/sync.conf | 4
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/vfs.conf | 13
-rw-r--r--  collectors/ebpf.plugin/ebpf.h | 6
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.c | 37
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.h | 4
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c | 92
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c | 93
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.c | 34
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.h | 3
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c | 81
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.c | 182
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.h | 10
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.c | 18
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.c | 23
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.c | 32
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c | 15
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c | 96
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h | 7
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c | 74
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c | 132
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.c | 32
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c | 81
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.c | 172
-rw-r--r--  collectors/ebpf.plugin/ebpf_unittest.c | 83
-rw-r--r--  collectors/ebpf.plugin/ebpf_unittest.h | 10
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c | 89
-rw-r--r--  collectors/freeipmi.plugin/freeipmi_plugin.c | 22
-rw-r--r--  collectors/nfacct.plugin/README.md | 5
-rw-r--r--  collectors/perf.plugin/README.md | 3
-rw-r--r--  collectors/plugins.d/plugins_d.c | 2
-rw-r--r--  collectors/plugins.d/plugins_d.h | 1
-rw-r--r--  collectors/plugins.d/pluginsd_parser.c | 39
-rw-r--r--  collectors/proc.plugin/proc_self_mountinfo.c | 12
-rw-r--r--  collectors/python.d.plugin/oracledb/README.md | 15
-rw-r--r--  collectors/python.d.plugin/oracledb/oracledb.chart.py | 57
-rw-r--r--  collectors/python.d.plugin/oracledb/oracledb.conf | 10
-rw-r--r--  collectors/python.d.plugin/smartd_log/smartd_log.conf | 1
-rw-r--r--  collectors/python.d.plugin/tor/tor.chart.py | 4
-rw-r--r--  collectors/python.d.plugin/tor/tor.conf | 2
-rw-r--r--  collectors/slabinfo.plugin/README.md | 3
-rw-r--r--  collectors/tc.plugin/plugin_tc.c | 2
-rw-r--r--  config.cmake.h.in | 1
-rw-r--r--  configs.signatures | 764
-rw-r--r--  configure.ac | 85
-rw-r--r--  contrib/debian/control | 148
-rw-r--r--  contrib/debian/netdata-ebpf-code-legacy.postinst | 13
-rw-r--r--  contrib/debian/netdata-ebpf-code-legacy.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-apps.postinst | 14
-rw-r--r--  contrib/debian/netdata-plugin-apps.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-chartsd.postinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-chartsd.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-debugfs.postinst | 14
-rw-r--r--  contrib/debian/netdata-plugin-debugfs.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-ebpf.postinst | 14
-rw-r--r--  contrib/debian/netdata-plugin-ebpf.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-freeipmi.postinst | 14
-rw-r--r--  contrib/debian/netdata-plugin-freeipmi.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-go.postinst | 14
-rw-r--r--  contrib/debian/netdata-plugin-go.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-nfacct.postinst | 14
-rw-r--r--  contrib/debian/netdata-plugin-nfacct.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-perf.postinst | 18
-rw-r--r--  contrib/debian/netdata-plugin-perf.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-pythond.postinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-pythond.preinst | 13
-rw-r--r--  contrib/debian/netdata-plugin-slabinfo.postinst | 14
-rw-r--r--  contrib/debian/netdata-plugin-slabinfo.preinst | 13
-rw-r--r--  contrib/debian/netdata.postinst | 38
-rw-r--r--  contrib/debian/netdata.preinst | 23
-rwxr-xr-x  contrib/debian/rules | 104
-rw-r--r--  daemon/analytics.c | 8
-rwxr-xr-x  daemon/anonymous-statistics.sh.in | 7
-rw-r--r--  daemon/buildinfo.c | 33
-rw-r--r--  daemon/common.h | 5
-rw-r--r--  daemon/main.c | 18
-rw-r--r--  daemon/main.h | 3
-rw-r--r--  daemon/signals.c | 98
-rw-r--r--  daemon/static_threads.c | 12
-rw-r--r--  database/README.md | 3
-rw-r--r--  database/contexts/api_v2.c | 19
-rw-r--r--  database/contexts/query_target.c | 2
-rw-r--r--  database/contexts/rrdcontext.h | 39
-rw-r--r--  database/rrd.h | 3
-rw-r--r--  database/rrdcalc.c | 6
-rw-r--r--  database/rrdcalc.h | 5
-rw-r--r--  database/rrdcalctemplate.c | 7
-rw-r--r--  database/rrdcalctemplate.h | 4
-rw-r--r--  database/rrdhost.c | 4
-rw-r--r--  database/rrdlabels.c | 5
-rw-r--r--  database/sqlite/sqlite3.c | 4299
-rw-r--r--  database/sqlite/sqlite3.h | 238
-rw-r--r--  database/sqlite/sqlite_aclk_alert.c | 101
-rw-r--r--  database/sqlite/sqlite_db_migration.c | 35
-rw-r--r--  database/sqlite/sqlite_db_migration.h | 1
-rw-r--r--  database/sqlite/sqlite_functions.c | 2
-rw-r--r--  database/sqlite/sqlite_health.c | 362
-rw-r--r--  database/sqlite/sqlite_health.h | 2
-rw-r--r--  docs/Demo-Sites.md | 23
-rw-r--r--  docs/anonymous-statistics.md | 6
-rw-r--r--  docs/category-overview-pages/accessing-netdata-dashboards.md | 3
-rw-r--r--  docs/category-overview-pages/build-the-netdata-agent-yourself.md | 3
-rw-r--r--  docs/category-overview-pages/install-netdata-on-embedded-systems.md | 3
-rw-r--r--  docs/category-overview-pages/install-with-a-cicd-provisioning-system.md | 3
-rw-r--r--  docs/category-overview-pages/machine-learning-and-assisted-troubleshooting.md | 3
-rw-r--r--  docs/category-overview-pages/maintenance-operations-on-netdata-agents.md | 3
-rw-r--r--  docs/category-overview-pages/metrics-streaming-and-replication.md | 3
-rw-r--r--  docs/category-overview-pages/misc-overview.md | 18
-rw-r--r--  docs/category-overview-pages/monitor-your-infrastructure.md | 3
-rw-r--r--  docs/category-overview-pages/netdata-apis.md | 5
-rw-r--r--  docs/category-overview-pages/netdata-architecture.md | 3
-rw-r--r--  docs/category-overview-pages/netdata-dashboards-and-visualizations.md | 3
-rw-r--r--  docs/category-overview-pages/optimizing-metrics-database.md | 3
-rw-r--r--  docs/cloud/alerts-notifications/add-discord-notification.md | 2
-rw-r--r--  docs/cloud/alerts-notifications/add-mattermost-notification-configuration.md | 51
-rw-r--r--  docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md | 4
-rw-r--r--  docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md | 4
-rw-r--r--  docs/cloud/alerts-notifications/add-slack-notification-configuration.md | 6
-rw-r--r--  docs/cloud/alerts-notifications/add-webhook-notification-configuration.md | 10
-rw-r--r--  docs/cloud/alerts-notifications/manage-alert-notification-silencing-rules.md | 58
-rw-r--r--  docs/cloud/alerts-notifications/manage-notification-methods.md | 5
-rw-r--r--  docs/cloud/alerts-notifications/notifications.md | 42
-rw-r--r--  docs/cloud/insights/events-feed.md | 22
-rw-r--r--  docs/cloud/manage/plans.md | 21
-rw-r--r--  docs/cloud/manage/role-based-access.md | 7
-rw-r--r--  docs/cloud/manage/view-plan-billing.md | 65
-rw-r--r--  docs/cloud/visualize/interact-new-charts.md | 404
-rw-r--r--  docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md | 138
-rw-r--r--  docs/netdata-security.md | 499
-rw-r--r--  exporting/README.md | 2
-rw-r--r--  exporting/WALKTHROUGH.md | 17
-rw-r--r--  exporting/clean_connectors.c | 5
-rw-r--r--  exporting/exporting_engine.c | 2
-rw-r--r--  exporting/exporting_engine.h | 3
-rw-r--r--  exporting/graphite/graphite.c | 5
-rw-r--r--  exporting/json/json.c | 5
-rw-r--r--  exporting/opentsdb/opentsdb.c | 10
-rw-r--r--  exporting/prometheus/prometheus.c | 85
-rw-r--r--  exporting/prometheus/remote_write/remote_write.c | 5
-rw-r--r--  exporting/prometheus/remote_write/remote_write_request.cc | 16
-rw-r--r--  exporting/send_data.c | 105
-rw-r--r--  health/Makefile.am | 1
-rw-r--r--  health/REFERENCE.md | 35
-rw-r--r--  health/health.c | 81
-rw-r--r--  health/health.d/boinc.conf | 4
-rw-r--r--  health/health.d/btrfs.conf | 9
-rw-r--r--  health/health.d/cockroachdb.conf | 10
-rw-r--r--  health/health.d/disks.conf | 10
-rw-r--r--  health/health.d/exporting.conf | 2
-rw-r--r--  health/health.d/httpcheck.conf | 5
-rw-r--r--  health/health.d/ioping.conf | 1
-rw-r--r--  health/health.d/mdstat.conf | 2
-rw-r--r--  health/health.d/net.conf | 18
-rw-r--r--  health/health.d/nvme.conf | 1
-rw-r--r--  health/health.d/ping.conf | 3
-rw-r--r--  health/health.d/plugin.conf | 11
-rw-r--r--  health/health.d/portcheck.conf | 3
-rw-r--r--  health/health.d/redis.conf | 4
-rw-r--r--  health/health.d/vsphere.conf | 8
-rw-r--r--  health/health.d/web_log.conf | 12
-rw-r--r--  health/health.d/windows.conf | 4
-rw-r--r--  health/health.h | 4
-rw-r--r--  health/health_config.c | 93
-rw-r--r--  health/health_json.c | 170
-rw-r--r--  health/health_log.c | 9
-rw-r--r--  httpd/h2o_utils.c | 60
-rw-r--r--  httpd/h2o_utils.h | 38
-rw-r--r--  httpd/http_server.c | 339
-rw-r--r--  httpd/http_server.h | 10
-rw-r--r--  libnetdata/buffer/buffer.c | 8
-rw-r--r--  libnetdata/buffer/buffer.h | 8
-rw-r--r--  libnetdata/ebpf/ebpf.c | 121
-rw-r--r--  libnetdata/ebpf/ebpf.h | 14
-rwxr-xr-x  libnetdata/gorilla/benchmark.sh | 14
-rwxr-xr-x  libnetdata/gorilla/fuzzer.sh | 14
-rw-r--r--  libnetdata/gorilla/gorilla.cc | 620
-rw-r--r--  libnetdata/gorilla/gorilla.h | 60
-rw-r--r--  libnetdata/http/http_defs.h | 30
-rw-r--r--  libnetdata/libjudy/src/JudyL/JudyLTables.c | 338
-rw-r--r--  libnetdata/libjudy/src/JudyL/JudyLTablesGen.c | 301
-rw-r--r--  libnetdata/libnetdata.h | 4
-rw-r--r--  libnetdata/parser/parser.c | 81
-rw-r--r--  libnetdata/parser/parser.h | 2
-rw-r--r--  libnetdata/popen/popen.c | 106
-rw-r--r--  libnetdata/popen/popen.h | 9
-rw-r--r--  libnetdata/simple_pattern/README.md | 2
-rw-r--r--  libnetdata/socket/security.c | 562
-rw-r--r--  libnetdata/socket/security.h | 63
-rw-r--r--  libnetdata/socket/socket.c | 115
-rw-r--r--  libnetdata/socket/socket.h | 24
-rw-r--r--  ml/Config.cc | 8
-rw-r--r--  ml/README.md | 14
-rw-r--r--  ml/ml.cc | 2
-rwxr-xr-x  netdata-installer.sh | 110
-rw-r--r--  netdata.spec.in | 480
-rw-r--r--  packaging/PLATFORM_SUPPORT.md | 6
-rwxr-xr-x  packaging/bundle-ebpf.sh | 7
-rwxr-xr-x  packaging/bundle-libbpf.sh | 6
-rw-r--r--  packaging/current_libbpf.checksums | 2
-rw-r--r--  packaging/current_libbpf.version | 2
-rw-r--r--  packaging/docker/Dockerfile | 7
-rw-r--r--  packaging/ebpf-co-re.checksums | 2
-rw-r--r--  packaging/ebpf-co-re.version | 2
-rw-r--r--  packaging/ebpf.checksums | 6
-rw-r--r--  packaging/ebpf.version | 2
-rw-r--r--  packaging/go.d.checksums | 34
-rw-r--r--  packaging/go.d.version | 2
-rwxr-xr-x  packaging/installer/install-required-packages.sh | 4
-rwxr-xr-x  packaging/installer/kickstart.sh | 60
-rw-r--r--  packaging/libbpf.checksums | 1
-rw-r--r--  packaging/libbpf.version | 1
-rwxr-xr-x  packaging/makeself/install-or-update.sh | 46
-rwxr-xr-x  packaging/makeself/jobs/99-makeself.install.sh | 1
-rw-r--r--  packaging/version | 2
-rw-r--r--  streaming/receiver.c | 152
-rw-r--r--  streaming/replication.c | 11
-rw-r--r--  streaming/rrdpush.c | 186
-rw-r--r--  streaming/rrdpush.h | 41
-rw-r--r--  streaming/sender.c | 391
-rw-r--r--  web/api/formatters/json/json.c | 26
-rw-r--r--  web/api/formatters/rrd2json.h | 2
-rw-r--r--  web/api/netdata-swagger.yaml | 11
-rw-r--r--  web/api/queries/query.c | 87
-rw-r--r--  web/api/queries/query.h | 1
-rw-r--r--  web/api/web_api_v1.c | 2
-rw-r--r--  web/gui/dashboard_info.js | 21
-rw-r--r--  web/server/static/static-threaded.c | 59
-rw-r--r--  web/server/web_client.c | 142
-rw-r--r--  web/server/web_client.h | 38
-rw-r--r--  web/server/web_client_cache.c | 10
278 files changed, 12080 insertions(+), 5224 deletions(-)
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 7857b9a73..bea69deac 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -19,7 +19,7 @@ collectors/cups.plugin/ @thiagoftsm
exporting/ @thiagoftsm
daemon/ @thiagoftsm @vkalintiris
database/ @thiagoftsm @vkalintiris
-docs/ @tkatsoulas @andrewm4894 @cakrit
+docs/ @tkatsoulas @andrewm4894 @Ancairon
health/ @thiagoftsm @vkalintiris @MrZammler
health/health.d/ @thiagoftsm @MrZammler
health/notifications/ @Ferroin @thiagoftsm @MrZammler
@@ -35,8 +35,8 @@ web/gui/ @jacekkolasa
# Ownership by filetype (overwrites ownership by directory)
*.am @Ferroin @tkatsoulas
-*.md @tkatsoulas @andrewm4894 @cakrit
-*.mdx @tkatsoulas @andrewm4894 @cakrit
+*.md @tkatsoulas @andrewm4894 @Ancairon
+*.mdx @tkatsoulas @andrewm4894 @Ancairon
Dockerfile* @Ferroin @tkatsoulas
# Ownership of specific files
diff --git a/.github/codeql/c-cpp-config.yml b/.github/codeql/c-cpp-config.yml
new file mode 100644
index 000000000..cd7c24011
--- /dev/null
+++ b/.github/codeql/c-cpp-config.yml
@@ -0,0 +1,2 @@
+paths-ignore:
+ - httpd/h2o
diff --git a/.github/data/distros.yml b/.github/data/distros.yml
index bfe8b7615..355378f5a 100644
--- a/.github/data/distros.yml
+++ b/.github/data/distros.yml
@@ -161,13 +161,6 @@ include:
repo_distro: fedora/37
test:
ebpf-core: true
- - <<: *fedora
- version: "36"
- packages:
- <<: *fedora_packages
- repo_distro: fedora/36
- test:
- ebpf-core: true
- &opensuse
distro: opensuse
@@ -184,6 +177,12 @@ include:
- aarch64
test:
ebpf-core: true
+ - <<: *opensuse
+ version: "15.5"
+ base_image: opensuse/leap:15.5
+ packages:
+ <<: *opensuse_packages
+ repo_distro: opensuse/15.5
- &oracle
distro: oraclelinux
diff --git a/.github/labeler.yml b/.github/labeler.yml
index 4d3a614d4..44c493b10 100644
--- a/.github/labeler.yml
+++ b/.github/labeler.yml
@@ -78,6 +78,10 @@ collectors/cups:
- collectors/cups.plugin/*
- collectors/cups.plugin/**/*
+collectors/debugfs:
+ - collectors/debugfs.plugin/*
+ - collectors/debugfs.plugin/**/*
+
collectors/diskspace:
- collectors/diskspace.plugin/*
- collectors/diskspace.plugin/**/*
diff --git a/.github/scripts/pkg-test.sh b/.github/scripts/pkg-test.sh
index 45b8c320b..85e8b2e8d 100755
--- a/.github/scripts/pkg-test.sh
+++ b/.github/scripts/pkg-test.sh
@@ -14,7 +14,7 @@ install_debian_like() {
# Install Netdata
# Strange quoting is required here so that glob matching works.
- apt-get install -y /netdata/artifacts/netdata_"${VERSION}"*_*.deb || exit 1
+ apt-get install -y $(find /netdata/artifacts -type f -name 'netdata*.deb' ! -name '*dbgsym*' ! -name '*cups*' ! -name '*freeipmi*') || exit 3
# Install testing tools
apt-get install -y --no-install-recommends curl "${netcat}" jq || exit 1
@@ -26,15 +26,13 @@ install_fedora_like() {
PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")"
- pkg_version="$(echo "${VERSION}" | tr - .)"
-
if [ "${PKGMGR}" = "dnf" ]; then
opts="--allowerasing"
fi
# Install Netdata
# Strange quoting is required here so that glob matching works.
- "$PKGMGR" install -y /netdata/artifacts/netdata-"${pkg_version}"-*.rpm || exit 1
+ "$PKGMGR" install -y /netdata/artifacts/netdata*.rpm || exit 1
# Install testing tools
"$PKGMGR" install -y curl nc jq || exit 1
@@ -46,8 +44,6 @@ install_centos() {
PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")"
- pkg_version="$(echo "${VERSION}" | tr - .)"
-
if [ "${PKGMGR}" = "dnf" ]; then
opts="--allowerasing"
fi
@@ -57,7 +53,7 @@ install_centos() {
# Install Netdata
# Strange quoting is required here so that glob matching works.
- "$PKGMGR" install -y /netdata/artifacts/netdata-"${pkg_version}"-*.rpm || exit 1
+ "$PKGMGR" install -y /netdata/artifacts/netdata*.rpm || exit 1
# Install testing tools
# shellcheck disable=SC2086
@@ -67,15 +63,13 @@ install_centos() {
install_amazon_linux() {
PKGMGR="$( (command -v dnf > /dev/null && echo "dnf") || echo "yum")"
- pkg_version="$(echo "${VERSION}" | tr - .)"
-
if [ "${PKGMGR}" = "dnf" ]; then
opts="--allowerasing"
fi
# Install Netdata
# Strange quoting is required here so that glob matching works.
- "$PKGMGR" install -y /netdata/artifacts/netdata-"${pkg_version}"-*.rpm || exit 1
+ "$PKGMGR" install -y /netdata/artifacts/netdata*.rpm || exit 1
# Install testing tools
# shellcheck disable=SC2086
@@ -86,14 +80,12 @@ install_suse_like() {
# Using a glob pattern here because I can't reliably determine what the
# resulting package name will be (TODO: There must be a better way!)
- pkg_version="$(echo "${VERSION}" | tr - .)"
-
# Install Netdata
# Strange quoting is required here so that glob matching works.
- zypper install -y --allow-unsigned-rpm /netdata/artifacts/netdata-"${pkg_version}"-*.rpm || exit 1
+ zypper install -y --allow-downgrade --allow-unsigned-rpm /netdata/artifacts/netdata*.rpm || exit 1
# Install testing tools
- zypper install -y --no-recommends curl netcat-openbsd jq || exit 1
+ zypper install -y --allow-downgrade --no-recommends curl netcat-openbsd jq || exit 1
}
dump_log() {
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index c349e4fdd..da828f51e 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -659,10 +659,10 @@ jobs:
credentials_json: ${{ secrets.GCS_STORAGE_SERVICE_KEY_JSON }}
- name: Setup GCS
id: gcs-setup
- uses: google-github-actions/setup-gcloud@v1.1.0
+ uses: google-github-actions/setup-gcloud@v1.1.1
- name: Upload Artifacts
id: upload
- uses: google-github-actions/upload-cloud-storage@v1.0.1
+ uses: google-github-actions/upload-cloud-storage@v1.0.3
with:
destination: ${{ secrets.GCP_NIGHTLY_STORAGE_BUCKET }}
gzip: false
diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml
index b2af615e4..174f650ea 100644
--- a/.github/workflows/codeql.yml
+++ b/.github/workflows/codeql.yml
@@ -84,6 +84,7 @@ jobs:
uses: github/codeql-action/init@v2
with:
languages: cpp
+ config-file: ./.github/codeql/c-cpp-config.yml
- name: Prepare environment
run: ./packaging/installer/install-required-packages.sh --dont-wait --non-interactive netdata
- name: Build netdata
diff --git a/.gitignore b/.gitignore
index 14ba1c61c..ec56d649d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -74,6 +74,9 @@ collectors/ebpf.plugin/reset_netdata_trace.sh
!ebpf.plugin/
collectors/ebpf.plugin/includes/
+debugfs.plugin
+!debugfs.plugin/
+
# protoc generated files
*.pb.cc
*.pb.h
@@ -86,9 +89,6 @@ packaging/installer/.environment.sh
# netdata makeself downloads
packaging/makeself/tmp/
-# Libbpf is always overwritten depending of kernel version
-packaging/libbpf.*
-
# coverity
cov-int/
netdata-coverity-analysis.tgz
@@ -229,10 +229,11 @@ Session.*.vim
# Jupyter notebook checkpoints
.ipynb_checkpoints
-# Judy stuff
-JudyLTables.c
-judyltablesgen
-
# m4 generated ksys
database/engine/journalfile_v2.ksy
database/engine/journalfile_v2_virtmemb.ksy
+
+# gorilla benchmark & fuzz binaries
+libnetdata/gorilla/gorilla_benchmark
+libnetdata/gorilla/gorilla_fuzzer
+libnetdata/gorilla/fuzz-*.log
diff --git a/.gitmodules b/.gitmodules
index dd687fee8..d2730eb8a 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -9,3 +9,7 @@
url = https://github.com/davisking/dlib.git
shallow = true
ignore = dirty
+[submodule "httpd/h2o"]
+ path = httpd/h2o
+ url = https://github.com/h2o/h2o.git
+ ignore = untracked
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e45e0fe68..2f08eb7a4 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,25 +1,84 @@
# Changelog
-## [v1.39.1](https://github.com/netdata/netdata/tree/v1.39.1) (2023-05-18)
+## [v1.40.0](https://github.com/netdata/netdata/tree/v1.40.0) (2023-06-14)
-[Full Changelog](https://github.com/netdata/netdata/compare/v1.39.0...v1.39.1)
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.39.1...v1.40.0)
**Merged pull requests:**
+- ebpf: disable sync by default [\#15190](https://github.com/netdata/netdata/pull/15190) ([ilyam8](https://github.com/ilyam8))
+- Add support for SUSE 15.5 [\#15189](https://github.com/netdata/netdata/pull/15189) ([tkatsoulas](https://github.com/tkatsoulas))
+- bump go.d.plugin to v0.53.2 [\#15184](https://github.com/netdata/netdata/pull/15184) ([ilyam8](https://github.com/ilyam8))
+- Do strdupz on empty string [\#15183](https://github.com/netdata/netdata/pull/15183) ([MrZammler](https://github.com/MrZammler))
+- set setuid for go.d.plugin in container [\#15180](https://github.com/netdata/netdata/pull/15180) ([ilyam8](https://github.com/ilyam8))
+- bump go.d.plugin to v0.53.1 [\#15179](https://github.com/netdata/netdata/pull/15179) ([ilyam8](https://github.com/ilyam8))
+- Update smartd\_log.conf [\#15171](https://github.com/netdata/netdata/pull/15171) ([TougeAI](https://github.com/TougeAI))
+- Change package conflicts policy on deb based packages [\#15170](https://github.com/netdata/netdata/pull/15170) ([tkatsoulas](https://github.com/tkatsoulas))
+- Fix coverity issues [\#15169](https://github.com/netdata/netdata/pull/15169) ([stelfrag](https://github.com/stelfrag))
+- Fix user and group handling in DEB packages. [\#15166](https://github.com/netdata/netdata/pull/15166) ([Ferroin](https://github.com/Ferroin))
+- change mandatory packages for RPMs [\#15165](https://github.com/netdata/netdata/pull/15165) ([tkatsoulas](https://github.com/tkatsoulas))
+- Fix CID 385073 -- Uninitialized scalar variable [\#15163](https://github.com/netdata/netdata/pull/15163) ([stelfrag](https://github.com/stelfrag))
+- api v2 nodes for streaming statuses [\#15162](https://github.com/netdata/netdata/pull/15162) ([ktsaou](https://github.com/ktsaou))
+- Restrict ebpf dep in DEB package to amd64 only. [\#15161](https://github.com/netdata/netdata/pull/15161) ([Ferroin](https://github.com/Ferroin))
+- Make plugin packages hard dependencies. [\#15160](https://github.com/netdata/netdata/pull/15160) ([Ferroin](https://github.com/Ferroin))
+- freeipmi: add availability status chart and alarm [\#15151](https://github.com/netdata/netdata/pull/15151) ([ilyam8](https://github.com/ilyam8))
+- Check null transition id and config hash [\#15147](https://github.com/netdata/netdata/pull/15147) ([stelfrag](https://github.com/stelfrag))
+- eBPF unittest + bug fix [\#15146](https://github.com/netdata/netdata/pull/15146) ([thiagoftsm](https://github.com/thiagoftsm))
+- Mattermost cloud integration docs [\#15141](https://github.com/netdata/netdata/pull/15141) ([car12o](https://github.com/car12o))
+- send EXIT before exiting in freeipmi and debugfs plugins [\#15140](https://github.com/netdata/netdata/pull/15140) ([ilyam8](https://github.com/ilyam8))
+- minor - fix syntax in config.ac [\#15139](https://github.com/netdata/netdata/pull/15139) ([underhood](https://github.com/underhood))
+- fix a typo in `libnetdata/simple_pattern/README.md` [\#15135](https://github.com/netdata/netdata/pull/15135) ([n0099](https://github.com/n0099))
+- updated events docs and minor fix on silecing rules table [\#15134](https://github.com/netdata/netdata/pull/15134) ([hugovalente-pm](https://github.com/hugovalente-pm))
+- Provide necessary permission for the kickstart to run the netdata-updater script [\#15132](https://github.com/netdata/netdata/pull/15132) ([tkatsoulas](https://github.com/tkatsoulas))
+- fix: allow square brackets in label value [\#15131](https://github.com/netdata/netdata/pull/15131) ([ilyam8](https://github.com/ilyam8))
+- Add library to encode/decode Gorilla compressed buffers. [\#15128](https://github.com/netdata/netdata/pull/15128) ([vkalintiris](https://github.com/vkalintiris))
+- Fix bundling of eBPF legacy code for DEB packages. [\#15127](https://github.com/netdata/netdata/pull/15127) ([Ferroin](https://github.com/Ferroin))
+- Percentage of group aggregatable at cloud - fixed for backwards compatibility [\#15126](https://github.com/netdata/netdata/pull/15126) ([ktsaou](https://github.com/ktsaou))
+- Fix package versioning issues. [\#15125](https://github.com/netdata/netdata/pull/15125) ([Ferroin](https://github.com/Ferroin))
+- Revert "percentage of group is now aggregatable at cloud across multiple nodes" [\#15122](https://github.com/netdata/netdata/pull/15122) ([ktsaou](https://github.com/ktsaou))
+- add netdata demo rooms to the list of demo urls [\#15120](https://github.com/netdata/netdata/pull/15120) ([andrewm4894](https://github.com/andrewm4894))
+- Fix handling of eBPF plugin for DEB packages. [\#15117](https://github.com/netdata/netdata/pull/15117) ([Ferroin](https://github.com/Ferroin))
+- Re-write of SSL support in Netdata; restoration of SIGCHLD; detection of stale plugins; streaming improvements [\#15113](https://github.com/netdata/netdata/pull/15113) ([ktsaou](https://github.com/ktsaou))
+- initial draft for the silencing docs [\#15112](https://github.com/netdata/netdata/pull/15112) ([hugovalente-pm](https://github.com/hugovalente-pm))
+- Generate, store and transmit a unique alert event\_hash\_id [\#15111](https://github.com/netdata/netdata/pull/15111) ([MrZammler](https://github.com/MrZammler))
+- Only queue an alert to the cloud when it's inserted [\#15110](https://github.com/netdata/netdata/pull/15110) ([MrZammler](https://github.com/MrZammler))
+- percentage of group is now aggregatable at cloud across multiple nodes [\#15109](https://github.com/netdata/netdata/pull/15109) ([ktsaou](https://github.com/ktsaou))
+- percentage-of-group: fix uninitialized array vh [\#15106](https://github.com/netdata/netdata/pull/15106) ([ktsaou](https://github.com/ktsaou))
+- fix the units when returning percentage of a group [\#15105](https://github.com/netdata/netdata/pull/15105) ([ktsaou](https://github.com/ktsaou))
+- oracledb: make conn protocol configurable [\#15104](https://github.com/netdata/netdata/pull/15104) ([ilyam8](https://github.com/ilyam8))
+- /api/v2/data percentage calculation on grouped queries [\#15100](https://github.com/netdata/netdata/pull/15100) ([ktsaou](https://github.com/ktsaou))
+- Add chart labels to Prometheus. [\#15099](https://github.com/netdata/netdata/pull/15099) ([thiagoftsm](https://github.com/thiagoftsm))
+- Invert order in remote write [\#15097](https://github.com/netdata/netdata/pull/15097) ([thiagoftsm](https://github.com/thiagoftsm))
+- fix cockroachdb alarms [\#15095](https://github.com/netdata/netdata/pull/15095) ([ilyam8](https://github.com/ilyam8))
+- Address issue with Thanos Receiver [\#15094](https://github.com/netdata/netdata/pull/15094) ([thiagoftsm](https://github.com/thiagoftsm))
+- update ml defaults to 24h [\#15093](https://github.com/netdata/netdata/pull/15093) ([andrewm4894](https://github.com/andrewm4894))
+- Create category overview pages for learn's restructure [\#15091](https://github.com/netdata/netdata/pull/15091) ([Ancairon](https://github.com/Ancairon))
+- Release buffer in case of error -- CID 385075 [\#15090](https://github.com/netdata/netdata/pull/15090) ([stelfrag](https://github.com/stelfrag))
+- health: remove "families" from alarms config [\#15086](https://github.com/netdata/netdata/pull/15086) ([ilyam8](https://github.com/ilyam8))
+- update agent telemetry url to be cloud function instead of posthog [\#15085](https://github.com/netdata/netdata/pull/15085) ([andrewm4894](https://github.com/andrewm4894))
+- mentioned waive off of space subscription price [\#15082](https://github.com/netdata/netdata/pull/15082) ([hugovalente-pm](https://github.com/hugovalente-pm))
+- Python Dependency Migration - OracleDB Python Module [\#15074](https://github.com/netdata/netdata/pull/15074) ([EricAndrechek](https://github.com/EricAndrechek))
+- Free context when establishing ACLK connection [\#15073](https://github.com/netdata/netdata/pull/15073) ([stelfrag](https://github.com/stelfrag))
+- Update Security doc [\#15072](https://github.com/netdata/netdata/pull/15072) ([tkatsoulas](https://github.com/tkatsoulas))
- Update netdata-security.md [\#15068](https://github.com/netdata/netdata/pull/15068) ([cakrit](https://github.com/cakrit))
- Update netdata-security.md [\#15067](https://github.com/netdata/netdata/pull/15067) ([cakrit](https://github.com/cakrit))
+- Simplify loop in alert checkpoint [\#15065](https://github.com/netdata/netdata/pull/15065) ([MrZammler](https://github.com/MrZammler))
- Update CODEOWNERS [\#15064](https://github.com/netdata/netdata/pull/15064) ([cakrit](https://github.com/cakrit))
- Update netdata-security.md [\#15063](https://github.com/netdata/netdata/pull/15063) ([sashwathn](https://github.com/sashwathn))
+- Fix CodeQL warning [\#15062](https://github.com/netdata/netdata/pull/15062) ([stelfrag](https://github.com/stelfrag))
+- Improve some of the error messages in the kickstart script. [\#15061](https://github.com/netdata/netdata/pull/15061) ([Ferroin](https://github.com/Ferroin))
- Fix memory leak when sending alerts checkoint [\#15060](https://github.com/netdata/netdata/pull/15060) ([stelfrag](https://github.com/stelfrag))
- bump go.d.plugin to v0.53.0 [\#15059](https://github.com/netdata/netdata/pull/15059) ([ilyam8](https://github.com/ilyam8))
- Fix ACLK memleak [\#15055](https://github.com/netdata/netdata/pull/15055) ([underhood](https://github.com/underhood))
- fix\(debugfs/zswap\): don't collect metrics if Zswap is disabled [\#15054](https://github.com/netdata/netdata/pull/15054) ([ilyam8](https://github.com/ilyam8))
- Comment out default `role_recipients_*` values [\#15047](https://github.com/netdata/netdata/pull/15047) ([jamgregory](https://github.com/jamgregory))
- Small update ml defaults [\#15046](https://github.com/netdata/netdata/pull/15046) ([andrewm4894](https://github.com/andrewm4894))
+- Better cleanup of health log table [\#15045](https://github.com/netdata/netdata/pull/15045) ([MrZammler](https://github.com/MrZammler))
- Fix handling of permissions in static installs. [\#15042](https://github.com/netdata/netdata/pull/15042) ([Ferroin](https://github.com/Ferroin))
- Update tor.chart.py [\#15041](https://github.com/netdata/netdata/pull/15041) ([jmphilippe](https://github.com/jmphilippe))
- Wording fix in interact with charts doc [\#15040](https://github.com/netdata/netdata/pull/15040) ([Ancairon](https://github.com/Ancairon))
- fatal in claim\(\) only if --claim-only is used [\#15039](https://github.com/netdata/netdata/pull/15039) ([ilyam8](https://github.com/ilyam8))
+- Update libbpf [\#15038](https://github.com/netdata/netdata/pull/15038) ([thiagoftsm](https://github.com/thiagoftsm))
- Slight wording fix on the database readme [\#15034](https://github.com/netdata/netdata/pull/15034) ([Ancairon](https://github.com/Ancairon))
- Update SQLITE to version 3.41.2 [\#15031](https://github.com/netdata/netdata/pull/15031) ([stelfrag](https://github.com/stelfrag))
- Update troubleshooting-agent-with-cloud-connection.md [\#15029](https://github.com/netdata/netdata/pull/15029) ([cakrit](https://github.com/cakrit))
@@ -32,12 +91,19 @@
- Update chart documentation [\#15010](https://github.com/netdata/netdata/pull/15010) ([Ancairon](https://github.com/Ancairon))
- Honor maximum message size limit of MQTT server [\#15009](https://github.com/netdata/netdata/pull/15009) ([underhood](https://github.com/underhood))
- libjudy: remove JudyLTablesGen [\#14984](https://github.com/netdata/netdata/pull/14984) ([mochaaP](https://github.com/mochaaP))
+- Use chart labels to filter alerts [\#14982](https://github.com/netdata/netdata/pull/14982) ([MrZammler](https://github.com/MrZammler))
- Remove Fedora 36 from CI and platform support. [\#14938](https://github.com/netdata/netdata/pull/14938) ([Ferroin](https://github.com/Ferroin))
- make zlib compulsory dep [\#14928](https://github.com/netdata/netdata/pull/14928) ([underhood](https://github.com/underhood))
+- Try to detect bind mounts [\#14831](https://github.com/netdata/netdata/pull/14831) ([MrZammler](https://github.com/MrZammler))
- Remove old logic for handling of legacy stock config files. [\#14829](https://github.com/netdata/netdata/pull/14829) ([Ferroin](https://github.com/Ferroin))
- fix infiniband bytes counters multiplier and divisor [\#14748](https://github.com/netdata/netdata/pull/14748) ([ilyam8](https://github.com/ilyam8))
+- New eBPF option [\#14691](https://github.com/netdata/netdata/pull/14691) ([thiagoftsm](https://github.com/thiagoftsm))
- initial minimal h2o webserver integration [\#14585](https://github.com/netdata/netdata/pull/14585) ([underhood](https://github.com/underhood))
+## [v1.39.1](https://github.com/netdata/netdata/tree/v1.39.1) (2023-05-18)
+
+[Full Changelog](https://github.com/netdata/netdata/compare/v1.39.0...v1.39.1)
+
## [v1.39.0](https://github.com/netdata/netdata/tree/v1.39.0) (2023-05-08)
[Full Changelog](https://github.com/netdata/netdata/compare/v1.38.1...v1.39.0)
@@ -342,67 +408,6 @@
- Roles docs: Add Early Bird and Member role [\#14537](https://github.com/netdata/netdata/pull/14537) ([hugovalente-pm](https://github.com/hugovalente-pm))
- Fix broken Alma Linux entries in build matrix generation. [\#14536](https://github.com/netdata/netdata/pull/14536) ([Ferroin](https://github.com/Ferroin))
- Re-index when machine guid changes [\#14535](https://github.com/netdata/netdata/pull/14535) ([MrZammler](https://github.com/MrZammler))
-- Use BoxListItemRegexLink component in docs/quickstart/insfrastructure.md [\#14533](https://github.com/netdata/netdata/pull/14533) ([Ancairon](https://github.com/Ancairon))
-- Update main metric retention docs [\#14530](https://github.com/netdata/netdata/pull/14530) ([cakrit](https://github.com/cakrit))
-- Add Debian 12 to our CI and platform support document. [\#14529](https://github.com/netdata/netdata/pull/14529) ([Ferroin](https://github.com/Ferroin))
-- Update role-based-access.md [\#14528](https://github.com/netdata/netdata/pull/14528) ([cakrit](https://github.com/cakrit))
-- added section to explain impacts on member role [\#14527](https://github.com/netdata/netdata/pull/14527) ([hugovalente-pm](https://github.com/hugovalente-pm))
-- fix setting go.d.plugin capabilities [\#14525](https://github.com/netdata/netdata/pull/14525) ([ilyam8](https://github.com/ilyam8))
-- Simplify parser README.md and add parser files to CMakeLists.txt [\#14523](https://github.com/netdata/netdata/pull/14523) ([stelfrag](https://github.com/stelfrag))
-- Link statically libnetfilter\_acct into our static builds [\#14516](https://github.com/netdata/netdata/pull/14516) ([tkatsoulas](https://github.com/tkatsoulas))
-- Fix broken links in markdown files [\#14513](https://github.com/netdata/netdata/pull/14513) ([Ancairon](https://github.com/Ancairon))
-- Make external plugins a category page in learn [\#14511](https://github.com/netdata/netdata/pull/14511) ([cakrit](https://github.com/cakrit))
-- Learn integrations category changes [\#14510](https://github.com/netdata/netdata/pull/14510) ([cakrit](https://github.com/cakrit))
-- Move collectors under Integrations/Monitoring [\#14509](https://github.com/netdata/netdata/pull/14509) ([cakrit](https://github.com/cakrit))
-- Guides and collectors reorg and cleanup part 1 [\#14507](https://github.com/netdata/netdata/pull/14507) ([cakrit](https://github.com/cakrit))
-- replicating gaps [\#14506](https://github.com/netdata/netdata/pull/14506) ([ktsaou](https://github.com/ktsaou))
-- More learn reorg/reordering [\#14505](https://github.com/netdata/netdata/pull/14505) ([cakrit](https://github.com/cakrit))
-- Revert changes to platform support policy [\#14504](https://github.com/netdata/netdata/pull/14504) ([cakrit](https://github.com/cakrit))
-- Top level learn changes [\#14503](https://github.com/netdata/netdata/pull/14503) ([cakrit](https://github.com/cakrit))
-- Fix broken links in collectors/COLLECTORS.md [\#14502](https://github.com/netdata/netdata/pull/14502) ([Ancairon](https://github.com/Ancairon))
-- Update Demo-Sites.md [\#14501](https://github.com/netdata/netdata/pull/14501) ([cakrit](https://github.com/cakrit))
-- Member role on roles permissions docs [\#14500](https://github.com/netdata/netdata/pull/14500) ([hugovalente-pm](https://github.com/hugovalente-pm))
-- Reorganize contents of Getting Started [\#14499](https://github.com/netdata/netdata/pull/14499) ([cakrit](https://github.com/cakrit))
-- Correct title of contribute to doccumentation [\#14498](https://github.com/netdata/netdata/pull/14498) ([cakrit](https://github.com/cakrit))
-- Delete getting-started-overview.md [\#14497](https://github.com/netdata/netdata/pull/14497) ([Ancairon](https://github.com/Ancairon))
-- added Challenge secret and rooms object on the payload [\#14496](https://github.com/netdata/netdata/pull/14496) ([hugovalente-pm](https://github.com/hugovalente-pm))
-- Category overview pages [\#14495](https://github.com/netdata/netdata/pull/14495) ([Ancairon](https://github.com/Ancairon))
-- JSON internal API, IEEE754 base64/hex streaming, weights endpoint optimization [\#14493](https://github.com/netdata/netdata/pull/14493) ([ktsaou](https://github.com/ktsaou))
-- Fix crash when child connects [\#14492](https://github.com/netdata/netdata/pull/14492) ([stelfrag](https://github.com/stelfrag))
-- Plans docs [\#14491](https://github.com/netdata/netdata/pull/14491) ([hugovalente-pm](https://github.com/hugovalente-pm))
-- Try making it landing page of getting started directly [\#14489](https://github.com/netdata/netdata/pull/14489) ([cakrit](https://github.com/cakrit))
-- Update Demo-Sites.md [\#14488](https://github.com/netdata/netdata/pull/14488) ([Ancairon](https://github.com/Ancairon))
-- Make the introduction a category link [\#14485](https://github.com/netdata/netdata/pull/14485) ([Ancairon](https://github.com/Ancairon))
-- Update AD title [\#14484](https://github.com/netdata/netdata/pull/14484) ([thiagoftsm](https://github.com/thiagoftsm))
-- Fix coverity issues [\#14480](https://github.com/netdata/netdata/pull/14480) ([stelfrag](https://github.com/stelfrag))
-- Remove obsolete or redundant docs [\#14476](https://github.com/netdata/netdata/pull/14476) ([cakrit](https://github.com/cakrit))
-- Incorporate interoperability and fix edit link [\#14475](https://github.com/netdata/netdata/pull/14475) ([cakrit](https://github.com/cakrit))
-- Upgrade demo sites to the getting started section [\#14474](https://github.com/netdata/netdata/pull/14474) ([cakrit](https://github.com/cakrit))
-- Add a file to Learn [\#14473](https://github.com/netdata/netdata/pull/14473) ([Ancairon](https://github.com/Ancairon))
-- fix a possible bug with an image in the md file [\#14472](https://github.com/netdata/netdata/pull/14472) ([Ancairon](https://github.com/Ancairon))
-- Add sbindir\_POST template for v235 service file [\#14471](https://github.com/netdata/netdata/pull/14471) ([MrZammler](https://github.com/MrZammler))
-- Fix random crash on agent shutdown [\#14470](https://github.com/netdata/netdata/pull/14470) ([stelfrag](https://github.com/stelfrag))
-- Move ansible md [\#14469](https://github.com/netdata/netdata/pull/14469) ([cakrit](https://github.com/cakrit))
-- Correct link to ansible playbook [\#14468](https://github.com/netdata/netdata/pull/14468) ([cakrit](https://github.com/cakrit))
-- Moved contents of get started to installer readme [\#14467](https://github.com/netdata/netdata/pull/14467) ([cakrit](https://github.com/cakrit))
-- Add markdown files in Learn [\#14466](https://github.com/netdata/netdata/pull/14466) ([Ancairon](https://github.com/Ancairon))
-- Virtual hosts for data collection [\#14464](https://github.com/netdata/netdata/pull/14464) ([ktsaou](https://github.com/ktsaou))
-- Memory management eBPF [\#14462](https://github.com/netdata/netdata/pull/14462) ([thiagoftsm](https://github.com/thiagoftsm))
-- Add contents of packaging/installer/readme.md [\#14461](https://github.com/netdata/netdata/pull/14461) ([cakrit](https://github.com/cakrit))
-- Add mention of cloud in next steps UI etc [\#14459](https://github.com/netdata/netdata/pull/14459) ([cakrit](https://github.com/cakrit))
-- Fix links and add to learn [\#14458](https://github.com/netdata/netdata/pull/14458) ([cakrit](https://github.com/cakrit))
-- Add export for people running their own registry [\#14457](https://github.com/netdata/netdata/pull/14457) ([cakrit](https://github.com/cakrit))
-- Support installing extra packages in Docker images at runtime. [\#14456](https://github.com/netdata/netdata/pull/14456) ([Ferroin](https://github.com/Ferroin))
-- Prevent crash when running '-W createdataset' [\#14455](https://github.com/netdata/netdata/pull/14455) ([MrZammler](https://github.com/MrZammler))
-- remove deprecated python.d collectors announced in v1.38.0 [\#14454](https://github.com/netdata/netdata/pull/14454) ([ilyam8](https://github.com/ilyam8))
-- Update static build dependencies [\#14450](https://github.com/netdata/netdata/pull/14450) ([tkatsoulas](https://github.com/tkatsoulas))
-- do not report dimensions that failed to be queried [\#14447](https://github.com/netdata/netdata/pull/14447) ([ktsaou](https://github.com/ktsaou))
-- Fix agent build failure on FreeBSD 14.0 due to new tcpstat struct [\#14446](https://github.com/netdata/netdata/pull/14446) ([Dim-P](https://github.com/Dim-P))
-- minor fix in the metadata of libnetdata/ebpf AND log documents [\#14445](https://github.com/netdata/netdata/pull/14445) ([tkatsoulas](https://github.com/tkatsoulas))
-- Roles permissions docs [\#14444](https://github.com/netdata/netdata/pull/14444) ([hugovalente-pm](https://github.com/hugovalente-pm))
-- Only load required charts for rrdvars [\#14443](https://github.com/netdata/netdata/pull/14443) ([MrZammler](https://github.com/MrZammler))
-- Typos in in notification docs [\#14440](https://github.com/netdata/netdata/pull/14440) ([iorvd](https://github.com/iorvd))
-- Streaming interpolated values [\#14431](https://github.com/netdata/netdata/pull/14431) ([ktsaou](https://github.com/ktsaou))
## [v1.38.1](https://github.com/netdata/netdata/tree/v1.38.1) (2023-02-13)
@@ -412,11 +417,6 @@
[Full Changelog](https://github.com/netdata/netdata/compare/v1.37.1...v1.38.0)
-**Merged pull requests:**
-
-- Updated w1sensor.chart.py [\#14435](https://github.com/netdata/netdata/pull/14435) ([martindue](https://github.com/martindue))
-- replication to streaming transition when there are gaps [\#14434](https://github.com/netdata/netdata/pull/14434) ([ktsaou](https://github.com/ktsaou))
-
## [v1.37.1](https://github.com/netdata/netdata/tree/v1.37.1) (2022-12-05)
[Full Changelog](https://github.com/netdata/netdata/compare/v1.37.0...v1.37.1)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2447abb6c..34c381619 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -86,7 +86,6 @@ pkg_check_modules(ZLIB REQUIRED zlib)
set(NETDATA_COMMON_CFLAGS ${NETDATA_COMMON_CFLAGS} ${ZLIB_CFLAGS_OTHER})
set(NETDATA_COMMON_LIBRARIES ${NETDATA_COMMON_LIBRARIES} ${ZLIB_LIBRARIES})
set(NETDATA_COMMON_INCLUDE_DIRS ${NETDATA_COMMON_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIRS})
-# set(NETDATA_REQUIRED_DEFINES "${NETDATA_REQUIRED_DEFINES} -DNETDATA_WITH_ZLIB=1")
# -----------------------------------------------------------------------------
# libuv multi-platform support library with a focus on asynchronous I/O
@@ -387,35 +386,14 @@ set(LIBJUDY_SOURCES
libnetdata/libjudy/src/JudyL/JudyLNextEmpty.c
libnetdata/libjudy/src/JudyL/JudyLPrev.c
libnetdata/libjudy/src/JudyL/JudyLPrevEmpty.c
- JudyLTables.c
+ libnetdata/libjudy/src/JudyL/JudyLTables.c
libnetdata/libjudy/src/JudyHS/JudyHS.c)
ADD_LIBRARY(judy STATIC
${LIBJUDY_SOURCES})
-ADD_EXECUTABLE(judyltablesgen
- libnetdata/libjudy/src/JudyL/JudyLTablesGen.c)
-
-target_include_directories(judyltablesgen PUBLIC
- libnetdata/libjudy/src
- libnetdata/libjudy/src/JudyCommon)
-
-target_compile_options(judyltablesgen PUBLIC
- -Wno-format
- -Wno-format-security)
-
include_directories(BEFORE ${CMAKE_SOURCE_DIR}/libnetdata/libjudy/src)
-target_compile_definitions(judyltablesgen PUBLIC
- JU_64BIT
- JUDYL)
-
-add_custom_command(
- OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/JudyLTables.c
- COMMAND judyltablesgen
- DEPENDS judyltablesgen
- )
-
target_include_directories(judy PUBLIC
libnetdata/libjudy/src
libnetdata/libjudy/src/JudyCommon)
@@ -468,6 +446,8 @@ set(LIBNETDATA_FILES
libnetdata/dictionary/dictionary.h
libnetdata/eval/eval.c
libnetdata/eval/eval.h
+ libnetdata/gorilla/gorilla.cc
+ libnetdata/gorilla/gorilla.h
libnetdata/health/health.c
libnetdata/health/health.h
libnetdata/july/july.c
@@ -513,6 +493,7 @@ set(LIBNETDATA_FILES
libnetdata/worker_utilization/worker_utilization.h
libnetdata/parser/parser.h
libnetdata/parser/parser.c
+ libnetdata/http/http_defs.h
)
IF(ENABLE_PLUGIN_EBPF)
@@ -528,6 +509,13 @@ target_include_directories(libnetdata BEFORE PUBLIC ${GENERATED_CONFIG_H_DIR})
set(APPS_PLUGIN_FILES
collectors/apps.plugin/apps_plugin.c)
+set(DEBUGFS_PLUGIN_FILES
+ collectors/debugfs.plugin/debugfs_plugin.c
+ collectors/debugfs.plugin/debugfs_plugin.h
+ collectors/debugfs.plugin/debugfs_extfrag.c
+ collectors/debugfs.plugin/debugfs_zswap.c
+ )
+
set(FREEBSD_PLUGIN_FILES
collectors/freebsd.plugin/plugin_freebsd.c
collectors/freebsd.plugin/plugin_freebsd.h
@@ -632,6 +620,8 @@ set(EBPF_PROCESS_PLUGIN_FILES
collectors/ebpf.plugin/ebpf_apps.h
collectors/ebpf.plugin/ebpf_cgroup.c
collectors/ebpf.plugin/ebpf_cgroup.h
+ collectors/ebpf.plugin/ebpf_unittest.c
+ collectors/ebpf.plugin/ebpf_unittest.h
)
set(PROC_PLUGIN_FILES
@@ -1385,6 +1375,21 @@ target_compile_options(netdatacli PUBLIC ${NETDATA_COMMON_CFLAGS})
# -----------------------------------------------------------------------------
+# debugfs.plugin
+
+IF(ENABLE_PLUGIN_DEBUGFS)
+ message(STATUS "debugfs.plugin: enabled")
+ add_executable(debugfs.plugin ${GENERATED_CONFIG_H} ${DEBUGFS_PLUGIN_FILES})
+ target_link_libraries (debugfs.plugin libnetdata ${NETDATA_COMMON_LIBRARIES} ${CAP_LIBRARIES})
+ target_include_directories(debugfs.plugin PUBLIC ${NETDATA_COMMON_INCLUDE_DIRS} ${CAP_INCLUDE_DIRS})
+ target_include_directories(debugfs.plugin BEFORE PUBLIC ${GENERATED_CONFIG_H_DIR})
+ target_compile_options(debugfs.plugin PUBLIC ${NETDATA_COMMON_CFLAGS} ${CAP_CFLAGS_OTHER})
+ELSE()
+ message(STATUS "debugfs.plugin: disabled")
+ENDIF()
+
+
+# -----------------------------------------------------------------------------
# apps.plugin
IF(ENABLE_PLUGIN_APPS)
diff --git a/Makefile.am b/Makefile.am
index 7d9abd549..666847dc3 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -54,7 +54,6 @@ SUBDIRS = \
dist_noinst_DATA = \
CHANGELOG.md \
cppcheck.sh \
- configs.signatures \
contrib \
docs \
mqtt_websockets \
@@ -77,12 +76,15 @@ dist_noinst_DATA = \
packaging/jsonc.version \
packaging/yaml.checksums \
packaging/yaml.version \
- packaging/libbpf.checksums \
- packaging/libbpf.version \
+ packaging/current_libbpf.checksums \
+ packaging/current_libbpf.version \
+ packaging/libbpf_0_0_9.checksums \
+ packaging/libbpf_0_0_9.version \
packaging/protobuf.checksums \
packaging/protobuf.version \
packaging/version \
database/engine/journalfile_v2.ksy.in \
+ httpd/h2o \
$(NULL)
# until integrated within build
@@ -117,7 +119,7 @@ SUBDIRS += \
AM_CFLAGS = \
$(OPTIONAL_MATH_CFLAGS) \
$(OPTIONAL_NFACCT_CFLAGS) \
- $(OPTIONAL_ZLIB_CFLAGS) \
+ $(ZLIB_CFLAGS) \
$(OPTIONAL_UUID_CFLAGS) \
$(OPTIONAL_MQTT_CFLAGS) \
$(OPTIONAL_LIBCAP_LIBS) \
@@ -152,6 +154,8 @@ LIBNETDATA_FILES = \
libnetdata/dictionary/dictionary.h \
libnetdata/eval/eval.c \
libnetdata/eval/eval.h \
+ libnetdata/gorilla/gorilla.h \
+ libnetdata/gorilla/gorilla.cc \
libnetdata/inlined.h \
libnetdata/july/july.c \
libnetdata/july/july.h \
@@ -197,6 +201,7 @@ LIBNETDATA_FILES = \
libnetdata/string/utf8.h \
libnetdata/worker_utilization/worker_utilization.c \
libnetdata/worker_utilization/worker_utilization.h \
+ libnetdata/http/http_defs.h \
$(NULL)
if ENABLE_PLUGIN_EBPF
@@ -211,6 +216,14 @@ APPS_PLUGIN_FILES = \
$(LIBNETDATA_FILES) \
$(NULL)
+DEBUGFS_PLUGIN_FILES = \
+ collectors/debugfs.plugin/debugfs_plugin.c \
+ collectors/debugfs.plugin/debugfs_plugin.h \
+ collectors/debugfs.plugin/debugfs_extfrag.c \
+ collectors/debugfs.plugin/debugfs_zswap.c \
+ $(LIBNETDATA_FILES) \
+ $(NULL)
+
FREEBSD_PLUGIN_FILES = \
collectors/freebsd.plugin/plugin_freebsd.c \
collectors/freebsd.plugin/plugin_freebsd.h \
@@ -347,6 +360,8 @@ EBPF_PLUGIN_FILES = \
collectors/ebpf.plugin/ebpf_apps.h \
collectors/ebpf.plugin/ebpf_cgroup.c \
collectors/ebpf.plugin/ebpf_cgroup.h \
+ collectors/ebpf.plugin/ebpf_unittest.c \
+ collectors/ebpf.plugin/ebpf_unittest.h \
$(LIBNETDATA_FILES) \
$(NULL)
@@ -500,15 +515,10 @@ libjudy_a_SOURCES = libnetdata/libjudy/src/Judy.h \
libnetdata/libjudy/src/JudyL/JudyLNextEmpty.c \
libnetdata/libjudy/src/JudyL/JudyLPrev.c \
libnetdata/libjudy/src/JudyL/JudyLPrevEmpty.c \
+ libnetdata/libjudy/src/JudyL/JudyLTables.c \
libnetdata/libjudy/src/JudyHS/JudyHS.c \
$(NULL)
-nodist_libjudy_a_SOURCES = JudyLTables.c
-
-BUILT_SOURCES += JudyLTables.c
-
-CLEANFILES += JudyLTables.c
-
libjudy_a_CFLAGS = $(LIBJUDY_CFLAGS) -DJUDYL -I$(abs_top_srcdir)/libnetdata/libjudy/src -I$(abs_top_srcdir)/libnetdata/libjudy/src/JudyCommon -Wno-sign-compare -Wno-implicit-fallthrough
libnetdata/libjudy/src/JudyL/libjudy_a-JudyLPrev.$(OBJEXT) : CFLAGS += -DJUDYPREV
@@ -518,16 +528,6 @@ libnetdata/libjudy/src/JudyL/libjudy_a-JudyLNextEmpty.$(OBJEXT) : CFLAGS += -DJU
libnetdata/libjudy/src/JudyL/libjudy_a-JudyLByCount.$(OBJEXT) : CFLAGS += -DNOSMARTJBB -DNOSMARTJBU -DNOSMARTJLB
libnetdata/libjudy/src/JudyL/libjudy_a-j__udyLGet.$(OBJEXT) : CFLAGS += -DJUDYGETINLINE
-noinst_PROGRAMS = judyltablesgen
-
-judyltablesgen_SOURCES = libnetdata/libjudy/src/JudyL/JudyLTablesGen.c
-judyltablesgen_CFLAGS = $(LIBJUDY_CFLAGS) -DJUDYL -I$(abs_top_srcdir)/libnetdata/libjudy/src -I$(abs_top_srcdir)/libnetdata/libjudy/src/JudyCommon -Wno-sign-compare -Wno-implicit-fallthrough
-
-$(builddir)/judyltablesgen$(EXEEXT) : CFLAGS += -Wno-format -Wno-format-security
-
-JudyLTables.c: $(abs_top_srcdir)/libnetdata/libjudy/src/JudyL/JudyLTablesGen.c $(builddir)/judyltablesgen$(EXEEXT)
- $(builddir)/judyltablesgen$(EXEEXT)
-
libjudy_a-JudyLTables.$(OBJEXT) : CFLAGS += -I$(abs_top_srcdir)/libnetdata/libjudy/src/JudyL
if ENABLE_DBENGINE
@@ -749,7 +749,8 @@ libmqttwebsockets_a_SOURCES = \
mqtt_websockets/c-rbuf/src/ringbuffer_internal.h \
mqtt_websockets/c_rhash/src/c_rhash.c \
mqtt_websockets/c_rhash/include/c_rhash.h \
- mqtt_websockets/c_rhash/src/c_rhash_internal.h
+ mqtt_websockets/c_rhash/src/c_rhash_internal.h \
+ $(NULL)
libmqttwebsockets_a_CFLAGS = $(CFLAGS) -DMQTT_WSS_CUSTOM_ALLOC -DRBUF_CUSTOM_MALLOC -DMQTT_WSS_CPUSTATS -I$(srcdir)/aclk/helpers -I$(srcdir)/mqtt_websockets/c_rhash/include
@@ -939,6 +940,123 @@ DAEMON_FILES = \
daemon/unit_test.h \
$(NULL)
+HTTPD_FILES = \
+ httpd/http_server.c \
+ httpd/http_server.h \
+ httpd/h2o_utils.c \
+ httpd/h2o_utils.h \
+ $(NULL)
+
+libh2o_dir = httpd/h2o
+
+libh2o_a_SOURCES = \
+ $(libh2o_dir)/deps/cloexec/cloexec.c \
+ $(libh2o_dir)/deps/libgkc/gkc.c \
+ $(libh2o_dir)/deps/libyrmcds/close.c \
+ $(libh2o_dir)/deps/libyrmcds/connect.c \
+ $(libh2o_dir)/deps/libyrmcds/recv.c \
+ $(libh2o_dir)/deps/libyrmcds/send.c \
+ $(libh2o_dir)/deps/libyrmcds/send_text.c \
+ $(libh2o_dir)/deps/libyrmcds/socket.c \
+ $(libh2o_dir)/deps/libyrmcds/strerror.c \
+ $(libh2o_dir)/deps/libyrmcds/text_mode.c \
+ $(libh2o_dir)/deps/picohttpparser/picohttpparser.c \
+ $(libh2o_dir)/lib/common/cache.c \
+ $(libh2o_dir)/lib/common/file.c \
+ $(libh2o_dir)/lib/common/filecache.c \
+ $(libh2o_dir)/lib/common/hostinfo.c \
+ $(libh2o_dir)/lib/common/http1client.c \
+ $(libh2o_dir)/lib/common/memcached.c \
+ $(libh2o_dir)/lib/common/memory.c \
+ $(libh2o_dir)/lib/common/multithread.c \
+ $(libh2o_dir)/lib/common/serverutil.c \
+ $(libh2o_dir)/lib/common/socket.c \
+ $(libh2o_dir)/lib/common/socketpool.c \
+ $(libh2o_dir)/lib/common/string.c \
+ $(libh2o_dir)/lib/common/time.c \
+ $(libh2o_dir)/lib/common/timeout.c \
+ $(libh2o_dir)/lib/common/url.c \
+ $(libh2o_dir)/lib/core/config.c \
+ $(libh2o_dir)/lib/core/configurator.c \
+ $(libh2o_dir)/lib/core/context.c \
+ $(libh2o_dir)/lib/core/headers.c \
+ $(libh2o_dir)/lib/core/logconf.c \
+ $(libh2o_dir)/lib/core/proxy.c \
+ $(libh2o_dir)/lib/core/request.c \
+ $(libh2o_dir)/lib/core/token.c \
+ $(libh2o_dir)/lib/core/util.c \
+ $(libh2o_dir)/lib/handler/access_log.c \
+ $(libh2o_dir)/lib/handler/chunked.c \
+ $(libh2o_dir)/lib/handler/compress.c \
+ $(libh2o_dir)/lib/handler/compress/gzip.c \
+ $(libh2o_dir)/lib/handler/errordoc.c \
+ $(libh2o_dir)/lib/handler/expires.c \
+ $(libh2o_dir)/lib/handler/fastcgi.c \
+ $(libh2o_dir)/lib/handler/file.c \
+ $(libh2o_dir)/lib/handler/headers.c \
+ $(libh2o_dir)/lib/handler/mimemap.c \
+ $(libh2o_dir)/lib/handler/proxy.c \
+ $(libh2o_dir)/lib/handler/redirect.c \
+ $(libh2o_dir)/lib/handler/reproxy.c \
+ $(libh2o_dir)/lib/handler/throttle_resp.c \
+ $(libh2o_dir)/lib/handler/status.c \
+ $(libh2o_dir)/lib/handler/headers_util.c \
+ $(libh2o_dir)/lib/handler/status/events.c \
+ $(libh2o_dir)/lib/handler/status/requests.c \
+ $(libh2o_dir)/lib/handler/http2_debug_state.c \
+ $(libh2o_dir)/lib/handler/status/durations.c \
+ $(libh2o_dir)/lib/handler/configurator/access_log.c \
+ $(libh2o_dir)/lib/handler/configurator/compress.c \
+ $(libh2o_dir)/lib/handler/configurator/errordoc.c \
+ $(libh2o_dir)/lib/handler/configurator/expires.c \
+ $(libh2o_dir)/lib/handler/configurator/fastcgi.c \
+ $(libh2o_dir)/lib/handler/configurator/file.c \
+ $(libh2o_dir)/lib/handler/configurator/headers.c \
+ $(libh2o_dir)/lib/handler/configurator/proxy.c \
+ $(libh2o_dir)/lib/handler/configurator/redirect.c \
+ $(libh2o_dir)/lib/handler/configurator/reproxy.c \
+ $(libh2o_dir)/lib/handler/configurator/throttle_resp.c \
+ $(libh2o_dir)/lib/handler/configurator/status.c \
+ $(libh2o_dir)/lib/handler/configurator/http2_debug_state.c \
+ $(libh2o_dir)/lib/handler/configurator/headers_util.c \
+ $(libh2o_dir)/lib/http1.c \
+ $(libh2o_dir)/lib/tunnel.c \
+ $(libh2o_dir)/lib/http2/cache_digests.c \
+ $(libh2o_dir)/lib/http2/casper.c \
+ $(libh2o_dir)/lib/http2/connection.c \
+ $(libh2o_dir)/lib/http2/frame.c \
+ $(libh2o_dir)/lib/http2/hpack.c \
+ $(libh2o_dir)/lib/http2/scheduler.c \
+ $(libh2o_dir)/lib/http2/stream.c \
+ $(libh2o_dir)/lib/http2/http2_debug_state.c \
+ $(NULL)
+
+libh2o_a_INCLUDES = \
+ -I$(srcdir)/$(libh2o_dir)/include \
+ -I$(srcdir)/$(libh2o_dir)/deps/cloexec \
+ -I$(srcdir)/$(libh2o_dir)/deps/brotli/enc \
+ -I$(srcdir)/$(libh2o_dir)/deps/golombset \
+ -I$(srcdir)/$(libh2o_dir)/deps/libgkc \
+ -I$(srcdir)/$(libh2o_dir)/deps/libyrmcds \
+ -I$(srcdir)/$(libh2o_dir)/deps/klib \
+ -I$(srcdir)/$(libh2o_dir)/deps/neverbleed \
+ -I$(srcdir)/$(libh2o_dir)/deps/picohttpparser \
+ -I$(srcdir)/$(libh2o_dir)/deps/picotest \
+ -I$(srcdir)/$(libh2o_dir)/deps/yaml/include \
+ -I$(srcdir)/$(libh2o_dir)/deps/yoml \
+ $(NULL)
+
+if ENABLE_HTTPD
+noinst_LIBRARIES += libh2o.a
+
+# until h2o updates support for OpenSSL 3.0 we silence the warnings
+libh2o_a_CFLAGS = $(CFLAGS) -Wno-deprecated-declarations -Wno-unused-parameter -Wno-sign-compare -Wno-missing-field-initializers -DH2O_USE_LIBUV=0 $(libh2o_a_INCLUDES)
+
+if LINUX
+ libh2o_a_CFLAGS += -D_GNU_SOURCE
+endif
+endif #ENABLE_HTTPD
+
NETDATA_FILES = \
collectors/all.h \
$(DAEMON_FILES) \
@@ -988,7 +1106,7 @@ endif
NETDATA_COMMON_LIBS = \
$(OPTIONAL_MATH_LIBS) \
$(OPTIONAL_BPF_LIBS) \
- $(OPTIONAL_ZLIB_LIBS) \
+ $(ZLIB_LIBS) \
$(OPTIONAL_SSL_LIBS) \
$(OPTIONAL_UUID_LIBS) \
$(OPTIONAL_MQTT_LIBS) \
@@ -1007,6 +1125,11 @@ if ENABLE_ACLK
NETDATA_COMMON_LIBS += libmqttwebsockets.a
endif
+if ENABLE_HTTPD
+ NETDATA_FILES += $(HTTPD_FILES)
+ NETDATA_COMMON_LIBS += libh2o.a
+endif
+
if LINK_STATIC_JSONC
NETDATA_COMMON_LIBS += $(abs_top_srcdir)/externaldeps/jsonc/libjson-c.a
endif
@@ -1017,7 +1140,8 @@ endif
NETDATACLI_FILES = \
daemon/commands.h \
- $(LIBNETDATA_FILES) \
+ libnetdata/buffer/buffer.c \
+ libnetdata/buffer/buffer.h \
cli/cli.c \
cli/cli.h \
$(NULL)
@@ -1058,6 +1182,15 @@ if ENABLE_PLUGIN_APPS
$(NULL)
endif
+if ENABLE_PLUGIN_DEBUGFS
+ plugins_PROGRAMS += debugfs.plugin
+ debugfs_plugin_SOURCES = $(DEBUGFS_PLUGIN_FILES)
+ debugfs_plugin_LDADD = \
+ $(NETDATA_COMMON_LIBS) \
+ $(OPTIONAL_LIBCAP_LIBS) \
+ $(NULL)
+endif
+
if ENABLE_PLUGIN_CGROUP_NETWORK
plugins_PROGRAMS += cgroup-network
cgroup_network_SOURCES = $(CGROUP_NETWORK_FILES)
diff --git a/aclk/aclk_capas.c b/aclk/aclk_capas.c
index 55f6fd3b4..b38a928a5 100644
--- a/aclk/aclk_capas.c
+++ b/aclk/aclk_capas.c
@@ -13,7 +13,7 @@ const struct capability *aclk_get_agent_capas()
{ .name = "mc", .version = 0, .enabled = 0 },
{ .name = "ctx", .version = 1, .enabled = 1 },
{ .name = "funcs", .version = 1, .enabled = 1 },
- { .name = "http_api_v2", .version = 1, .enabled = 1 },
+ { .name = "http_api_v2", .version = 3, .enabled = 1 },
{ .name = "health", .version = 1, .enabled = 0 },
{ .name = "req_cancel", .version = 1, .enabled = 1 },
{ .name = NULL, .version = 0, .enabled = 0 }
@@ -39,7 +39,7 @@ struct capability *aclk_get_node_instance_capas(RRDHOST *host)
.enabled = enable_metric_correlations },
{ .name = "ctx", .version = 1, .enabled = 1 },
{ .name = "funcs", .version = 0, .enabled = 0 },
- { .name = "http_api_v2", .version = 2, .enabled = 1 },
+ { .name = "http_api_v2", .version = 3, .enabled = 1 },
{ .name = "health", .version = 1, .enabled = host->health.health_enabled },
{ .name = "req_cancel", .version = 1, .enabled = 1 },
{ .name = NULL, .version = 0, .enabled = 0 }
diff --git a/aclk/aclk_otp.c b/aclk/aclk_otp.c
index 391313ffe..66d751be6 100644
--- a/aclk/aclk_otp.c
+++ b/aclk/aclk_otp.c
@@ -444,11 +444,15 @@ static int private_decrypt(RSA *p_key, unsigned char * enc_data, int data_len, u
if (!ctx)
return 1;
- if (EVP_PKEY_decrypt_init(ctx) <= 0)
+ if (EVP_PKEY_decrypt_init(ctx) <= 0) {
+ EVP_PKEY_CTX_free(ctx);
return 1;
+ }
- if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING) <= 0)
+ if (EVP_PKEY_CTX_set_rsa_padding(ctx, RSA_PKCS1_OAEP_PADDING) <= 0) {
+ EVP_PKEY_CTX_free(ctx);
return 1;
+ }
*decrypted = mallocz(outlen);
@@ -456,6 +460,8 @@ static int private_decrypt(RSA *p_key, unsigned char * enc_data, int data_len, u
result = (int) outlen;
else
result = -1;
+
+ EVP_PKEY_CTX_free(ctx);
#else
*decrypted = mallocz(RSA_size(p_key));
result = RSA_private_decrypt(data_len, enc_data, *decrypted, p_key, RSA_PKCS1_OAEP_PADDING);
diff --git a/aclk/aclk_query.c b/aclk/aclk_query.c
index 46d1e1e5e..0698c2d60 100644
--- a/aclk/aclk_query.c
+++ b/aclk/aclk_query.c
@@ -95,11 +95,9 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
size_t size = 0;
size_t sent = 0;
-#ifdef NETDATA_WITH_ZLIB
int z_ret;
BUFFER *z_buffer = buffer_create(NETDATA_WEB_RESPONSE_INITIAL_SIZE, &netdata_buffers_statistics.buffers_aclk);
char *start, *end;
-#endif
struct web_client *w = web_client_get_from_cache();
w->acl = WEB_CLIENT_ACL_ACLK;
@@ -152,7 +150,6 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
size = w->response.data->len;
sent = size;
-#ifdef NETDATA_WITH_ZLIB
// check if gzip encoding can and should be used
if ((start = strstr((char *)query->data.http_api_v2.payload, WEB_HDR_ACCEPT_ENC))) {
start += strlen(WEB_HDR_ACCEPT_ENC);
@@ -199,7 +196,6 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
w->response.data = z_buffer;
z_buffer = NULL;
}
-#endif
w->response.data->date = w->timings.tv_ready.tv_sec;
web_client_build_http_header(w);
@@ -209,22 +205,18 @@ static int http_api_v2(struct aclk_query_thread *query_thr, aclk_query_t query)
buffer_strcat(local_buffer, w->response.header_output->buffer);
if (w->response.data->len) {
-#ifdef NETDATA_WITH_ZLIB
if (w->response.zinitialized) {
buffer_need_bytes(local_buffer, w->response.data->len);
memcpy(&local_buffer->buffer[local_buffer->len], w->response.data->buffer, w->response.data->len);
local_buffer->len += w->response.data->len;
sent = sent - size + w->response.data->len;
} else {
-#endif
buffer_strcat(local_buffer, w->response.data->buffer);
-#ifdef NETDATA_WITH_ZLIB
}
-#endif
}
// send msg.
- aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id, t, query->created, w->response.code, local_buffer->buffer, local_buffer->len);
+ w->response.code = aclk_http_msg_v2(query_thr->client, query->callback_topic, query->msg_id, t, query->created, w->response.code, local_buffer->buffer, local_buffer->len);
struct timeval tv;
@@ -249,9 +241,7 @@ cleanup:
pending_req_list_rm(query->msg_id);
-#ifdef NETDATA_WITH_ZLIB
buffer_free(z_buffer);
-#endif
buffer_free(local_buffer);
return retval;
}
diff --git a/aclk/aclk_tx_msgs.c b/aclk/aclk_tx_msgs.c
index 86ee818ed..d11e96cfb 100644
--- a/aclk/aclk_tx_msgs.c
+++ b/aclk/aclk_tx_msgs.c
@@ -83,7 +83,10 @@ static int aclk_send_message_with_bin_payload(mqtt_wss_client client, json_objec
memcpy(&full_msg[len], payload, payload_len);
}
- mqtt_wss_publish5(client, (char*)topic, NULL, full_msg, &freez_aclk_publish5b, full_msg_len, MQTT_WSS_PUB_QOS1, &packet_id);
+ int rc = mqtt_wss_publish5(client, (char*)topic, NULL, full_msg, &freez_aclk_publish5b, full_msg_len, MQTT_WSS_PUB_QOS1, &packet_id);
+
+ if (rc == MQTT_WSS_ERR_TOO_BIG_FOR_SERVER)
+ return HTTP_RESP_FORBIDDEN;
#ifdef NETDATA_INTERNAL_CHECKS
aclk_stats_msg_published(packet_id);
@@ -169,11 +172,11 @@ void aclk_http_msg_v2_err(mqtt_wss_client client, const char *topic, const char
json_object_object_add(msg, "error-description", tmp);
if (aclk_send_message_with_bin_payload(client, msg, topic, payload, payload_len)) {
- error("Failed to send cancelation message for http reply");
+ error("Failed to send cancellation message for http reply %zu %s", payload_len, payload);
}
}
-void aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, int http_code, const char *payload, size_t payload_len)
+int aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, int http_code, const char *payload, size_t payload_len)
{
json_object *tmp, *msg;
@@ -192,7 +195,7 @@ void aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg
switch (rc) {
case HTTP_RESP_FORBIDDEN:
- aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_REQ_REPLY_TOO_BIG, CLOUD_EMSG_REQ_REPLY_TOO_BIG, payload, payload_len);
+ aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_REQ_REPLY_TOO_BIG, CLOUD_EMSG_REQ_REPLY_TOO_BIG, NULL, 0);
break;
case HTTP_RESP_INTERNAL_SERVER_ERROR:
aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_FAIL_TOPIC, CLOUD_EMSG_FAIL_TOPIC, payload, payload_len);
@@ -201,6 +204,7 @@ void aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg
aclk_http_msg_v2_err(client, topic, msg_id, rc, CLOUD_EC_SND_TIMEOUT, CLOUD_EMSG_SND_TIMEOUT, payload, payload_len);
break;
}
+ return rc ? rc : http_code;
}
uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable) {
diff --git a/aclk/aclk_tx_msgs.h b/aclk/aclk_tx_msgs.h
index 31e592410..9e7d89077 100644
--- a/aclk/aclk_tx_msgs.h
+++ b/aclk/aclk_tx_msgs.h
@@ -12,7 +12,7 @@
uint16_t aclk_send_bin_message_subtopic_pid(mqtt_wss_client client, char *msg, size_t msg_len, enum aclk_topics subtopic, const char *msgname);
void aclk_http_msg_v2_err(mqtt_wss_client client, const char *topic, const char *msg_id, int http_code, int ec, const char* emsg, const char *payload, size_t payload_len);
-void aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, int http_code, const char *payload, size_t payload_len);
+int aclk_http_msg_v2(mqtt_wss_client client, const char *topic, const char *msg_id, usec_t t_exec, usec_t created, int http_code, const char *payload, size_t payload_len);
uint16_t aclk_send_agent_connection_update(mqtt_wss_client client, int reachable);
char *aclk_generate_lwt(size_t *size);
diff --git a/aclk/https_client.c b/aclk/https_client.c
index e2a42eef3..345cf65a8 100644
--- a/aclk/https_client.c
+++ b/aclk/https_client.c
@@ -528,7 +528,7 @@ int https_request(https_req_t *request, https_req_response_t *response) {
}
ctx->request = request;
- ctx->ssl_ctx = security_initialize_openssl_client();
+ ctx->ssl_ctx = netdata_ssl_create_client_ctx(0);
if (ctx->ssl_ctx==NULL) {
error("Cannot allocate SSL context");
goto exit_sock;
diff --git a/aclk/schema-wrappers/alarm_stream.cc b/aclk/schema-wrappers/alarm_stream.cc
index af0b891ca..11b9284f5 100644
--- a/aclk/schema-wrappers/alarm_stream.cc
+++ b/aclk/schema-wrappers/alarm_stream.cc
@@ -86,6 +86,7 @@ void destroy_alarm_log_entry(struct alarm_log_entry *entry)
freez(entry->rendered_info);
freez(entry->chart_context);
+ freez(entry->transition_id);
}
static void fill_alarm_log_entry(struct alarm_log_entry *data, AlarmLogEntry *proto)
@@ -134,6 +135,9 @@ static void fill_alarm_log_entry(struct alarm_log_entry *data, AlarmLogEntry *pr
proto->set_rendered_info(data->rendered_info);
proto->set_chart_context(data->chart_context);
+
+ proto->set_event_id(data->event_id);
+ proto->set_transition_id(data->transition_id);
}
char *generate_alarm_log_entry(size_t *len, struct alarm_log_entry *data)
diff --git a/aclk/schema-wrappers/alarm_stream.h b/aclk/schema-wrappers/alarm_stream.h
index 83e7c1bce..e0bf31ce6 100644
--- a/aclk/schema-wrappers/alarm_stream.h
+++ b/aclk/schema-wrappers/alarm_stream.h
@@ -73,6 +73,9 @@ struct alarm_log_entry {
char *rendered_info;
char *chart_context;
+
+ uint64_t event_id;
+ char *transition_id;
};
struct send_alarm_checkpoint {
diff --git a/cli/cli.c b/cli/cli.c
index 108c77626..efcf5fce0 100644
--- a/cli/cli.c
+++ b/cli/cli.c
@@ -1,7 +1,109 @@
// SPDX-License-Identifier: GPL-3.0-or-later
#include "cli.h"
-#include "libnetdata/required_dummies.h"
+
+void error_int(int is_collector __maybe_unused, const char *prefix __maybe_unused, const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt, ... ) {
+ FILE *fp = stderr;
+
+ va_list args;
+ va_start( args, fmt );
+ vfprintf(fp, fmt, args );
+ va_end( args );
+}
+
+#ifdef NETDATA_INTERNAL_CHECKS
+
+uint64_t debug_flags;
+
+void debug_int( const char *file __maybe_unused , const char *function __maybe_unused , const unsigned long line __maybe_unused, const char *fmt __maybe_unused, ... )
+{
+
+}
+
+void fatal_int( const char *file __maybe_unused, const char *function __maybe_unused, const unsigned long line __maybe_unused, const char *fmt __maybe_unused, ... )
+{
+ abort();
+};
+#endif
+
+#ifdef NETDATA_TRACE_ALLOCATIONS
+void *callocz_int(size_t nmemb, size_t size, const char *file __maybe_unused, const char *function __maybe_unused, size_t line __maybe_unused)
+{
+ void *p = calloc(nmemb, size);
+ if (unlikely(!p)) {
+ error("Cannot allocate %zu bytes of memory.", nmemb * size);
+ exit(1);
+ }
+ return p;
+}
+
+void *mallocz_int(size_t size, const char *file __maybe_unused, const char *function __maybe_unused, size_t line __maybe_unused)
+{
+ void *p = malloc(size);
+ if (unlikely(!p)) {
+ error("Cannot allocate %zu bytes of memory.", size);
+ exit(1);
+ }
+ return p;
+}
+
+void *reallocz_int(void *ptr, size_t size, const char *file __maybe_unused, const char *function __maybe_unused, size_t line __maybe_unused)
+{
+ void *p = realloc(ptr, size);
+ if (unlikely(!p)) {
+ error("Cannot allocate %zu bytes of memory.", size);
+ exit(1);
+ }
+ return p;
+}
+
+void freez_int(void *ptr, const char *file __maybe_unused, const char *function __maybe_unused, size_t line __maybe_unused)
+{
+ free(ptr);
+}
+#else
+void freez(void *ptr) {
+ free(ptr);
+}
+
+void *mallocz(size_t size) {
+ void *p = malloc(size);
+ if (unlikely(!p)) {
+ error("Cannot allocate %zu bytes of memory.", size);
+ exit(1);
+ }
+ return p;
+}
+
+void *callocz(size_t nmemb, size_t size) {
+ void *p = calloc(nmemb, size);
+ if (unlikely(!p)) {
+ error("Cannot allocate %zu bytes of memory.", nmemb * size);
+ exit(1);
+ }
+ return p;
+}
+
+void *reallocz(void *ptr, size_t size) {
+ void *p = realloc(ptr, size);
+ if (unlikely(!p)) {
+ error("Cannot allocate %zu bytes of memory.", size);
+ exit(1);
+ }
+ return p;
+}
+#endif
+
+int vsnprintfz(char *dst, size_t n, const char *fmt, va_list args) {
+ if(unlikely(!n)) return 0;
+
+ int size = vsnprintf(dst, n, fmt, args);
+ dst[n - 1] = '\0';
+
+ // vsnprintf() returns the length it would have written; cap it to the actual truncated length
+ if (unlikely((size_t) size >= n)) size = (int)(n - 1);
+
+ return size;
+}
static uv_pipe_t client_pipe;
static uv_write_t write_req;
@@ -174,8 +276,9 @@ int main(int argc, char **argv)
size_t to_copy;
to_copy = MIN(strlen(argv[i]), MAX_COMMAND_LENGTH - 1 - command_string_size);
- strncpyz(command_string + command_string_size, argv[i], to_copy);
+ strncpy(command_string + command_string_size, argv[i], to_copy);
command_string_size += to_copy;
+ command_string[command_string_size] = '\0';
if (command_string_size < MAX_COMMAND_LENGTH - 1) {
command_string[command_string_size++] = ' ';
diff --git a/collectors/Makefile.am b/collectors/Makefile.am
index 24e4c3f09..2aec3dd3e 100644
--- a/collectors/Makefile.am
+++ b/collectors/Makefile.am
@@ -8,6 +8,7 @@ SUBDIRS = \
cgroups.plugin \
charts.d.plugin \
cups.plugin \
+ debugfs.plugin \
diskspace.plugin \
timex.plugin \
ioping.plugin \
diff --git a/collectors/all.h b/collectors/all.h
index a0ce5d7fc..653729bbc 100644
--- a/collectors/all.h
+++ b/collectors/all.h
@@ -24,8 +24,17 @@
#define NETDATA_CHART_PRIO_SYSTEM_PGPGIO 151
#define NETDATA_CHART_PRIO_SYSTEM_RAM 200
#define NETDATA_CHART_PRIO_SYSTEM_SWAP 201
+#define NETDATA_CHART_PRIO_SYSTEM_SWAP_CALLS 202
#define NETDATA_CHART_PRIO_SYSTEM_SWAPIO 250
#define NETDATA_CHART_PRIO_SYSTEM_ZSWAPIO 300
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_COMPRESS_RATIO 301
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_POOL_TOT_SIZE 302
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_STORED_PAGE 303
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_REJECTS 304
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_POOL_LIM_HIT 305
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_WRT_BACK_PAGES 306
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_SAME_FILL_PAGE 307
+#define NETDATA_CHART_PRIO_SYSTEM_ZSWAP_DUPP_ENTRY 308
#define NETDATA_CHART_PRIO_SYSTEM_NET 500
#define NETDATA_CHART_PRIO_SYSTEM_IPV4 500 // freebsd only
#define NETDATA_CHART_PRIO_SYSTEM_IP 501
@@ -103,6 +112,7 @@
#define NETDATA_CHART_PRIO_MEM_ZRAM_SAVINGS 1601
#define NETDATA_CHART_PRIO_MEM_ZRAM_RATIO 1602
#define NETDATA_CHART_PRIO_MEM_ZRAM_EFFICIENCY 1603
+#define NETDATA_CHART_PRIO_MEM_FRAGMENTATION 1700
// Disks
diff --git a/collectors/apps.plugin/apps_groups.conf b/collectors/apps.plugin/apps_groups.conf
index f35454fde..659bd0f03 100644
--- a/collectors/apps.plugin/apps_groups.conf
+++ b/collectors/apps.plugin/apps_groups.conf
@@ -89,6 +89,7 @@ ioping: ioping
go.d.plugin: *go.d.plugin*
slabinfo.plugin: slabinfo.plugin
ebpf.plugin: *ebpf.plugin*
+debugfs.plugin: *debugfs.plugin*
# agent-service-discovery
agent_sd: agent_sd
diff --git a/collectors/charts.d.plugin/README.md b/collectors/charts.d.plugin/README.md
index 3e4edf562..97c2446fa 100644
--- a/collectors/charts.d.plugin/README.md
+++ b/collectors/charts.d.plugin/README.md
@@ -17,6 +17,8 @@ memory, collecting data with as little overheads as possible
`charts.d.plugin` looks for scripts in `/usr/lib/netdata/charts.d`.
The scripts should have the filename suffix: `.chart.sh`.
+By default, `charts.d.plugin` is not included as part of the install when using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md). You can add it by installing the `netdata-plugin-chartsd` package.
+
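+For example, on DEB-based systems you could run the following (command assumed; use your distribution's package manager):
+
+```bash
+sudo apt install netdata-plugin-chartsd
+```
+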
## Configuration
`charts.d.plugin` itself can be [configured](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) using the configuration file `/etc/netdata/charts.d.conf`. This file is also a BASH script.
diff --git a/collectors/charts.d.plugin/ap/README.md b/collectors/charts.d.plugin/ap/README.md
index bc7460a28..339ad1375 100644
--- a/collectors/charts.d.plugin/ap/README.md
+++ b/collectors/charts.d.plugin/ap/README.md
@@ -85,6 +85,8 @@ Station 40:b8:37:5a:ed:5e (on wlan0)
## Configuration
+If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed.
+
Edit the `charts.d/ap.conf` configuration file using `edit-config` from the Netdata [config
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
diff --git a/collectors/charts.d.plugin/apcupsd/README.md b/collectors/charts.d.plugin/apcupsd/README.md
index 6934d59c0..00e9697dc 100644
--- a/collectors/charts.d.plugin/apcupsd/README.md
+++ b/collectors/charts.d.plugin/apcupsd/README.md
@@ -13,6 +13,8 @@ Monitors different APC UPS models and retrieves status information using `apcacc
## Configuration
+If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed.
+
Edit the `charts.d/apcupsd.conf` configuration file using `edit-config` from the Netdata [config
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
diff --git a/collectors/charts.d.plugin/libreswan/README.md b/collectors/charts.d.plugin/libreswan/README.md
index a20eb86c0..b6eeb0180 100644
--- a/collectors/charts.d.plugin/libreswan/README.md
+++ b/collectors/charts.d.plugin/libreswan/README.md
@@ -24,6 +24,8 @@ The following charts are created, **per tunnel**:
## Configuration
+If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed.
+
Edit the `charts.d/libreswan.conf` configuration file using `edit-config` from the Netdata [config
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
diff --git a/collectors/charts.d.plugin/nut/README.md b/collectors/charts.d.plugin/nut/README.md
index 448825445..4608ce3e1 100644
--- a/collectors/charts.d.plugin/nut/README.md
+++ b/collectors/charts.d.plugin/nut/README.md
@@ -53,6 +53,8 @@ The following charts will be created:
## Configuration
+If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed.
+
Edit the `charts.d/nut.conf` configuration file using `edit-config` from the Netdata [config
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
diff --git a/collectors/charts.d.plugin/opensips/README.md b/collectors/charts.d.plugin/opensips/README.md
index c278b53a0..1d7322140 100644
--- a/collectors/charts.d.plugin/opensips/README.md
+++ b/collectors/charts.d.plugin/opensips/README.md
@@ -11,6 +11,8 @@ learn_rel_path: "Integrations/Monitor/Networking"
## Configuration
+If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed.
+
Edit the `charts.d/opensips.conf` configuration file using `edit-config` from the Netdata [config
directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md), which is typically at `/etc/netdata`.
diff --git a/collectors/charts.d.plugin/sensors/README.md b/collectors/charts.d.plugin/sensors/README.md
index 2601a2b65..0dbe96225 100644
--- a/collectors/charts.d.plugin/sensors/README.md
+++ b/collectors/charts.d.plugin/sensors/README.md
@@ -21,13 +21,15 @@ One chart for every sensor chip found and each of the above will be created.
## Enable the collector
+If using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), make sure `netdata-plugin-chartsd` is installed.
+
The `sensors` collector is disabled by default.
-To enable the collector, you need to edit the configuration file of `charts.d/sensors.conf`. You can do so by using the `edit config` script.
+To enable the collector, edit the `charts.d/sensors.conf` configuration file. You can do so by using the `edit-config` script.
> ### Info
>
-> To edit configuration files in a safe way, we provide the [`edit config` script](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) located in your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) (typically is `/etc/netdata`) that creates the proper file and opens it in an editor automatically.
+> To edit configuration files in a safe way, we provide the [`edit config` script](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) located in your [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory) (typically `/etc/netdata`) that creates the proper file and opens it in an editor automatically.
> It is recommended to use this way for configuring Netdata.
>
> Please also note that after most configuration changes you will need to [restart the Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) for the changes to take effect.
diff --git a/collectors/debugfs.plugin/Makefile.am b/collectors/debugfs.plugin/Makefile.am
new file mode 100644
index 000000000..02fe3a314
--- /dev/null
+++ b/collectors/debugfs.plugin/Makefile.am
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+AUTOMAKE_OPTIONS = subdir-objects
+MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
+
+dist_noinst_DATA = \
+ README.md \
+ $(NULL)
+
diff --git a/collectors/debugfs.plugin/README.md b/collectors/debugfs.plugin/README.md
new file mode 100644
index 000000000..a2dc9c0f6
--- /dev/null
+++ b/collectors/debugfs.plugin/README.md
@@ -0,0 +1,65 @@
+# OS provided metrics (debugfs.plugin)
+
+`debugfs.plugin` gathers metrics from the `/sys/kernel/debug` folder on Linux
+systems. [Debugfs](https://docs.kernel.org/filesystems/debugfs.html) exists as an easy way for kernel developers to
+make information available to user space.
+
+This plugin
+is [external](https://github.com/netdata/netdata/tree/master/collectors#collector-architecture-and-terminology):
+the Netdata daemon spawns it as a long-running independent process.
+
+In detail, it collects metrics from:
+
+- `/sys/kernel/debug/extfrag` (Memory fragmentation index for each order and zone).
+- `/sys/kernel/debug/zswap` ([Zswap](https://www.kernel.org/doc/Documentation/vm/zswap.txt) performance statistics).
+
+## Prerequisites
+
+### Permissions
+
+> No user action required.
+
+The debugfs root directory is accessible only to the root user by default. Netdata
+uses [Linux Capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html) to give the plugin access
+to debugfs. `CAP_DAC_READ_SEARCH` is added automatically during installation. This capability allows bypassing file read
+permission checks and directory read and execute permission checks. If file capabilities are not usable, the plugin is instead installed with the SUID bit set so that it runs as root.
+
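+To verify that the capability is present on the plugin binary, or to re-apply it manually, something like the
+following should work (the plugin path shown is the usual default; adjust it to your install):
+
+```bash
+# show the file capabilities currently set on the plugin
+getcap /usr/libexec/netdata/plugins.d/debugfs.plugin
+
+# re-apply the capability the installer normally sets
+sudo setcap cap_dac_read_search+ep /usr/libexec/netdata/plugins.d/debugfs.plugin
+```
+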
+## Metrics
+
+| Metric | Scope | Dimensions | Units | Labels |
+|-------------------------------------|:---------:|:---------------------------------------------------------------------------------------:|:------------:|:---------:|
+| mem.fragmentation_index_dma | numa node | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | numa_node |
+| mem.fragmentation_index_dma32 | numa node | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | numa_node |
+| mem.fragmentation_index_normal | numa node | order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10 | index | numa_node |
+| system.zswap_pool_compression_ratio | | compression_ratio | ratio | |
+| system.zswap_pool_compressed_size | | compressed_size | bytes | |
+| system.zswap_pool_raw_size | | uncompressed_size | bytes | |
+| system.zswap_rejections | | compress_poor, kmemcache_fail, alloc_fail, reclaim_fail | rejections/s | |
+| system.zswap_pool_limit_hit | | limit | events/s | |
+| system.zswap_written_back_raw_bytes | | written_back | bytes/s | |
+| system.zswap_same_filled_raw_size | | same_filled | bytes | |
+| system.zswap_duplicate_entry | | entries | entries/s | |
+
+## Troubleshooting
+
+To troubleshoot issues with the collector, run `debugfs.plugin` in the terminal. The output
+should give you clues as to why the collector isn't working.
+
+- Navigate to the `plugins.d` directory, usually at `/usr/libexec/netdata/plugins.d/`. If that's not the case on
+ your system, open `netdata.conf` and look for the `plugins` setting under `[directories]`.
+
+ ```bash
+ cd /usr/libexec/netdata/plugins.d/
+ ```
+
+- Switch to the `netdata` user.
+
+ ```bash
+ sudo -u netdata -s
+ ```
+
+- Run the `debugfs.plugin` to debug the collector:
+
+ ```bash
+ ./debugfs.plugin
+ ```
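+
+- Optionally, pass `test-permissions` (or `-t`) to verify that the plugin can read
+ `/sys/kernel/debug/extfrag/extfrag_index`; it prints `OK` and exits on success:
+
+ ```bash
+ ./debugfs.plugin test-permissions
+ ```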
diff --git a/collectors/debugfs.plugin/debugfs_extfrag.c b/collectors/debugfs.plugin/debugfs_extfrag.c
new file mode 100644
index 000000000..75da4deca
--- /dev/null
+++ b/collectors/debugfs.plugin/debugfs_extfrag.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "debugfs_plugin.h"
+
+#define NETDATA_ORDER_FRAGMENTATION 11
+
+static char *orders[NETDATA_ORDER_FRAGMENTATION] = { "order0", "order1", "order2", "order3", "order4",
+ "order5", "order6", "order7", "order8", "order9",
+ "order10"
+};
+
+static struct netdata_extrafrag {
+ char *node_zone;
+ uint32_t hash;
+
+ char *id;
+
+ collected_number orders[NETDATA_ORDER_FRAGMENTATION];
+
+ struct netdata_extrafrag *next;
+} *netdata_extrafrags_root = NULL;
+
+static struct netdata_extrafrag *find_or_create_extrafrag(const char *name)
+{
+ struct netdata_extrafrag *extrafrag;
+ uint32_t hash = simple_hash(name);
+
+ // search it, from beginning to the end
+ for (extrafrag = netdata_extrafrags_root ; extrafrag ; extrafrag = extrafrag->next) {
+ if (unlikely(hash == extrafrag->hash && !strcmp(name, extrafrag->node_zone))) {
+ return extrafrag;
+ }
+ }
+
+ extrafrag = callocz(1, sizeof(struct netdata_extrafrag));
+ extrafrag->node_zone = strdupz(name);
+ extrafrag->hash = hash;
+
+ if (netdata_extrafrags_root) {
+ struct netdata_extrafrag *last_node;
+ for (last_node = netdata_extrafrags_root; last_node->next ; last_node = last_node->next);
+
+ last_node->next = extrafrag;
+ } else
+ netdata_extrafrags_root = extrafrag;
+
+ return extrafrag;
+}
+
+static void extfrag_send_chart(char *chart_id, collected_number *values)
+{
+ int i;
+ fprintf(stdout, "BEGIN mem.fragmentation_index_%s\n", chart_id);
+ for (i = 0; i < NETDATA_ORDER_FRAGMENTATION; i++) {
+ fprintf(stdout, "SET %s = %lld\n", orders[i], values[i]);
+ }
+ fprintf(stdout, "END\n");
+ fflush(stdout);
+}
+
+int do_debugfs_extfrag(int update_every, const char *name) {
+ static procfile *ff = NULL;
+ static int chart_order = NETDATA_CHART_PRIO_MEM_FRAGMENTATION;
+
+ if (unlikely(!ff)) {
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename,
+ FILENAME_MAX,
+ "%s%s",
+ netdata_configured_host_prefix,
+ "/sys/kernel/debug/extfrag/extfrag_index");
+
+ ff = procfile_open(filename, " \t,", PROCFILE_FLAG_DEFAULT);
+ if (unlikely(!ff)) return 1;
+ }
+
+ ff = procfile_readall(ff);
+ if (unlikely(!ff)) return 1;
+
+ size_t l, i, j, lines = procfile_lines(ff);
+ for (l = 0; l < lines; l++) {
+ char chart_id[64];
+ char zone_lowercase[32];
+ if (unlikely(procfile_linewords(ff, l) < 15)) continue;
+ char *zone = procfile_lineword(ff, l, 3);
+ strncpyz(zone_lowercase, zone, 31);
+ debugfs2lower(zone_lowercase);
+
+ char *id = procfile_lineword(ff, l, 1);
+ snprintfz(chart_id, 63, "node_%s_%s", id, zone_lowercase);
+ debugfs2lower(chart_id);
+
+ struct netdata_extrafrag *extrafrag = find_or_create_extrafrag(chart_id);
+ collected_number *line_orders = extrafrag->orders;
+ for (i = 4, j = 0 ; i < 15; i++, j++) {
+ NETDATA_DOUBLE value = str2ndd(procfile_lineword(ff, l, i), NULL);
+ line_orders[j] = (collected_number) (value * 1000.0);
+ }
+
+ if (unlikely(!extrafrag->id)) {
+ extrafrag->id = extrafrag->node_zone;
+ fprintf(
+ stdout,
+ "CHART mem.fragmentation_index_%s '' 'Memory fragmentation index for each order' 'index' 'fragmentation' 'mem.fragmentation_index_%s' 'line' %d %d '' 'debugfs.plugin' '%s'\n",
+ extrafrag->node_zone,
+ zone_lowercase,
+ chart_order++, // FIXME: the same zones must have the same order
+ update_every,
+ name);
+ for (i = 0; i < NETDATA_ORDER_FRAGMENTATION; i++) {
+ fprintf(stdout, "DIMENSION '%s' '%s' absolute 1 1000 ''\n", orders[i], orders[i]);
+ }
+ fprintf(stdout,
+ "CLABEL 'numa_node' 'node%s' 1\n"
+ "CLABEL_COMMIT\n",
+ id);
+ }
+ extfrag_send_chart(chart_id, line_orders);
+ }
+
+ return 0;
+}
diff --git a/collectors/debugfs.plugin/debugfs_plugin.c b/collectors/debugfs.plugin/debugfs_plugin.c
new file mode 100644
index 000000000..9713be320
--- /dev/null
+++ b/collectors/debugfs.plugin/debugfs_plugin.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "debugfs_plugin.h"
+#include "libnetdata/required_dummies.h"
+
+static char *user_config_dir = CONFIG_DIR;
+static char *stock_config_dir = LIBCONFIG_DIR;
+
+static int update_every = 1;
+
+static struct debugfs_module {
+ const char *name;
+
+ int enabled;
+
+ int (*func)(int update_every, const char *name);
+} debugfs_modules[] = {
+ // Memory Fragmentation
+ { .name = "/sys/kernel/debug/extfrag", .enabled = CONFIG_BOOLEAN_YES,
+ .func = do_debugfs_extfrag},
+ { .name = "/sys/kernel/debug/zswap", .enabled = CONFIG_BOOLEAN_YES,
+ .func = do_debugfs_zswap},
+
+ // The terminator
+ { .name = NULL, .enabled = CONFIG_BOOLEAN_NO, .func = NULL}
+};
+
+#ifdef HAVE_CAPABILITY
+static int debugfs_check_capabilities()
+{
+ cap_t caps = cap_get_proc();
+ if (!caps) {
+ error("Cannot get current capabilities.");
+ return 0;
+ }
+
+ int ret = 1;
+ cap_flag_value_t cfv = CAP_CLEAR;
+ if (cap_get_flag(caps, CAP_DAC_READ_SEARCH, CAP_EFFECTIVE, &cfv) == -1) {
+ error("Cannot find if CAP_DAC_READ_SEARCH is effective.");
+ ret = 0;
+ } else {
+ if (cfv != CAP_SET) {
+ error("debugfs.plugin should run with CAP_DAC_READ_SEARCH.");
+ ret = 0;
+ }
+ }
+ cap_free(caps);
+
+ return ret;
+}
+#else
+static int debugfs_check_capabilities()
+{
+ return 0;
+}
+#endif
+
+// TODO: This function is used by 3 different collectors; we should make it global (next PR)
+static int debugfs_am_i_running_as_root()
+{
+ uid_t uid = getuid(), euid = geteuid();
+
+ if (uid == 0 || euid == 0) {
+ return 1;
+ }
+
+ return 0;
+}
+
+void debugfs2lower(char *name)
+{
+ while (*name) {
+ *name = tolower(*name);
+ name++;
+ }
+}
+
+// Considering our goal to reduce binary size, I preferred to copy these functions instead of forcing a link against unnecessary libs
+const char *debugfs_rrdset_type_name(RRDSET_TYPE chart_type) {
+ switch(chart_type) {
+ case RRDSET_TYPE_LINE:
+ default:
+ return RRDSET_TYPE_LINE_NAME;
+
+ case RRDSET_TYPE_AREA:
+ return RRDSET_TYPE_AREA_NAME;
+
+ case RRDSET_TYPE_STACKED:
+ return RRDSET_TYPE_STACKED_NAME;
+ }
+}
+
+const char *debugfs_rrd_algorithm_name(RRD_ALGORITHM algorithm) {
+ switch(algorithm) {
+ case RRD_ALGORITHM_ABSOLUTE:
+ default:
+ return RRD_ALGORITHM_ABSOLUTE_NAME;
+
+ case RRD_ALGORITHM_INCREMENTAL:
+ return RRD_ALGORITHM_INCREMENTAL_NAME;
+
+ case RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL:
+ return RRD_ALGORITHM_PCENT_OVER_ROW_TOTAL_NAME;
+
+ case RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL:
+ return RRD_ALGORITHM_PCENT_OVER_DIFF_TOTAL_NAME;
+ }
+}
+
+int debugfs_check_sys_permission() {
+ int ret = 0;
+
+ char filename[FILENAME_MAX + 1];
+
+ snprintfz(filename, FILENAME_MAX, "%s/sys/kernel/debug/extfrag/extfrag_index", netdata_configured_host_prefix);
+
+ procfile *ff = procfile_open(filename, NULL, PROCFILE_FLAG_NO_ERROR_ON_FILE_IO);
+ if(!ff) goto dcsp_cleanup;
+
+ ff = procfile_readall(ff);
+ if(!ff) goto dcsp_cleanup;
+
+ ret = 1;
+
+dcsp_cleanup:
+ if (!ret)
+ perror("Cannot open /sys/kernel/debug/extfrag/extfrag_index file");
+ procfile_close(ff);
+ return ret;
+}
+
+static void debugfs_parse_args(int argc, char **argv)
+{
+ int i, freq = 0;
+ for(i = 1; i < argc; i++) {
+ if(!freq) {
+ int n = (int)str2l(argv[i]);
+ if(n > 0) {
+ freq = n;
+ continue;
+ }
+ }
+
+ if(strcmp("test-permissions", argv[i]) == 0 || strcmp("-t", argv[i]) == 0) {
+ if(!debugfs_check_sys_permission()) {
+ exit(2);
+ }
+ printf("OK\n");
+ exit(0);
+ }
+ }
+
+ if(freq > 0) update_every = freq;
+}
+
+int main(int argc, char **argv)
+{
+ // debug_flags = D_PROCFILE;
+ stderror = stderr;
+
+ // set the name for logging
+ program_name = "debugfs.plugin";
+
+ // disable syslog for debugfs.plugin
+ error_log_syslog = 0;
+
+ netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
+ if (verify_netdata_host_prefix() == -1)
+ exit(1);
+
+ user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
+ if (user_config_dir == NULL) {
+ user_config_dir = CONFIG_DIR;
+ }
+
+ stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
+ if (stock_config_dir == NULL) {
+ // info("NETDATA_CONFIG_DIR is not passed from netdata");
+ stock_config_dir = LIBCONFIG_DIR;
+ }
+
+ // FIXME: should first check if /sys/kernel/debug is mounted
+
+ // FIXME: remove debugfs_check_sys_permission() after https://github.com/netdata/netdata/issues/15048 is fixed
+ if (!debugfs_check_capabilities() && !debugfs_am_i_running_as_root() && !debugfs_check_sys_permission()) {
+ uid_t uid = getuid(), euid = geteuid();
+#ifdef HAVE_CAPABILITY
+ error(
+ "debugfs.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
+ "Without these, debugfs.plugin cannot access /sys/kernel/debug. "
+ "To enable capabilities run: sudo setcap cap_dac_read_search,cap_sys_ptrace+ep %s; "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; ",
+ uid,
+ euid,
+ argv[0],
+ argv[0],
+ argv[0]);
+#else
+ error(
+ "debugfs.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities. "
+ "Without these, debugfs.plugin cannot access /sys/kernel/debug."
+ "Your system does not support capabilities. "
+ "To enable setuid to root run: sudo chown root:netdata %s; sudo chmod 4750 %s; ",
+ uid,
+ euid,
+ argv[0],
+ argv[0]);
+#endif
+ exit(1);
+ }
+
+ // if (!debugfs_check_sys_permission()) {
+ // exit(2);
+ // }
+
+ debugfs_parse_args(argc, argv);
+
+ size_t iteration;
+ usec_t step = update_every * USEC_PER_SEC;
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ for (iteration = 0; iteration < 86400; iteration++) {
+ heartbeat_next(&hb, step);
+ int enabled = 0;
+
+ for (int i = 0; debugfs_modules[i].name; i++) {
+ struct debugfs_module *pm = &debugfs_modules[i];
+ if (unlikely(!pm->enabled))
+ continue;
+
+ pm->enabled = !pm->func(update_every, pm->name);
+ if (likely(pm->enabled))
+ enabled++;
+ }
+ if (!enabled) {
+ info("all modules are disabled, exiting...");
+ return 1;
+ }
+ }
+
+ fprintf(stdout, "EXIT\n");
+ fflush(stdout);
+ return 0;
+}
diff --git a/collectors/debugfs.plugin/debugfs_plugin.h b/collectors/debugfs.plugin/debugfs_plugin.h
new file mode 100644
index 000000000..c53187d6e
--- /dev/null
+++ b/collectors/debugfs.plugin/debugfs_plugin.h
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_DEBUGFS_PLUGIN_H
+#define NETDATA_DEBUGFS_PLUGIN_H 1
+
+#include "libnetdata/libnetdata.h"
+#include "collectors/all.h"
+#include "database/rrd.h"
+
+int do_debugfs_extfrag(int update_every, const char *name);
+int do_debugfs_zswap(int update_every, const char *name);
+void debugfs2lower(char *name);
+const char *debugfs_rrdset_type_name(RRDSET_TYPE chart_type);
+const char *debugfs_rrd_algorithm_name(RRD_ALGORITHM algorithm);
+
+#endif // NETDATA_DEBUGFS_PLUGIN_H
diff --git a/collectors/debugfs.plugin/debugfs_zswap.c b/collectors/debugfs.plugin/debugfs_zswap.c
new file mode 100644
index 000000000..a2991b9f1
--- /dev/null
+++ b/collectors/debugfs.plugin/debugfs_zswap.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "debugfs_plugin.h"
+
+static long system_page_size = 4096;
+
+static collected_number pages_to_bytes(collected_number value)
+{
+ return value * system_page_size;
+}
+
+struct netdata_zswap_metric {
+ const char *filename;
+
+ const char *chart_id;
+ const char *title;
+ const char *units;
+ RRDSET_TYPE charttype;
+ int prio;
+ const char *dimension;
+ RRD_ALGORITHM algorithm;
+ int divisor;
+
+ int enabled;
+ int chart_created;
+
+ collected_number value;
+ collected_number (*convertv)(collected_number v);
+};
+
+static struct netdata_zswap_metric zswap_calculated_metrics[] = {
+ {.filename = "",
+ .chart_id = "pool_compression_ratio",
+ .dimension = "compression_ratio",
+ .units = "ratio",
+ .title = "Zswap compression ratio",
+ .algorithm = RRD_ALGORITHM_ABSOLUTE,
+ .charttype = RRDSET_TYPE_LINE,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_COMPRESS_RATIO,
+ .divisor = 100,
+ .convertv = NULL,
+ .value = -1},
+};
+
+enum netdata_zswap_calculated {
+ NETDATA_ZSWAP_COMPRESSION_RATIO_CHART,
+};
+
+enum netdata_zswap_independent {
+ NETDATA_ZSWAP_POOL_TOTAL_SIZE,
+ NETDATA_ZSWAP_STORED_PAGES,
+ NETDATA_ZSWAP_POOL_LIMIT_HIT,
+ NETDATA_ZSWAP_WRITTEN_BACK_PAGES,
+ NETDATA_ZSWAP_SAME_FILLED_PAGES,
+ NETDATA_ZSWAP_DUPLICATE_ENTRY,
+
+ // Terminator
+ NETDATA_ZSWAP_SITE_END
+};
+
+static struct netdata_zswap_metric zswap_independent_metrics[] = {
+ // https://elixir.bootlin.com/linux/latest/source/mm/zswap.c
+ {.filename = "/sys/kernel/debug/zswap/pool_total_size",
+ .chart_id = "pool_compressed_size",
+ .dimension = "compressed_size",
+ .units = "bytes",
+ .title = "Zswap compressed bytes currently stored",
+ .algorithm = RRD_ALGORITHM_ABSOLUTE,
+ .charttype = RRDSET_TYPE_AREA,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_POOL_TOT_SIZE,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/stored_pages",
+ .chart_id = "pool_raw_size",
+ .dimension = "uncompressed_size",
+ .units = "bytes",
+ .title = "Zswap uncompressed bytes currently stored",
+ .algorithm = RRD_ALGORITHM_ABSOLUTE,
+ .charttype = RRDSET_TYPE_AREA,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_STORED_PAGE,
+ .divisor = 1,
+ .convertv = pages_to_bytes,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/pool_limit_hit",
+ .chart_id = "pool_limit_hit",
+ .dimension = "limit",
+ .units = "events/s",
+ .title = "Zswap pool limit was reached",
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_LINE,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_POOL_LIM_HIT,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/written_back_pages",
+ .chart_id = "written_back_raw_bytes",
+ .dimension = "written_back",
+ .units = "bytes/s",
+ .title = "Zswap uncomressed bytes written back when pool limit was reached",
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_AREA,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_WRT_BACK_PAGES,
+ .divisor = 1,
+ .convertv = pages_to_bytes,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/same_filled_pages",
+ .chart_id = "same_filled_raw_size",
+ .dimension = "same_filled",
+ .units = "bytes",
+ .title = "Zswap same-value filled uncompressed bytes currently stored",
+ .algorithm = RRD_ALGORITHM_ABSOLUTE,
+ .charttype = RRDSET_TYPE_AREA,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_SAME_FILL_PAGE,
+ .divisor = 1,
+ .convertv = pages_to_bytes,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/duplicate_entry",
+ .chart_id = "duplicate_entry",
+ .dimension = "duplicate",
+ .units = "entries/s",
+ .title = "Zswap duplicate store was encountered",
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_LINE,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_DUPP_ENTRY,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+
+ // The terminator
+ {.filename = NULL,
+ .chart_id = NULL,
+ .dimension = NULL,
+ .units = NULL,
+ .title = NULL,
+ .algorithm = RRD_ALGORITHM_ABSOLUTE,
+ .charttype = RRDSET_TYPE_LINE,
+ .enabled = CONFIG_BOOLEAN_NO,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = -1,
+ .value = -1}};
+
+enum netdata_zswap_rejected {
+ NETDATA_ZSWAP_REJECTED_CHART,
+ NETDATA_ZSWAP_REJECTED_COMPRESS_POOR,
+ NETDATA_ZSWAP_REJECTED_KMEM_FAIL,
+ NETDATA_ZSWAP_REJECTED_RALLOC_FAIL,
+ NETDATA_ZSWAP_REJECTED_RRECLAIM_FAIL,
+
+ // Terminator
+ NETDATA_ZSWAP_REJECTED_END
+};
+
+static struct netdata_zswap_metric zswap_rejected_metrics[] = {
+ {.filename = "/sys/kernel/debug/zswap/",
+ .chart_id = "rejections",
+ .dimension = NULL,
+ .units = "rejections/s",
+ .title = "Zswap rejections",
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_STACKED,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_REJECTS,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/reject_compress_poor",
+ .chart_id = "reject_compress_poor",
+ .dimension = "compress_poor",
+ .units = NULL,
+ .title = NULL,
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_STACKED,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_REJECTS,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/reject_kmemcache_fail",
+ .chart_id = "reject_kmemcache_fail",
+ .dimension = "kmemcache_fail",
+ .units = NULL,
+ .title = NULL,
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_STACKED,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_REJECTS,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/reject_alloc_fail",
+ .chart_id = "reject_alloc_fail",
+ .dimension = "alloc_fail",
+ .units = NULL,
+ .title = NULL,
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_STACKED,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_REJECTS,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+ {.filename = "/sys/kernel/debug/zswap/reject_reclaim_fail",
+ .chart_id = "reject_reclaim_fail",
+ .dimension = "reclaim_fail",
+ .units = NULL,
+ .title = NULL,
+ .algorithm = RRD_ALGORITHM_INCREMENTAL,
+ .charttype = RRDSET_TYPE_STACKED,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = NETDATA_CHART_PRIO_SYSTEM_ZSWAP_REJECTS,
+ .divisor = 1,
+ .convertv = NULL,
+ .value = -1},
+
+ // The terminator
+ {.filename = NULL,
+ .chart_id = NULL,
+ .dimension = NULL,
+ .units = NULL,
+ .title = NULL,
+ .algorithm = RRD_ALGORITHM_ABSOLUTE,
+ .charttype = RRDSET_TYPE_STACKED,
+ .enabled = CONFIG_BOOLEAN_NO,
+ .chart_created = CONFIG_BOOLEAN_NO,
+ .prio = -1,
+ .value = -1}};
+
+int zswap_collect_data(struct netdata_zswap_metric *metric)
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, metric->filename);
+
+ if (read_single_number_file(filename, (unsigned long long *)&metric->value)) {
+ error("Cannot read file %s", filename);
+ return 1;
+ }
+
+ if (metric->convertv)
+ metric->value = metric->convertv(metric->value);
+
+ return 0;
+}
+
+static void
+zswap_send_chart(struct netdata_zswap_metric *metric, int update_every, const char *name, const char *option)
+{
+ fprintf(
+ stdout,
+ "CHART system.zswap_%s '' '%s' '%s' 'zswap' '' '%s' %d %d '%s' 'debugfs.plugin' '%s'\n",
+ metric->chart_id,
+ metric->title,
+ metric->units,
+ debugfs_rrdset_type_name(metric->charttype),
+ metric->prio,
+ update_every,
+ (!option) ? "" : option,
+ name);
+}
+
+static void zswap_send_dimension(struct netdata_zswap_metric *metric)
+{
+ int div = metric->divisor > 0 ? metric->divisor : 1;
+ fprintf(
+ stdout,
+ "DIMENSION '%s' '%s' %s 1 %d ''\n",
+ metric->dimension,
+ metric->dimension,
+ debugfs_rrd_algorithm_name(metric->algorithm),
+ div);
+}
+
+static void zswap_send_begin(struct netdata_zswap_metric *metric)
+{
+ fprintf(stdout, "BEGIN system.zswap_%s\n", metric->chart_id);
+}
+
+static void zswap_send_set(struct netdata_zswap_metric *metric)
+{
+ fprintf(stdout, "SET %s = %lld\n", metric->dimension, metric->value);
+}
+
+static void zswap_send_end_and_flush()
+{
+ fprintf(stdout, "END\n");
+ fflush(stdout);
+}
+
+static void zswap_independent_chart(struct netdata_zswap_metric *metric, int update_every, const char *name)
+{
+ if (unlikely(!metric->chart_created)) {
+ metric->chart_created = CONFIG_BOOLEAN_YES;
+
+ zswap_send_chart(metric, update_every, name, NULL);
+ zswap_send_dimension(metric);
+ }
+
+ zswap_send_begin(metric);
+ zswap_send_set(metric);
+ zswap_send_end_and_flush();
+}
+
+void zswap_reject_chart(int update_every, const char *name)
+{
+ struct netdata_zswap_metric *metric = &zswap_rejected_metrics[NETDATA_ZSWAP_REJECTED_CHART];
+
+ if (unlikely(!metric->chart_created)) {
+ metric->chart_created = CONFIG_BOOLEAN_YES;
+
+ zswap_send_chart(metric, update_every, name, NULL);
+ for (int i = NETDATA_ZSWAP_REJECTED_COMPRESS_POOR; zswap_rejected_metrics[i].filename; i++) {
+ metric = &zswap_rejected_metrics[i];
+ if (likely(metric->enabled))
+ zswap_send_dimension(metric);
+ }
+ }
+
+ metric = &zswap_rejected_metrics[NETDATA_ZSWAP_REJECTED_CHART];
+ zswap_send_begin(metric);
+ for (int i = NETDATA_ZSWAP_REJECTED_COMPRESS_POOR; zswap_rejected_metrics[i].filename; i++) {
+ metric = &zswap_rejected_metrics[i];
+ if (likely(metric->enabled))
+ zswap_send_set(metric);
+ }
+ zswap_send_end_and_flush();
+}
+
+static void zswap_obsolete_charts(int update_every, const char *name)
+{
+ struct netdata_zswap_metric *metric = NULL;
+
+ for (int i = 0; zswap_independent_metrics[i].filename; i++) {
+ metric = &zswap_independent_metrics[i];
+ if (likely(metric->chart_created))
+ zswap_send_chart(metric, update_every, name, "obsolete");
+ }
+
+ metric = &zswap_rejected_metrics[NETDATA_ZSWAP_REJECTED_CHART];
+ if (likely(metric->chart_created))
+ zswap_send_chart(metric, update_every, name, "obsolete");
+
+ metric = &zswap_calculated_metrics[NETDATA_ZSWAP_COMPRESSION_RATIO_CHART];
+ if (likely(metric->chart_created))
+ zswap_send_chart(metric, update_every, name, "obsolete");
+}
+
+#define ZSWAP_STATE_SIZE 1 // Y or N
+static int debugfs_is_zswap_enabled()
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "/sys/module/zswap/parameters/enabled"); // host prefix is not needed here
+ char state[ZSWAP_STATE_SIZE + 1];
+
+ int ret = read_file(filename, state, ZSWAP_STATE_SIZE);
+
+ if (unlikely(!ret && !strcmp(state, "Y"))) {
+ return 0;
+ }
+ return 1;
+}
+
+int do_debugfs_zswap(int update_every, const char *name)
+{
+ static int check_if_enabled = 1;
+
+ if (likely(check_if_enabled && debugfs_is_zswap_enabled())) {
+ info("Zswap is disabled");
+ return 1;
+ }
+
+ check_if_enabled = 0;
+
+ system_page_size = sysconf(_SC_PAGESIZE);
+ struct netdata_zswap_metric *metric = NULL;
+ int enabled = 0;
+
+ for (int i = 0; zswap_independent_metrics[i].filename; i++) {
+ metric = &zswap_independent_metrics[i];
+ if (unlikely(!metric->enabled))
+ continue;
+ if (unlikely(!(metric->enabled = !zswap_collect_data(metric))))
+ continue;
+ zswap_independent_chart(metric, update_every, name);
+ enabled++;
+ }
+
+ struct netdata_zswap_metric *metric_size = &zswap_independent_metrics[NETDATA_ZSWAP_POOL_TOTAL_SIZE];
+ struct netdata_zswap_metric *metric_raw_size = &zswap_independent_metrics[NETDATA_ZSWAP_STORED_PAGES];
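+ // Conversion note: stored_pages was already converted to bytes via pages_to_bytes(), so the
+ // ratio below is uncompressed bytes over compressed bytes. It is scaled by 100 to match the
+ // dimension divisor of 100, giving two decimal places on the dashboard.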
+ if (metric_size->enabled && metric_raw_size->enabled) {
+ metric = &zswap_calculated_metrics[NETDATA_ZSWAP_COMPRESSION_RATIO_CHART];
+ metric->value = 0;
+ if (metric_size->value > 0)
+ metric->value =
+ (collected_number)((NETDATA_DOUBLE)metric_raw_size->value / (NETDATA_DOUBLE)metric_size->value * 100);
+ zswap_independent_chart(metric, update_every, name);
+ }
+
+ int enabled_rejected = 0;
+ for (int i = NETDATA_ZSWAP_REJECTED_COMPRESS_POOR; zswap_rejected_metrics[i].filename; i++) {
+ metric = &zswap_rejected_metrics[i];
+ if (unlikely(!metric->enabled))
+ continue;
+ if (unlikely(!(metric->enabled = !zswap_collect_data(metric))))
+ continue;
+ enabled++;
+ enabled_rejected++;
+ }
+
+ if (likely(enabled_rejected > 0))
+ zswap_reject_chart(update_every, name);
+
+ if (unlikely(!enabled)) {
+ zswap_obsolete_charts(update_every, name);
+ return 1;
+ }
+
+ return 0;
+}
diff --git a/collectors/debugfs.plugin/metrics.csv b/collectors/debugfs.plugin/metrics.csv
new file mode 100644
index 000000000..a21383941
--- /dev/null
+++ b/collectors/debugfs.plugin/metrics.csv
@@ -0,0 +1,12 @@
+metric,scope,dimensions,unit,description,chart_type,labels,plugin,module
+mem.fragmentation_index_dma,node,"order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10",index,Memory fragmentation index for each order,line,numa_node,debugfs.plugin,/sys/kernel/debug/extfrag
+mem.fragmentation_index_dma32,node,"order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10",index,Memory fragmentation index for each order,line,numa_node,debugfs.plugin,/sys/kernel/debug/extfrag
+mem.fragmentation_index_normal,node,"order0, order1, order2, order3, order4, order5, order6, order7, order8, order9, order10",index,Memory fragmentation index for each order,line,numa_node,debugfs.plugin,/sys/kernel/debug/extfrag
+system.zswap_pool_compression_ratio,,compression_ratio,ratio,Zswap compression ratio,line,,debugfs.plugin,/sys/kernel/debug/zswap
+system.zswap_pool_compressed_size,,compressed_size,bytes,Zswap compressed bytes currently stored,area,,debugfs.plugin,/sys/kernel/debug/zswap
+system.zswap_pool_raw_size,,uncompressed_size,bytes,Zswap uncompressed bytes currently stored,area,,debugfs.plugin,/sys/kernel/debug/zswap
+system.zswap_rejections,,"compress_poor, kmemcache_fail, alloc_fail, reclaim_fail",rejections/s,Zswap rejections,stacked,,debugfs.plugin,/sys/kernel/debug/zswap
+system.zswap_pool_limit_hit,,limit,events/s,Zswap pool limit was reached,line,,debugfs.plugin,/sys/kernel/debug/zswap
+system.zswap_written_back_raw_bytes,,written_back,bytes/s,Zswap uncompressed bytes written back when pool limit was reached,area,,debugfs.plugin,/sys/kernel/debug/zswap
+system.zswap_same_filled_raw_size,,same_filled,bytes,Zswap same-value filled uncompressed bytes currently stored,area,,debugfs.plugin,/sys/kernel/debug/zswap
+system.zswap_duplicate_entry,,duplicate,entries/s,Zswap duplicate store was encountered,line,,debugfs.plugin,/sys/kernel/debug/zswap
diff --git a/collectors/diskspace.plugin/README.md b/collectors/diskspace.plugin/README.md
index b70bbf008..5ca1090fd 100644
--- a/collectors/diskspace.plugin/README.md
+++ b/collectors/diskspace.plugin/README.md
@@ -13,6 +13,8 @@ Simple patterns can be used to exclude mounts from showed statistics based on pa
By default, Netdata will enable monitoring metrics only when they are not zero. If they are constantly zero they are ignored. Metrics that will start having values, after Netdata is started, will be detected and charts will be automatically added to the dashboard (a refresh of the dashboard is needed for them to appear though).
+Netdata will try to detect mounts that are duplicates (i.e. of the same device) or bind mounts, and will not display charts for them, as the device is usually already monitored.
+
To configure this plugin, you need to edit the configuration file `netdata.conf`. You can do so by using the `edit config` script.
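+
+For example (assuming your Netdata config directory is `/etc/netdata`):
+
+```bash
+cd /etc/netdata
+sudo ./edit-config netdata.conf
+```
+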
> ### Info
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index 75f44a6e5..94bbc184d 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -99,8 +99,6 @@ accepts the following values:
- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new
charts for the return of these functions, such as errors. Monitoring function returns can help in debugging software,
such as failing to close file descriptors or creating zombie processes.
-- `update every`: Number of seconds used for eBPF to send data for Netdata.
-- `pid table size`: Defines the maximum number of PIDs stored inside the application hash table.
#### Integration with `apps.plugin`
@@ -115,11 +113,6 @@ If you want to enable `apps.plugin` integration, change the "apps" setting to "y
apps = yes
```
-When the integration is enabled, eBPF collector allocates memory for each process running. The total allocated memory
-has direct relationship with the kernel version. When the eBPF plugin is running on kernels newer than `4.15`, it uses
-per-cpu maps to speed up the update of hash tables. This also implies storing data for the same PID for each processor
-it runs.
-
#### Integration with `cgroups.plugin`
The eBPF collector also creates charts for each cgroup through an integration with the
@@ -138,6 +131,13 @@ If you do not need to monitor specific metrics for your `cgroups`, you can enabl
`ebpf.d.conf`, and then disable the plugin for a specific `thread` by following the steps in the
[Configuration](#configuring-ebpfplugin) section.
+#### Maps per Core
+
+When Netdata is running on kernels newer than `4.6`, users can modify how `ebpf.plugin` creates maps (hash or
+array). When `maps per core` is set to `yes`, the plugin creates one map per core on the host. When the value is
+set to `no`, only one hash table is created; this uses less memory, but it can also increase the overhead for
+processes.
+
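+A minimal sketch of the corresponding setting in `ebpf.d.conf` (assuming the option lives in the `[global]`
+section, alongside the other global options):
+
+```conf
+[global]
+    maps per core = no
+```
+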
#### Collect PID
When one of the previous integrations is enabled, `ebpf.plugin` will use Process Identifier (`PID`) to identify the
@@ -157,6 +157,16 @@ The threads that have integration with other collectors have an internal clean u
will only enable these threads integrated with other collectors when the kernel is compiled with
`CONFIG_DEBUG_INFO_BTF`, unless you enable them manually.
+#### Collection period
+
+The plugin uses the option `update every` to define how often, in seconds, eBPF sends data to Netdata. The default value
+is 5 seconds.
+
+#### PID table size
+
+The option `pid table size` defines the maximum number of PIDs stored inside the application hash table. The default value
+is defined according to the [kernel](https://elixir.bootlin.com/linux/v6.0.19/source/include/linux/threads.h#L28) source code.
+
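+A sketch of both options in `ebpf.d.conf` (the `[global]` section is assumed; `32768` matches the kernel's
+default PID limit and is shown for illustration):
+
+```conf
+[global]
+    update every = 5
+    pid table size = 32768
+```
+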
#### Integration Dashboard Elements
When an integration is enabled, your dashboard will also show the following cgroups and apps charts using low-level
@@ -880,14 +890,24 @@ These are tracepoints related to [OOM](https://en.wikipedia.org/wiki/Out_of_memo
eBPF monitoring is complex and produces a large volume of metrics. We've discovered scenarios where the eBPF plugin
significantly increases kernel memory usage by several hundred MB.
-If your node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart,
-consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin). Next,
-[restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system memory
-usage (see the `system.ram` chart) has dropped significantly.
+When the integration with apps or cgroups is enabled, the eBPF collector allocates memory for each running process. If your
+node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart, consider the
+following (a combined configuration sketch is shown after the list):
+
+- Modify [maps per core](#maps-per-core) to use only one map.
+- Disable [integration with apps](#integration-with-appsplugin).
+- Disable [integration with cgroup](#integration-with-cgroupsplugin).
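+
+A combined sketch of these memory-saving settings in `ebpf.d.conf` (option names as described in the sections
+linked above; the `[global]` section is assumed):
+
+```conf
+[global]
+    apps = no
+    cgroups = no
+    maps per core = no
+```
+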
-Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#ebpf-load-mode)
+If, after these changes, you still suspect eBPF is using too much memory, and there is no obvious culprit to be found
+in the `apps.mem` chart, test for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin).
+Next, [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with
+`sudo systemctl restart netdata` to see if system memory usage (see the `system.ram` chart) has dropped significantly.
+
+Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#pid-table-size)
in `ebpf.conf`.
+High total memory usage is a well-known [issue](https://lore.kernel.org/all/167821082315.1693.6957546778534183486.git-patchwork-notify@kernel.org/)
+for eBPF; it is not a bug in the plugin.
+
### SELinux
When [SELinux](https://www.redhat.com/en/topics/linux/what-is-selinux) is enabled, it may prevent `ebpf.plugin` from
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index c0764c600..45303574f 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -6,6 +6,7 @@
#include "ebpf.h"
#include "ebpf_socket.h"
+#include "ebpf_unittest.h"
#include "libnetdata/required_dummies.h"
/*****************************************************************
@@ -54,7 +55,8 @@ ebpf_module_t ebpf_modules[] = {
.config_file = NETDATA_PROCESS_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 |
NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -62,7 +64,8 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config,
.config_file = NETDATA_NETWORK_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "cachestat", .config_name = "cachestat", .enabled = 0, .start_routine = ebpf_cachestat_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -71,7 +74,8 @@ ebpf_module_t ebpf_modules[] = {
.config_file = NETDATA_CACHESTAT_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18|
NETDATA_V5_4 | NETDATA_V5_14 | NETDATA_V5_15 | NETDATA_V5_16,
- .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -79,7 +83,8 @@ ebpf_module_t ebpf_modules[] = {
.config_file = NETDATA_SYNC_CONFIG_FILE,
// All syscalls have the same kernels
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "dc", .config_name = "dc", .enabled = 0, .start_routine = ebpf_dcstat_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -87,7 +92,8 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
.config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -95,7 +101,8 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
.config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -103,28 +110,32 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
.config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = vfs_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = vfs_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, .start_routine = ebpf_filesystem_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
.config_file = NETDATA_FILESYSTEM_CONFIG_FILE,
//We are setting kernels as zero, because we load eBPF programs according the kernel running.
- .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL },
+ .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES },
{ .thread_name = "disk", .config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config,
.config_file = NETDATA_DISK_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "mount", .config_name = "mount", .enabled = 0, .start_routine = ebpf_mount_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config,
.config_file = NETDATA_MOUNT_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -133,21 +144,24 @@ ebpf_module_t ebpf_modules[] = {
.config_file = NETDATA_FD_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11 |
NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = fd_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = fd_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "hardirq", .config_name = "hardirq", .enabled = 0, .start_routine = ebpf_hardirq_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config,
.config_file = NETDATA_HARDIRQ_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "softirq", .config_name = "softirq", .enabled = 0, .start_routine = ebpf_softirq_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config,
.config_file = NETDATA_SOFTIRQ_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "oomkill", .config_name = "oomkill", .enabled = 0, .start_routine = ebpf_oomkill_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -155,7 +169,8 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config,
.config_file = NETDATA_OOMKILL_CONFIG_FILE,
.kernels = NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -163,19 +178,21 @@ ebpf_module_t ebpf_modules[] = {
.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config,
.config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = shm_targets, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = shm_targets, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = "mdflush", .config_name = "mdflush", .enabled = 0, .start_routine = ebpf_mdflush_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
.apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config,
.config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE,
.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
+ .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
{ .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY,
.global_charts = 0, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET,
.cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
.pid_map_size = 0, .names = NULL, .cfg = NULL, .config_name = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY,
- .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL},
+ .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
};
struct netdata_static_thread ebpf_threads[] = {
@@ -360,7 +377,8 @@ ebpf_filesystem_partitions_t localfs[] =
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
.addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .fs_maps = NULL},
{.filesystem = "xfs",
.optional_filesystem = NULL,
.family = "xfs",
@@ -369,7 +387,8 @@ ebpf_filesystem_partitions_t localfs[] =
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
.addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .fs_maps = NULL},
{.filesystem = "nfs",
.optional_filesystem = "nfs4",
.family = "nfs",
@@ -378,7 +397,8 @@ ebpf_filesystem_partitions_t localfs[] =
.flags = NETDATA_FILESYSTEM_ATTR_CHARTS,
.enabled = CONFIG_BOOLEAN_YES,
.addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .fs_maps = NULL},
{.filesystem = "zfs",
.optional_filesystem = NULL,
.family = "zfs",
@@ -387,7 +407,8 @@ ebpf_filesystem_partitions_t localfs[] =
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
.addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
+ .fs_maps = NULL},
{.filesystem = "btrfs",
.optional_filesystem = NULL,
.family = "btrfs",
@@ -396,7 +417,8 @@ ebpf_filesystem_partitions_t localfs[] =
.flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE,
.enabled = CONFIG_BOOLEAN_YES,
.addresses = {.function = "btrfs_file_operations", .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10},
+ .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10,
+ .fs_maps = NULL},
{.filesystem = NULL,
.optional_filesystem = NULL,
.family = NULL,
@@ -405,43 +427,50 @@ ebpf_filesystem_partitions_t localfs[] =
.flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
.enabled = CONFIG_BOOLEAN_YES,
.addresses = {.function = NULL, .addr = 0},
- .kernels = 0}};
+ .kernels = 0, .fs_maps = NULL}};
ebpf_sync_syscalls_t local_syscalls[] = {
{.syscall = NETDATA_SYSCALLS_SYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL
+ .sync_obj = NULL,
#endif
+ .sync_maps = NULL
},
{.syscall = NETDATA_SYSCALLS_SYNCFS, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL
+ .sync_obj = NULL,
#endif
+ .sync_maps = NULL
},
{.syscall = NETDATA_SYSCALLS_MSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL
+ .sync_obj = NULL,
#endif
+ .sync_maps = NULL
},
{.syscall = NETDATA_SYSCALLS_FSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL
+ .sync_obj = NULL,
#endif
+ .sync_maps = NULL
},
{.syscall = NETDATA_SYSCALLS_FDATASYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL
+ .sync_obj = NULL,
#endif
+ .sync_maps = NULL
},
{.syscall = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL
+ .sync_obj = NULL,
#endif
+ .sync_maps = NULL
},
{.syscall = NULL, .enabled = CONFIG_BOOLEAN_NO, .objects = NULL, .probe_links = NULL,
#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL
+ .sync_obj = NULL,
#endif
+ .sync_maps = NULL
}
};
@@ -550,7 +579,7 @@ static void ebpf_exit()
* @param objects objects loaded from eBPF programs
* @param probe_links links from loader
*/
-static void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links)
+void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links)
{
if (!probe_links || !objects)
return;
@@ -1738,6 +1767,21 @@ static inline void epbf_update_load_mode(char *str, netdata_ebpf_load_mode_t ori
}
/**
+ * Update maps per core
+ *
+ * Read the `maps per core` configuration option and propagate it to every module.
+ */
+static void ebpf_update_map_per_core()
+{
+ int i;
+ int value = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION,
+ EBPF_CFG_MAPS_PER_CORE, CONFIG_BOOLEAN_YES);
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].maps_per_core = value;
+ }
+}
+
+/**
* Read collector values
*
* @param disable_apps variable to store information related to apps.
@@ -1790,6 +1834,8 @@ static void read_collector_values(int *disable_apps, int *disable_cgroups,
enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_CGROUP, CONFIG_BOOLEAN_NO);
*disable_cgroups = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
+ ebpf_update_map_per_core();
+
// Read ebpf programs section
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
ebpf_modules[EBPF_MODULE_PROCESS_IDX].config_name, CONFIG_BOOLEAN_YES);
@@ -2015,6 +2061,48 @@ static inline void ebpf_load_thread_config()
}
/**
+ * Check Conditions
+ *
+ * This function checks the kernel the plugin is running on and verifies that the plugin has the required permissions.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+int ebpf_check_conditions()
+{
+ if (!has_condition_to_run(running_on_kernel)) {
+ error("The current collector cannot run on this kernel.");
+ return -1;
+ }
+
+ if (!am_i_running_as_root()) {
+ error(
+ "ebpf.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities..",
+ (unsigned int)getuid(), (unsigned int)geteuid());
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * Adjust memory
+ *
+ * Raise the memory lock limit (RLIMIT_MEMLOCK) so that eBPF programs and maps can be loaded.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+int ebpf_adjust_memory_limit()
+{
+ struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
+ if (setrlimit(RLIMIT_MEMLOCK, &r)) {
+ error("Setrlimit(RLIMIT_MEMLOCK)");
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
* Parse arguments given from user.
*
* @param argc the number of arguments
@@ -2052,6 +2140,7 @@ static void ebpf_parse_args(int argc, char **argv)
{"return", no_argument, 0, 0 },
{"legacy", no_argument, 0, 0 },
{"core", no_argument, 0, 0 },
+ {"unittest", no_argument, 0, 0 },
{0, 0, 0, 0}
};
@@ -2242,6 +2331,33 @@ static void ebpf_parse_args(int argc, char **argv)
#endif
break;
}
+ case EBPF_OPTION_UNITTEST: {
+ // If we cannot run to completion, the unit test is cancelled.
+ int exit_code = ECANCELED;
+ if (ebpf_check_conditions())
+ goto unittest;
+
+ if (ebpf_adjust_memory_limit())
+ goto unittest;
+
+ // Load binary in entry mode
+ ebpf_ut_initialize_structure(MODE_ENTRY);
+ if (ebpf_ut_load_real_binary())
+ goto unittest;
+
+ ebpf_ut_cleanup_memory();
+
+ // Attempt to load a fake binary in entry mode
+ ebpf_ut_initialize_structure(MODE_ENTRY);
+ if (ebpf_ut_load_fake_binary())
+ goto unittest;
+
+ ebpf_ut_cleanup_memory();
+
+ exit_code = 0;
+unittest:
+ exit(exit_code);
+ }
default: {
break;
}
@@ -2460,17 +2576,8 @@ int main(int argc, char **argv)
ebpf_parse_args(argc, argv);
ebpf_manage_pid(getpid());
- if (!has_condition_to_run(running_on_kernel)) {
- error("The current collector cannot run on this kernel.");
+ if (ebpf_check_conditions())
return 2;
- }
-
- if (!am_i_running_as_root()) {
- error(
- "ebpf.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities..",
- (unsigned int)getuid(), (unsigned int)geteuid());
- return 3;
- }
// set name
program_name = "ebpf.plugin";
@@ -2482,11 +2589,8 @@ int main(int argc, char **argv)
error_log_errors_per_period = 100;
error_log_throttle_period = 3600;
- struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
- error("Setrlimit(RLIMIT_MEMLOCK)");
- return 4;
- }
+ if (ebpf_adjust_memory_limit())
+ return 3;
signal(SIGINT, ebpf_stop_threads);
signal(SIGQUIT, ebpf_stop_threads);
@@ -2540,6 +2644,7 @@ int main(int argc, char **argv)
heartbeat_init(&hb);
int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
int update_apps_list = update_apps_every - 1;
+ int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core;
//Plugin will be killed when it receives a signal
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, step);
@@ -2550,7 +2655,7 @@ int main(int argc, char **argv)
if (++update_apps_list == update_apps_every) {
update_apps_list = 0;
cleanup_exited_pids();
- collect_data_for_all_processes(process_pid_fd);
+ collect_data_for_all_processes(process_pid_fd, process_maps_per_core);
pthread_mutex_lock(&lock);
ebpf_create_apps_charts(apps_groups_root_target);
@@ -2565,3 +2670,4 @@ int main(int argc, char **argv)
return 0;
}
+
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index 6a5ec5c39..8807f9a3a 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -15,6 +15,10 @@
#
# The `pid table size` defines the maximum number of PIDs stored in the application hash tables.
#
+# The `btf path` specifies where to find the BTF files.
+#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
[global]
ebpf load mode = entry
apps = no
@@ -22,6 +26,7 @@
update every = 5
pid table size = 32768
btf path = /sys/kernel/btf/
+ maps per core = yes
#
# eBPF Programs
@@ -63,7 +68,7 @@
shm = yes
socket = no
softirq = yes
- sync = yes
+ sync = no
swap = yes
vfs = no
network connections = no
diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
index 52466be51..82f870c98 100644
--- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
@@ -24,6 +24,8 @@
# `parent` : Only stores parent PID.
# `all` : Stores all PIDs used by software. This is the most expensive option.
#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
@@ -34,3 +36,4 @@
ebpf type format = auto
ebpf co-re tracing = trampoline
collect pid = real parent
+# maps per core = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
index 8aed8f783..f741b62a8 100644
--- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
@@ -22,6 +22,8 @@
# `parent` : Only stores parent PID.
# `all` : Stores all PIDs used by software. This is the most expensive option.
#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
@@ -32,3 +34,4 @@
ebpf type format = auto
ebpf co-re tracing = trampoline
collect pid = real parent
+# maps per core = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/fd.conf b/collectors/ebpf.plugin/ebpf.d/fd.conf
index 8333520fc..30a5fcfd9 100644
--- a/collectors/ebpf.plugin/ebpf.d/fd.conf
+++ b/collectors/ebpf.plugin/ebpf.d/fd.conf
@@ -10,6 +10,8 @@
#
# The `pid table size` defines the maximum number of PIDs stored inside the hash table.
#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
@@ -19,3 +21,4 @@
# pid table size = 32768
ebpf type format = auto
ebpf co-re tracing = trampoline
+# maps per core = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/network.conf b/collectors/ebpf.plugin/ebpf.d/network.conf
index d939d8e1f..75644a772 100644
--- a/collectors/ebpf.plugin/ebpf.d/network.conf
+++ b/collectors/ebpf.plugin/ebpf.d/network.conf
@@ -24,6 +24,9 @@
# `tracepoint`: When available, the eBPF collector will use kernel tracepoint to monitor syscall.
# `probe` : This is the same as legacy code.
#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
+# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
# apps = yes
@@ -35,6 +38,7 @@
udp connection table size = 4096
ebpf type format = auto
ebpf co-re tracing = trampoline
+ maps per core = no
#
# Network Connection
diff --git a/collectors/ebpf.plugin/ebpf.d/process.conf b/collectors/ebpf.plugin/ebpf.d/process.conf
index 1da5f84d3..f5e8804cd 100644
--- a/collectors/ebpf.plugin/ebpf.d/process.conf
+++ b/collectors/ebpf.plugin/ebpf.d/process.conf
@@ -15,11 +15,14 @@
# `parent` : Only stores parent PID.
# `all` : Stores all PIDs used by software. This is the most expensive option.
#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
# Uncomment lines to define specific options for thread.
-#[global]
+[global]
# ebpf load mode = entry
# apps = yes
# cgroups = no
# update every = 10
# pid table size = 32768
-# collect pid = real parent
+ collect pid = real parent
+# maps per core = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/shm.conf b/collectors/ebpf.plugin/ebpf.d/shm.conf
index 23ab96da4..f8ec1a18f 100644
--- a/collectors/ebpf.plugin/ebpf.d/shm.conf
+++ b/collectors/ebpf.plugin/ebpf.d/shm.conf
@@ -18,6 +18,8 @@
# `tracepoint`: When available, the eBPF collector will use kernel tracepoint to monitor syscall.
# `probe` : This is the same as legacy code.
#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
@@ -27,6 +29,7 @@
# pid table size = 32768
ebpf type format = auto
ebpf co-re tracing = trampoline
+# maps per core = yes
# List of monitored syscalls
[syscalls]
diff --git a/collectors/ebpf.plugin/ebpf.d/swap.conf b/collectors/ebpf.plugin/ebpf.d/swap.conf
index 3986ae4f8..5bad04424 100644
--- a/collectors/ebpf.plugin/ebpf.d/swap.conf
+++ b/collectors/ebpf.plugin/ebpf.d/swap.conf
@@ -17,6 +17,8 @@
# `trampoline`: This is the default mode used by the eBPF collector, due the small overhead added to host.
# `probe` : This is the same as legacy code.
#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
@@ -26,3 +28,4 @@
# pid table size = 32768
ebpf type format = auto
ebpf co-re tracing = trampoline
+# maps per core = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/sync.conf b/collectors/ebpf.plugin/ebpf.d/sync.conf
index ebec5d38e..fefbd4ee6 100644
--- a/collectors/ebpf.plugin/ebpf.d/sync.conf
+++ b/collectors/ebpf.plugin/ebpf.d/sync.conf
@@ -17,7 +17,10 @@
# `trampoline`: This is the default mode used by the eBPF collector, due the small overhead added to host.
# `tracepoint`: When available, the eBPF collector will use kernel tracepoint to monitor syscall.
# `probe` : This is the same as legacy code.
+#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
#
+# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
# apps = yes
@@ -25,6 +28,7 @@
# update every = 10
ebpf type format = auto
ebpf co-re tracing = trampoline
+# maps per core = yes
# List of monitored syscalls
[syscalls]
diff --git a/collectors/ebpf.plugin/ebpf.d/vfs.conf b/collectors/ebpf.plugin/ebpf.d/vfs.conf
index fa5d5b4e9..b4e5daac0 100644
--- a/collectors/ebpf.plugin/ebpf.d/vfs.conf
+++ b/collectors/ebpf.plugin/ebpf.d/vfs.conf
@@ -8,6 +8,18 @@
# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
# the setting `apps` and `cgroups` to 'no'.
#
+# The `ebpf type format` option accepts the following values:
+# `auto` : The eBPF collector will investigate hardware and select between the two next options.
+# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overhead.
+# `co-re` : The eBPF collector will use the latest tracing method. Note: This is not available on all platforms.
+#
+# The `ebpf co-re tracing` option accepts the following values:
+# `trampoline`: This is the default mode used by the eBPF collector, due to the small overhead it adds to the host.
+# `tracepoint`: When available, the eBPF collector will use a kernel tracepoint to monitor the syscall.
+# `probe` : This is the same as legacy code.
+#
+# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
+#
# Uncomment lines to define specific options for thread.
[global]
# ebpf load mode = entry
@@ -17,3 +29,4 @@
# pid table size = 32768
ebpf type format = auto
ebpf co-re tracing = trampoline
+# maps per core = yes
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index 5b48adc62..ae24c302c 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -119,7 +119,8 @@ enum ebpf_main_index {
EBPF_OPTION_GLOBAL_CHART,
EBPF_OPTION_RETURN_MODE,
EBPF_OPTION_LEGACY,
- EBPF_OPTION_CORE
+ EBPF_OPTION_CORE,
+ EBPF_OPTION_UNITTEST
};
typedef struct ebpf_tracepoint {
@@ -159,6 +160,7 @@ typedef struct ebpf_tracepoint {
#define NETDATA_EBPF_LOAD_METHOD "ebpf_load_methods"
#define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory"
#define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count"
+#define NETDATA_EBPF_HASH_TABLES_PER_CORE "ebpf_hash_tables_per_core"
// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -307,6 +309,8 @@ void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, c
void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end);
void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
ARAL *ebpf_allocate_pid_aral(char *name, size_t size);
+void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links);
+
extern ebpf_filesystem_partitions_t localfs[];
extern ebpf_sync_syscalls_t local_syscalls[];
extern int ebpf_exit_plugin;
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
index d6db4c676..3826f8efc 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -1415,14 +1415,37 @@ static inline void aggregate_pid_on_target(struct ebpf_target *w, struct ebpf_pi
}
/**
+ * Process Accumulator
+ *
+ * Sum all values read from kernel and store in the first address.
+ *
+ * @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
+ */
+void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core)
+{
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
+ ebpf_process_stat_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ ebpf_process_stat_t *w = &out[i];
+ total->exit_call += w->exit_call;
+ total->task_err += w->task_err;
+ total->create_thread += w->create_thread;
+ total->create_process += w->create_process;
+ total->release_call += w->release_call;
+ }
+}
+
+/**
* Collect data for all process
*
* Read data from hash table and store it in appropriate vectors.
* It also creates the link between targets and PIDs.
*
* @param tbl_pid_stats_fd The mapped file descriptor for the hash table.
+ * @param maps_per_core do I have hash maps per core?
*/
-void collect_data_for_all_processes(int tbl_pid_stats_fd)
+void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core)
{
if (unlikely(!ebpf_all_pids))
return;
@@ -1448,6 +1471,10 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd)
uint32_t key;
pids = ebpf_root_of_pids; // global list of all processes running
// while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
+ size_t length = sizeof(ebpf_process_stat_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
while (pids) {
key = pids->pid;
ebpf_process_stat_t *w = global_process_stats[key];
@@ -1456,7 +1483,7 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd)
global_process_stats[key] = w;
}
- if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, w)) {
+ if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, process_stat_vector)) {
// Clean Process structures
ebpf_process_stat_release(w);
global_process_stats[key] = NULL;
@@ -1467,6 +1494,12 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd)
continue;
}
+ ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
+
+ memcpy(w, process_stat_vector, sizeof(ebpf_process_stat_t));
+
+ memset(process_stat_vector, 0, length);
+
pids = pids->next;
}
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
index d33442af5..ad2e338d4 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -213,7 +213,8 @@ size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep,
size_t read_bandwidth_statistic_using_pid_on_target(ebpf_bandwidth_t **ep, int fd, struct ebpf_pid_on_target *pids);
-void collect_data_for_all_processes(int tbl_pid_stats_fd);
+void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core);
+void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core);
extern ebpf_process_stat_t **global_process_stats;
extern netdata_publish_cachestat_t **cachestat_pid;
@@ -235,6 +236,7 @@ extern void ebpf_aral_init(void);
extern ebpf_process_stat_t *ebpf_process_stat_get(void);
extern void ebpf_process_stat_release(ebpf_process_stat_t *stat);
+extern ebpf_process_stat_t *process_stat_vector;
extern ARAL *ebpf_aral_socket_pid;
void ebpf_socket_aral_init();
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index b2b006dd3..5bbbe1f43 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -14,19 +14,34 @@ static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END];
static netdata_idx_t *cachestat_values = NULL;
ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+#endif
+ }};
struct config cachestat_config = { .first_section = NULL,
.last_section = NULL,
@@ -233,10 +248,14 @@ static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_cachestat_adjust_map_size(struct cachestat_bpf *obj, ebpf_module_t *em)
+static void ebpf_cachestat_adjust_map(struct cachestat_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS],
em, bpf_map__name(obj->maps.cstat_pid));
+
+ ebpf_update_map_type(obj->maps.cstat_global, &cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS]);
+ ebpf_update_map_type(obj->maps.cstat_pid, &cachestat_maps[NETDATA_CACHESTAT_PID_STATS]);
+ ebpf_update_map_type(obj->maps.cstat_ctrl, &cachestat_maps[NETDATA_CACHESTAT_CTRL]);
}
/**
@@ -291,7 +310,7 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
ebpf_cachestat_disable_specific_probe(obj);
}
- ebpf_cachestat_adjust_map_size(obj, em);
+ ebpf_cachestat_adjust_map(obj, em);
if (!em->apps_charts && !em->cgroup_charts)
ebpf_cachestat_disable_release_task(obj);
@@ -445,10 +464,11 @@ static void calculate_stats(netdata_publish_cachestat_t *publish) {
* Sum all values read from kernel and store in the first address.
*
* @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
*/
-static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out)
+static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_cachestat_pid_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_cachestat_pid_t *w = &out[i];
@@ -504,14 +524,19 @@ static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *pu
* Read APPS table
*
* Read the apps table and store data inside the structure.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_apps_table()
+static void ebpf_read_cachestat_apps_table(int maps_per_core)
{
netdata_cachestat_pid_t *cv = cachestat_vector;
uint32_t key;
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_cachestat_pid_t)*ebpf_nprocs;
+ size_t length = sizeof(netdata_cachestat_pid_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
while (pids) {
key = pids->pid;
@@ -520,7 +545,7 @@ static void read_apps_table()
continue;
}
- cachestat_apps_accumulator(cv);
+ cachestat_apps_accumulator(cv, maps_per_core);
cachestat_fill_pid(key, cv);
@@ -535,12 +560,16 @@ static void read_apps_table()
* Update cgroup
*
* Update cgroup data based in
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_cachestat_cgroup()
+static void ebpf_update_cachestat_cgroup(int maps_per_core)
{
netdata_cachestat_pid_t *cv = cachestat_vector;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_cachestat_pid_t) * ebpf_nprocs;
+ size_t length = sizeof(netdata_cachestat_pid_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
ebpf_cgroup_target_t *ect;
pthread_mutex_lock(&mutex_cgroup_shm);
@@ -559,7 +588,7 @@ static void ebpf_update_cachestat_cgroup()
continue;
}
- cachestat_apps_accumulator(cv);
+ cachestat_apps_accumulator(cv, maps_per_core);
memcpy(out, cv, sizeof(netdata_cachestat_pid_t));
}
@@ -627,8 +656,10 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
* Read global counter
*
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_cachestat_read_global_table()
+static void ebpf_cachestat_read_global_table(int maps_per_core)
{
uint32_t idx;
netdata_idx_t *val = cachestat_hash_values;
@@ -638,7 +669,7 @@ static void ebpf_cachestat_read_global_table()
for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, stored)) {
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs: 1;
netdata_idx_t total = 0;
for (i = 0; i < end; i++)
total += stored[i];
@@ -1053,6 +1084,7 @@ static void cachestat_collector(ebpf_module_t *em)
memset(&publish, 0, sizeof(publish));
int cgroups = em->cgroup_charts;
int update_every = em->update_every;
+ int maps_per_core = em->maps_per_core;
heartbeat_t hb;
heartbeat_init(&hb);
int counter = update_every - 1;
@@ -1065,13 +1097,13 @@ static void cachestat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_cachestat_read_global_table();
+ ebpf_cachestat_read_global_table(maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
- read_apps_table();
+ ebpf_read_cachestat_apps_table(maps_per_core);
if (cgroups)
- ebpf_update_cachestat_cgroup();
+ ebpf_update_cachestat_cgroup(maps_per_core);
pthread_mutex_lock(&lock);
@@ -1216,6 +1248,10 @@ static int ebpf_cachestat_set_internal_value()
*/
static int ebpf_cachestat_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(cachestat_maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].mode);
if (em->load & EBPF_LOAD_LEGACY) {
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index 5f1400601..5a07e4619 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -19,19 +19,35 @@ struct config dcstat_config = { .first_section = NULL,
.rwlock = AVL_LOCK_INITIALIZER } };
ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "dcstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
- .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = "dcstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_lookup_fast",
.function_to_attach = "lookup_fast",
@@ -138,10 +154,14 @@ static int ebpf_dc_attach_probes(struct dc_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_dc_adjust_map_size(struct dc_bpf *obj, ebpf_module_t *em)
+static void ebpf_dc_adjust_map(struct dc_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.dcstat_pid, &dcstat_maps[NETDATA_DCSTAT_PID_STATS],
em, bpf_map__name(obj->maps.dcstat_pid));
+
+ ebpf_update_map_type(obj->maps.dcstat_global, &dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS]);
+ ebpf_update_map_type(obj->maps.dcstat_pid, &dcstat_maps[NETDATA_DCSTAT_PID_STATS]);
+ ebpf_update_map_type(obj->maps.dcstat_ctrl, &dcstat_maps[NETDATA_DCSTAT_CTRL]);
}
/**
@@ -215,7 +235,7 @@ static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em)
ebpf_dc_disable_trampoline(obj);
}
- ebpf_dc_adjust_map_size(obj, em);
+ ebpf_dc_adjust_map(obj, em);
if (!em->apps_charts && !em->cgroup_charts)
ebpf_dc_disable_release_task(obj);
@@ -382,10 +402,11 @@ void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr)
* Sum all values read from kernel and store in the first address.
*
* @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
*/
-static void dcstat_apps_accumulator(netdata_dcstat_pid_t *out)
+static void dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_dcstat_pid_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_dcstat_pid_t *w = &out[i];
@@ -428,17 +449,22 @@ static void dcstat_fill_pid(uint32_t current_pid, netdata_dcstat_pid_t *publish)
}
/**
- * Read APPS table
+ * Read Directory Cache APPS table
*
* Read the apps table and store data inside the structure.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_apps_table()
+static void read_dc_apps_table(int maps_per_core)
{
netdata_dcstat_pid_t *cv = dcstat_vector;
uint32_t key;
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs;
+ size_t length = sizeof(netdata_dcstat_pid_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
while (pids) {
key = pids->pid;
@@ -447,7 +473,7 @@ static void read_apps_table()
continue;
}
- dcstat_apps_accumulator(cv);
+ dcstat_apps_accumulator(cv, maps_per_core);
dcstat_fill_pid(key, cv);
@@ -461,9 +487,11 @@ static void read_apps_table()
/**
* Update cgroup
*
- * Update cgroup data based in
+ * Update cgroup data based on collected PIDs.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_dc_cgroup()
+static void ebpf_update_dc_cgroup(int maps_per_core)
{
netdata_dcstat_pid_t *cv = dcstat_vector;
int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
@@ -486,7 +514,7 @@ static void ebpf_update_dc_cgroup()
continue;
}
- dcstat_apps_accumulator(cv);
+ dcstat_apps_accumulator(cv, maps_per_core);
memcpy(out, cv, sizeof(netdata_dcstat_pid_t));
}
@@ -499,8 +527,10 @@ static void ebpf_update_dc_cgroup()
* Read global table
*
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_dc_read_global_table()
+static void ebpf_dc_read_global_table(int maps_per_core)
{
uint32_t idx;
netdata_idx_t *val = dcstat_hash_values;
@@ -510,7 +540,7 @@ static void ebpf_dc_read_global_table()
for (idx = NETDATA_KEY_DC_REFERENCE; idx < NETDATA_DIRECTORY_CACHE_END; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, stored)) {
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs: 1;
netdata_idx_t total = 0;
for (i = 0; i < end; i++)
total += stored[i];
@@ -974,6 +1004,7 @@ static void dcstat_collector(ebpf_module_t *em)
heartbeat_t hb;
heartbeat_init(&hb);
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -982,13 +1013,13 @@ static void dcstat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_dc_read_global_table();
+ ebpf_dc_read_global_table(maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
- read_apps_table();
+ read_dc_apps_table(maps_per_core);
if (cgroups)
- ebpf_update_dc_cgroup();
+ ebpf_update_dc_cgroup(maps_per_core);
pthread_mutex_lock(&lock);
@@ -1084,6 +1115,10 @@ static void ebpf_dcstat_allocate_global_vectors(int apps)
*/
static int ebpf_dcstat_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(dcstat_maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_DC_TARGET_LOOKUP_FAST].mode);
if (em->load & EBPF_LOAD_LEGACY) {
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c
index e1a579441..71c972777 100644
--- a/collectors/ebpf.plugin/ebpf_disk.c
+++ b/collectors/ebpf.plugin/ebpf_disk.c
@@ -14,10 +14,25 @@ struct config disk_config = { .first_section = NULL,
static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = "tmp_disk_tp_stat", .internal_input = 8192, .user_input = 8192,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = NULL, .internal_input = 0, .user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
static avl_tree_lock disk_tree;
netdata_ebpf_disks_t *disk_list = NULL;
@@ -503,11 +518,12 @@ static void ebpf_fill_plot_disks(netdata_ebpf_disks_t *ptr)
/**
* Read hard disk table
*
- * @param table file descriptor for table
- *
* Read the table with number of calls for all functions
+ *
+ * @param table file descriptor for table
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_hard_disk_tables(int table)
+static void read_hard_disk_tables(int table, int maps_per_core)
{
netdata_idx_t *values = disk_hash_values;
block_key_t key = {};
@@ -548,7 +564,7 @@ static void read_hard_disk_tables(int table)
uint64_t total = 0;
int i;
- int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
for (i = 0; i < end; i++) {
total += values[i];
}
@@ -690,6 +706,7 @@ static void disk_collector(ebpf_module_t *em)
heartbeat_t hb;
heartbeat_init(&hb);
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -697,7 +714,7 @@ static void disk_collector(ebpf_module_t *em)
continue;
counter = 0;
- read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd);
+ read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd, maps_per_core);
pthread_mutex_lock(&lock);
ebpf_remove_pointer_from_plot_disk(em);
ebpf_latency_send_hd_data(update_every);
@@ -774,6 +791,9 @@ void *ebpf_disk_thread(void *ptr)
goto enddisk;
}
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(disk_maps, em->maps_per_core, running_on_kernel);
+#endif
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
goto enddisk;
diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/collectors/ebpf.plugin/ebpf_disk.h
index c606d6594..69c705875 100644
--- a/collectors/ebpf.plugin/ebpf_disk.h
+++ b/collectors/ebpf.plugin/ebpf_disk.h
@@ -55,7 +55,8 @@ typedef struct netdata_ebpf_disks {
} netdata_ebpf_disks_t;
enum ebpf_disk_tables {
- NETDATA_DISK_READ
+ NETDATA_DISK_READ,
+ NETDATA_DISK_TMP
};
typedef struct block_key {
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
index 96da91b0a..6d3868952 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -15,17 +15,33 @@ static netdata_publish_syscall_t fd_publish_aggregated[NETDATA_FD_SYSCALL_END];
static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,
.type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_fd_global", .internal_input = NETDATA_KEY_END_VECTOR,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = "fd_ctrl", .internal_input = NETDATA_CONTROLLER_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = NULL, .internal_input = 0, .user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
@@ -271,10 +287,14 @@ static void ebpf_fd_set_hash_tables(struct fd_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_fd_adjust_map_size(struct fd_bpf *obj, ebpf_module_t *em)
+static void ebpf_fd_adjust_map(struct fd_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.tbl_fd_pid, &fd_maps[NETDATA_FD_PID_STATS],
em, bpf_map__name(obj->maps.tbl_fd_pid));
+
+ ebpf_update_map_type(obj->maps.tbl_fd_global, &fd_maps[NETDATA_FD_GLOBAL_STATS]);
+ ebpf_update_map_type(obj->maps.tbl_fd_pid, &fd_maps[NETDATA_FD_PID_STATS]);
+ ebpf_update_map_type(obj->maps.fd_ctrl, &fd_maps[NETDATA_FD_CONTROLLER]);
}
/**
@@ -322,7 +342,7 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
ebpf_disable_specific_probes(obj);
}
- ebpf_fd_adjust_map_size(obj, em);
+ ebpf_fd_adjust_map(obj, em);
if (!em->apps_charts && !em->cgroup_charts)
ebpf_fd_disable_release_task(obj);
@@ -415,8 +435,10 @@ static void ebpf_fd_send_data(ebpf_module_t *em)
* Read global counter
*
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_fd_read_global_table()
+static void ebpf_fd_read_global_table(int maps_per_core)
{
uint32_t idx;
netdata_idx_t *val = fd_hash_values;
@@ -426,7 +448,7 @@ static void ebpf_fd_read_global_table()
for (idx = NETDATA_KEY_CALLS_DO_SYS_OPEN; idx < NETDATA_FD_COUNTER; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, stored)) {
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_idx_t total = 0;
for (i = 0; i < end; i++)
total += stored[i];
@@ -442,10 +464,11 @@ static void ebpf_fd_read_global_table()
* Sum all values read from kernel and store in the first address.
*
* @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
*/
-static void fd_apps_accumulator(netdata_fd_stat_t *out)
+static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_fd_stat_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_fd_stat_t *w = &out[i];
@@ -479,14 +502,19 @@ static void fd_fill_pid(uint32_t current_pid, netdata_fd_stat_t *publish)
* Read APPS table
*
* Read the apps table and store data inside the structure.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_apps_table()
+static void read_fd_apps_table(int maps_per_core)
{
netdata_fd_stat_t *fv = fd_vector;
uint32_t key;
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
- size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs;
+ size_t length = sizeof(netdata_fd_stat_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
while (pids) {
key = pids->pid;
@@ -495,7 +523,7 @@ static void read_apps_table()
continue;
}
- fd_apps_accumulator(fv);
+ fd_apps_accumulator(fv, maps_per_core);
fd_fill_pid(key, fv);
@@ -509,9 +537,11 @@ static void read_apps_table()
/**
* Update cgroup
*
- * Update cgroup data based in
+ * Update cgroup data collected per PID.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_fd_cgroup()
+static void ebpf_update_fd_cgroup(int maps_per_core)
{
ebpf_cgroup_target_t *ect ;
netdata_fd_stat_t *fv = fd_vector;
@@ -531,7 +561,7 @@ static void ebpf_update_fd_cgroup()
} else {
memset(fv, 0, length);
if (!bpf_map_lookup_elem(fd, &pid, fv)) {
- fd_apps_accumulator(fv);
+ fd_apps_accumulator(fv, maps_per_core);
memcpy(out, fv, sizeof(netdata_fd_stat_t));
}
@@ -915,6 +945,7 @@ static void fd_collector(ebpf_module_t *em)
heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -923,21 +954,21 @@ static void fd_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_fd_read_global_table();
+ ebpf_fd_read_global_table(maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
- read_apps_table();
+ read_fd_apps_table(maps_per_core);
+
+ if (cgroups)
+ ebpf_update_fd_cgroup(maps_per_core);
+
+ pthread_mutex_lock(&lock);
#ifdef NETDATA_DEV_MODE
if (ebpf_aral_fd_pid)
ebpf_send_data_aral_chart(ebpf_aral_fd_pid, em);
#endif
- if (cgroups)
- ebpf_update_fd_cgroup();
-
- pthread_mutex_lock(&lock);
-
ebpf_fd_send_data(em);
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
@@ -1082,6 +1113,10 @@ static void ebpf_fd_allocate_global_vectors(int apps)
*/
static int ebpf_fd_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(fd_maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_FD_SYSCALL_OPEN].mode);
if (em->load & EBPF_LOAD_LEGACY) {
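
The recurring change in this patch: how many slots a map lookup fills now depends on em->maps_per_core instead of the running kernel version. A standalone sketch of the accumulation rule that fd_apps_accumulator() and its siblings now follow (illustrative only, not Netdata code; NPROCS stands in for ebpf_nprocs): sum all per-CPU slots when the map is per-core, otherwise read only slot 0.

    #include <stdio.h>
    #include <stdint.h>

    #define NPROCS 4 /* stand-in for ebpf_nprocs */

    /* Sum the values a map lookup returned: one slot per CPU for a
     * per-CPU map, only slot 0 for a unique (shared) map. */
    static uint64_t accumulate(const uint64_t *stored, int maps_per_core)
    {
        int end = maps_per_core ? NPROCS : 1;
        uint64_t total = 0;
        for (int i = 0; i < end; i++)
            total += stored[i];
        return total;
    }

    int main(void)
    {
        uint64_t slots[NPROCS] = {3, 1, 4, 1}; /* as a PERCPU lookup would fill them */
        printf("per-core map: %llu\n", (unsigned long long)accumulate(slots, 1)); /* 9 */
        printf("unique map:   %llu\n", (unsigned long long)accumulate(slots, 0)); /* 3 */
        return 0;
    }
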
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c
index f8b28195c..63f592eb9 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.c
+++ b/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -8,27 +8,122 @@ struct config fs_config = { .first_section = NULL,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
-static ebpf_local_maps_t fs_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_xfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_nfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_zfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_btrfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_ext_addr", .internal_input = 1,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ebpf_local_maps_t ext4_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "tmp_ext4", .internal_input = 4192, .user_input = 4192,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ }};
+
+ebpf_local_maps_t xfs_maps[] = {{.name = "tbl_xfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "tmp_xfs", .internal_input = 4192, .user_input = 4192,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ }};
+
+ebpf_local_maps_t nfs_maps[] = {{.name = "tbl_nfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "tmp_nfs", .internal_input = 4192, .user_input = 4192,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ }};
+
+ebpf_local_maps_t zfs_maps[] = {{.name = "tbl_zfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "tmp_zfs", .internal_input = 4192, .user_input = 4192,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ }};
+
+ebpf_local_maps_t btrfs_maps[] = {{.name = "tbl_btrfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = "tbl_ext_addr", .internal_input = 1, .user_input = 1,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = "tmp_btrfs", .internal_input = 4192, .user_input = 4192,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ }};
static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS];
static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS];
@@ -176,26 +271,32 @@ int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em)
if (!efp->probe_links && efp->flags & NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM) {
em->thread_name = efp->filesystem;
em->kernels = efp->kernels;
+ em->maps = efp->fs_maps;
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &efp->objects);
if (!efp->probe_links) {
em->thread_name = saved_name;
em->kernels = kernels;
+ em->maps = NULL;
return -1;
}
efp->flags |= NETDATA_FILESYSTEM_FLAG_HAS_PARTITION;
pthread_mutex_lock(&lock);
- ebpf_update_kernel_memory(&plugin_statistics, &fs_maps[i], EBPF_ACTION_STAT_ADD);
+ ebpf_update_kernel_memory(&plugin_statistics, efp->fs_maps, EBPF_ACTION_STAT_ADD);
pthread_mutex_unlock(&lock);
// Needed for filesystems like btrfs
if ((efp->flags & NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE) && (efp->addresses.function)) {
- ebpf_load_addresses(&efp->addresses, fs_maps[i + 1].map_fd);
+ ebpf_load_addresses(&efp->addresses, efp->fs_maps[NETDATA_ADDR_FS_TABLE].map_fd);
}
}
efp->flags &= ~NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
}
em->thread_name = saved_name;
em->kernels = kernels;
+ em->maps = NULL;
if (!dimensions) {
dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS);
@@ -394,11 +495,13 @@ static inline netdata_ebpf_histogram_t *select_hist(ebpf_filesystem_partitions_t
/**
* Read hard disk table
*
- * @param table index for the hash table
+ * @param efp structure with filesystem monitored
+ * @param fd file descriptor to get data.
+ * @param maps_per_core do I need to read all cores?
*
* Read the table with number of calls for all functions
*/
-static void read_filesystem_table(ebpf_filesystem_partitions_t *efp, int fd)
+static void read_filesystem_table(ebpf_filesystem_partitions_t *efp, int fd, int maps_per_core)
{
netdata_idx_t *values = filesystem_hash_values;
uint32_t key;
@@ -416,7 +519,7 @@ static void read_filesystem_table(ebpf_filesystem_partitions_t *efp, int fd)
uint64_t total = 0;
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
for (i = 0; i < end; i++) {
total += values[i];
}
@@ -430,17 +533,17 @@ static void read_filesystem_table(ebpf_filesystem_partitions_t *efp, int fd)
/**
* Read hard disk table
*
- * @param table index for the hash table
- *
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_filesystem_tables()
+static void read_filesystem_tables(int maps_per_core)
{
int i;
for (i = 0; localfs[i].filesystem; i++) {
ebpf_filesystem_partitions_t *efp = &localfs[i];
if (efp->flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) {
- read_filesystem_table(efp, fs_maps[i].map_fd);
+ read_filesystem_table(efp, efp->fs_maps[NETDATA_MAIN_FS_TABLE].map_fd, maps_per_core);
}
}
}
@@ -464,7 +567,7 @@ void ebpf_filesystem_read_hash(ebpf_module_t *em)
if (em->optional)
return;
- read_filesystem_tables();
+ read_filesystem_tables(em->maps_per_core);
}
/**
@@ -546,6 +649,21 @@ static void ebpf_update_filesystem()
}
/**
+ * Set maps
+ *
+ * When the thread is initialized, the fs_maps pointers are NULL;
+ * this function fills them before use.
+ */
+static void ebpf_set_maps()
+{
+ localfs[NETDATA_FS_LOCALFS_EXT4].fs_maps = ext4_maps;
+ localfs[NETDATA_FS_LOCALFS_XFS].fs_maps = xfs_maps;
+ localfs[NETDATA_FS_LOCALFS_NFS].fs_maps = nfs_maps;
+ localfs[NETDATA_FS_LOCALFS_ZFS].fs_maps = zfs_maps;
+ localfs[NETDATA_FS_LOCALFS_BTRFS].fs_maps = btrfs_maps;
+}
+
+/**
* Filesystem thread
*
* Thread used to generate filesystem charts.
@@ -559,7 +677,7 @@ void *ebpf_filesystem_thread(void *ptr)
netdata_thread_cleanup_push(ebpf_filesystem_exit, ptr);
ebpf_module_t *em = (ebpf_module_t *)ptr;
- em->maps = fs_maps;
+ ebpf_set_maps();
ebpf_update_filesystem();
// Initialize optional as zero, to identify when there are not partitions to monitor
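
Each filesystem now carries its own map vector (ext4_maps, xfs_maps, ...) instead of indexing a shared fs_maps array, and ebpf_set_maps() wires them up through the new netdata_filesystem_localfs_idx enum. A simplified, runnable stand-in for that wiring (types and names here are reduced sketches of the real structures):

    #include <stdio.h>

    typedef struct { const char *name; } sk_map_t;
    typedef struct { const char *filesystem; sk_map_t *fs_maps; } sk_partition_t;

    enum { SK_FS_EXT4, SK_FS_XFS, SK_FS_END };

    static sk_map_t ext4_maps_sk[] = {{ "tbl_ext4" }};
    static sk_map_t xfs_maps_sk[]  = {{ "tbl_xfs"  }};
    static sk_partition_t localfs_sk[SK_FS_END] = {{ "ext4", NULL }, { "xfs", NULL }};

    /* Mirror of ebpf_set_maps(): fill the per-filesystem pointers once,
     * before any partition is loaded. */
    static void set_maps(void)
    {
        localfs_sk[SK_FS_EXT4].fs_maps = ext4_maps_sk;
        localfs_sk[SK_FS_XFS].fs_maps  = xfs_maps_sk;
    }

    int main(void)
    {
        set_maps();
        for (int i = 0; i < SK_FS_END; i++)
            printf("%s -> %s\n", localfs_sk[i].filesystem, localfs_sk[i].fs_maps[0].name);
        return 0;
    }
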
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h
index cf19b253e..b1126badb 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.h
+++ b/collectors/ebpf.plugin/ebpf_filesystem.h
@@ -42,6 +42,16 @@ enum netdata_filesystem_table {
NETDATA_ADDR_FS_TABLE
};
+enum netdata_filesystem_localfs_idx {
+ NETDATA_FS_LOCALFS_EXT4,
+ NETDATA_FS_LOCALFS_XFS,
+ NETDATA_FS_LOCALFS_NFS,
+ NETDATA_FS_LOCALFS_ZFS,
+ NETDATA_FS_LOCALFS_BTRFS,
+
+ NETDATA_FS_LOCALFS_END,
+};
+
void *ebpf_filesystem_thread(void *ptr);
extern struct config fs_config;
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c
index b4d49dc00..113648ec9 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.c
+++ b/collectors/ebpf.plugin/ebpf_hardirq.c
@@ -17,14 +17,20 @@ static ebpf_local_maps_t hardirq_maps[] = {
.internal_input = NETDATA_HARDIRQ_MAX_IRQS,
.user_input = 0,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
},
{
.name = "tbl_hardirq_static",
.internal_input = HARDIRQ_EBPF_STATIC_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
},
/* end */
{
@@ -32,7 +38,10 @@ static ebpf_local_maps_t hardirq_maps[] = {
.internal_input = 0,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
}
};
@@ -555,6 +564,9 @@ void *ebpf_hardirq_thread(void *ptr)
goto endhardirq;
}
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
goto endhardirq;
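
Every legacy-load path now calls ebpf_define_map_type() before ebpf_load_program(). Its body is outside this diff; the sketch below only models the contract the call sites suggest: keep the PERCPU types declared in the map tables when maps_per_core is enabled, otherwise fall back to the ordinary map types (the helper and type names here are hypothetical stand-ins).

    /* Hypothetical model of ebpf_define_map_type(); the real helper
     * lives in shared plugin code not shown in this diff. */
    enum sk_type { SK_HASH, SK_PERCPU_HASH, SK_ARRAY, SK_PERCPU_ARRAY };
    typedef struct { const char *name; enum sk_type map_type; } sk_map_t;

    static void define_map_type(sk_map_t *maps, int maps_per_core)
    {
        if (maps_per_core)
            return; /* keep the PERCPU types from the tables */

        for (sk_map_t *m = maps; m && m->name; m++) {
            if (m->map_type == SK_PERCPU_HASH)
                m->map_type = SK_HASH;   /* one shared slot per key */
            else if (m->map_type == SK_PERCPU_ARRAY)
                m->map_type = SK_ARRAY;
        }
    }

    int main(void)
    {
        sk_map_t maps[] = {{"tbl_hardirq", SK_PERCPU_HASH}, {0}};
        define_map_type(maps, 0);
        return maps[0].map_type == SK_HASH ? 0 : 1;
    }
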
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c
index fc794e5e5..321bd97ee 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.c
+++ b/collectors/ebpf.plugin/ebpf_mdflush.c
@@ -16,7 +16,10 @@ static ebpf_local_maps_t mdflush_maps[] = {
.internal_input = 1024,
.user_input = 0,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
},
/* end */
{
@@ -87,7 +90,14 @@ static int mdflush_val_cmp(void *a, void *b)
}
}
-static void mdflush_read_count_map()
+/**
+ * Read count map
+ *
+ * Read the hash table and store data to allocated vectors.
+ *
+ * @param maps_per_core do I need to read all cores?
+ */
+static void mdflush_read_count_map(int maps_per_core)
{
int mapfd = mdflush_maps[MDFLUSH_MAP_COUNT].map_fd;
mdflush_ebpf_key_t curr_key = (uint32_t)-1;
@@ -137,7 +147,7 @@ static void mdflush_read_count_map()
// we must add up count value for this record across all CPUs.
uint64_t total_cnt = 0;
int i;
- int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ int end = (!maps_per_core) ? 1 : ebpf_nprocs;
for (i = 0; i < end; i++) {
total_cnt += mdflush_ebpf_vals[i];
}
@@ -215,6 +225,7 @@ static void mdflush_collector(ebpf_module_t *em)
heartbeat_t hb;
heartbeat_init(&hb);
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -222,7 +233,8 @@ static void mdflush_collector(ebpf_module_t *em)
continue;
counter = 0;
- mdflush_read_count_map();
+ mdflush_read_count_map(maps_per_core);
+ pthread_mutex_lock(&lock);
// write dims now for all hitherto discovered devices.
write_begin_chart("mdstat", "mdstat_flush");
avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL);
@@ -251,6 +263,9 @@ void *ebpf_mdflush_thread(void *ptr)
goto endmdflush;
}
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
goto endmdflush;
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c
index a2a4c5530..e0951f8c4 100644
--- a/collectors/ebpf.plugin/ebpf_mount.c
+++ b/collectors/ebpf.plugin/ebpf_mount.c
@@ -5,10 +5,18 @@
static ebpf_local_maps_t mount_maps[] = {{.name = "tbl_mount", .internal_input = NETDATA_MOUNT_END,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = NULL, .internal_input = 0, .user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umount" };
static netdata_syscall_stat_t mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL];
@@ -192,6 +200,8 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_
ebpf_mount_disable_trampoline(obj);
}
+ ebpf_update_map_type(obj->maps.tbl_mount, &mount_maps[NETDATA_KEY_MOUNT_TABLE]);
+
int ret = mount_bpf__load(obj);
if (!ret) {
if (test != EBPF_LOAD_PROBE && test != EBPF_LOAD_RETPROBE )
@@ -249,8 +259,10 @@ static void ebpf_mount_exit(void *ptr)
* Read global table
*
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_mount_read_global_table()
+static void ebpf_mount_read_global_table(int maps_per_core)
{
static netdata_idx_t *mount_values = NULL;
if (!mount_values)
@@ -259,17 +271,22 @@ static void ebpf_mount_read_global_table()
uint32_t idx;
netdata_idx_t *val = mount_hash_values;
netdata_idx_t *stored = mount_values;
+ size_t length = sizeof(netdata_idx_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
int fd = mount_maps[NETDATA_KEY_MOUNT_TABLE].map_fd;
for (idx = NETDATA_KEY_MOUNT_CALL; idx < NETDATA_MOUNT_END; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, stored)) {
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_idx_t total = 0;
for (i = 0; i < end; i++)
total += stored[i];
val[idx] = total;
+ memset(stored, 0, length);
}
}
}
@@ -304,13 +321,14 @@ static void mount_collector(ebpf_module_t *em)
heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
continue;
counter = 0;
- ebpf_mount_read_global_table();
+ ebpf_mount_read_global_table(maps_per_core);
pthread_mutex_lock(&lock);
ebpf_mount_send_data();
@@ -372,6 +390,10 @@ static void ebpf_create_mount_charts(int update_every)
*/
static int ebpf_mount_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
if (em->load & EBPF_LOAD_LEGACY) {
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
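
Readers such as ebpf_mount_read_global_table() now also zero the lookup buffer after consuming each key, sized to exactly the bytes a lookup may have written. A minimal sketch of that sizing rule (plain C, no BPF dependencies; nprocs stands in for ebpf_nprocs):

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        int nprocs = 4;        /* stand-in for ebpf_nprocs */
        int maps_per_core = 1; /* stand-in for em->maps_per_core */

        /* One slot for a unique map, nprocs slots for a per-CPU map. */
        size_t length = sizeof(uint64_t);
        if (maps_per_core)
            length *= nprocs;

        uint64_t *stored = calloc(nprocs, sizeof(uint64_t));
        if (!stored)
            return 1;
        /* ... bpf_map_lookup_elem(fd, &idx, stored) would fill the slots ... */
        memset(stored, 0, length); /* keep stale values from leaking into the next key */
        free(stored);
        return 0;
    }
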
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index 856c922ec..094875292 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -16,7 +16,10 @@ static ebpf_local_maps_t oomkill_maps[] = {
.internal_input = NETDATA_OOMKILL_MAX_ENTRIES,
.user_input = 0,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
},
/* end */
{
@@ -24,7 +27,10 @@ static ebpf_local_maps_t oomkill_maps[] = {
.internal_input = 0,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
}
};
@@ -285,6 +291,8 @@ static void ebpf_update_oomkill_cgroup(int32_t *keys, uint32_t total)
/**
* Main loop for this collector.
+ *
+ * @param em the main thread structure.
*/
static void oomkill_collector(ebpf_module_t *em)
{
@@ -384,6 +392,9 @@ void *ebpf_oomkill_thread(void *ptr)
goto endoomkill;
}
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
goto endoomkill;
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 66af47857..17a9809d3 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -18,17 +18,33 @@ static char *status[] = { "process", "zombie" };
static ebpf_local_maps_t process_maps[] = {{.name = "tbl_pid_stats", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,
.type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_total_stats", .internal_input = NETDATA_KEY_END_VECTOR,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = "process_ctrl", .internal_input = NETDATA_CONTROLLER_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = NULL, .internal_input = 0, .user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
char *tracepoint_sched_type = { "sched" } ;
char *tracepoint_sched_process_exit = { "sched_process_exit" };
@@ -39,6 +55,7 @@ static int was_sched_process_exec_enabled = 0;
static int was_sched_process_fork_enabled = 0;
static netdata_idx_t *process_hash_values = NULL;
+ebpf_process_stat_t *process_stat_vector = NULL;
static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
@@ -55,6 +72,7 @@ static char *threads_stat[NETDATA_EBPF_THREAD_STAT_END] = {"total", "running"};
static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"};
static char *memlock_stat = {"memory_locked"};
static char *hash_table_stat = {"hash_table"};
+static char *hash_table_core[NETDATA_EBPF_PER_CORE_END] = {"per_core", "unique"};
/*****************************************************************
*
@@ -251,8 +269,10 @@ void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em)
/**
* Read the hash table and store data to allocated vectors.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_hash_global_tables()
+static void ebpf_read_process_hash_global_tables(int maps_per_core)
{
uint64_t idx;
netdata_idx_t res[NETDATA_KEY_END_VECTOR];
@@ -263,7 +283,7 @@ static void read_hash_global_tables()
if (!bpf_map_lookup_elem(fd, &idx, val)) {
uint64_t total = 0;
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
for (i = 0; i < end; i++)
total += val[i];
@@ -285,13 +305,18 @@ static void read_hash_global_tables()
/**
* Update cgroup
*
- * Update cgroup data based in
+ * Update cgroup data based on running PIDs.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_process_cgroup()
+static void ebpf_update_process_cgroup(int maps_per_core)
{
ebpf_cgroup_target_t *ect ;
int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
+ size_t length = sizeof(ebpf_process_stat_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
struct pid_on_target2 *pids;
@@ -303,9 +328,15 @@ static void ebpf_update_process_cgroup()
memcpy(out, in, sizeof(ebpf_process_stat_t));
} else {
- if (bpf_map_lookup_elem(pid_fd, &pid, out)) {
+ if (bpf_map_lookup_elem(pid_fd, &pid, process_stat_vector)) {
memset(out, 0, sizeof(ebpf_process_stat_t));
}
+
+ ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
+
+ memcpy(out, process_stat_vector, sizeof(ebpf_process_stat_t));
+
+ memset(process_stat_vector, 0, length);
}
}
}
@@ -507,6 +538,35 @@ static inline void ebpf_create_statistic_hash_tables(ebpf_module_t *em)
}
/**
+ * Create chart for percpu stats
+ *
+ * Write to standard output the chart definition used to monitor per-core hash tables.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static inline void ebpf_create_statistic_hash_per_core(ebpf_module_t *em)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_HASH_TABLES_PER_CORE,
+ "How threads are loading hash/array tables.",
+ "threads",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ 140004,
+ em->update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
+ hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+
+ ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
+ hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+}
+
+/**
* Update Internal Metric variable
*
* By default eBPF.plugin sends internal metrics for netdata, but user can
@@ -541,6 +601,8 @@ static void ebpf_create_statistic_charts(ebpf_module_t *em)
ebpf_create_statistic_kernel_memory(em);
ebpf_create_statistic_hash_tables(em);
+
+ ebpf_create_statistic_hash_per_core(em);
}
/**
@@ -647,6 +709,7 @@ static void ebpf_process_exit(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
freez(process_hash_values);
+ freez(process_stat_vector);
ebpf_process_disable_tracepoints();
@@ -1010,6 +1073,11 @@ void ebpf_send_statistic_data()
write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED);
write_chart_dimension(hash_table_stat, (long long)plugin_statistics.hash_tables);
write_end_chart();
+
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_PER_CORE);
+ write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu);
+ write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique);
+ write_end_chart();
}
/**
@@ -1032,6 +1100,7 @@ static void process_collector(ebpf_module_t *em)
int update_every = em->update_every;
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
(void)dt;
@@ -1041,14 +1110,14 @@ static void process_collector(ebpf_module_t *em)
if (++counter == update_every) {
counter = 0;
- read_hash_global_tables();
+ ebpf_read_process_hash_global_tables(maps_per_core);
netdata_apps_integration_flags_t apps_enabled = em->apps_charts;
pthread_mutex_lock(&collect_data_mutex);
if (ebpf_all_pids_count > 0) {
if (cgroups && shm_ebpf_cgroup.header) {
- ebpf_update_process_cgroup();
+ ebpf_update_process_cgroup(maps_per_core);
}
}
@@ -1099,6 +1168,7 @@ static void ebpf_process_allocate_global_vectors(size_t length)
memset(process_aggregated_data, 0, length * sizeof(netdata_syscall_stat_t));
memset(process_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t));
process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+ process_stat_vector = callocz(ebpf_nprocs, sizeof(ebpf_process_stat_t));
global_process_stats = callocz((size_t)pid_max, sizeof(ebpf_process_stat_t *));
}
@@ -1195,8 +1265,7 @@ void *ebpf_process_thread(void *ptr)
set_local_pointers();
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
- pthread_mutex_unlock(&lock);
- goto endprocess;
+ em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = NETDATA_THREAD_EBPF_STOPPING;
}
int algorithms[NETDATA_KEY_PUBLISH_PROCESS_END] = {
@@ -1225,7 +1294,6 @@ void *ebpf_process_thread(void *ptr)
process_collector(em);
-endprocess:
pthread_mutex_lock(&ebpf_exit_cleanup);
if (em->enabled == NETDATA_THREAD_EBPF_RUNNING)
ebpf_update_disabled_plugin_stats(em);
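
The process thread also gains a statistics chart splitting loaded hash/array tables into per_core and unique dimensions, fed from plugin_statistics.hash_percpu and .hash_unique. How those counters are incremented is not part of this diff; the sketch below only illustrates the kind of split they represent (names are stand-ins):

    #include <stdio.h>

    enum sk_kind { SK_PERCPU, SK_UNIQUE };
    typedef struct { const char *name; enum sk_kind kind; } sk_map_t;

    int main(void)
    {
        sk_map_t loaded[] = {{"tbl_pid_stats", SK_PERCPU}, {"process_ctrl", SK_UNIQUE}};
        long hash_percpu = 0, hash_unique = 0;
        for (unsigned long i = 0; i < sizeof(loaded) / sizeof(loaded[0]); i++) {
            if (loaded[i].kind == SK_PERCPU)
                hash_percpu++;
            else
                hash_unique++;
        }
        printf("per_core %ld unique %ld\n", hash_percpu, hash_unique);
        return 0;
    }
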
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
index 5f119aea1..bccdc0eb5 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -56,6 +56,13 @@ enum netdata_ebpf_load_mode_stats{
NETDATA_EBPF_LOAD_STAT_END
};
+enum netdata_ebpf_thread_per_core{
+ NETDATA_EBPF_THREAD_PER_CORE,
+ NETDATA_EBPF_THREAD_UNIQUE,
+
+ NETDATA_EBPF_PER_CORE_END
+};
+
// Index from kernel
typedef enum ebpf_process_index {
NETDATA_KEY_CALLS_DO_EXIT,
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
index f81c01964..093d65b60 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -21,15 +21,27 @@ struct config shm_config = { .first_section = NULL,
static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,
.type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "shm_ctrl", .internal_input = NETDATA_CONTROLLER_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = "tbl_shm", .internal_input = NETDATA_SHM_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = NULL, .internal_input = 0, .user_input = 0}};
netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -215,10 +227,14 @@ static void ebpf_shm_disable_release_task(struct shm_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_shm_adjust_map_size(struct shm_bpf *obj, ebpf_module_t *em)
+static void ebpf_shm_adjust_map(struct shm_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.tbl_pid_shm, &shm_maps[NETDATA_PID_SHM_TABLE],
em, bpf_map__name(obj->maps.tbl_pid_shm));
+
+ ebpf_update_map_type(obj->maps.tbl_shm, &shm_maps[NETDATA_SHM_GLOBAL_TABLE]);
+ ebpf_update_map_type(obj->maps.tbl_pid_shm, &shm_maps[NETDATA_PID_SHM_TABLE]);
+ ebpf_update_map_type(obj->maps.shm_ctrl, &shm_maps[NETDATA_SHM_CONTROLLER]);
}
/**
@@ -250,7 +266,7 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e
ebpf_disable_trampoline(obj);
}
- ebpf_shm_adjust_map_size(obj, em);
+ ebpf_shm_adjust_map(obj, em);
if (!em->apps_charts && !em->cgroup_charts)
ebpf_shm_disable_release_task(obj);
@@ -312,10 +328,11 @@ static void ebpf_shm_exit(void *ptr)
* Sum all values read from kernel and store in the first address.
*
* @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
*/
-static void shm_apps_accumulator(netdata_publish_shm_t *out)
+static void shm_apps_accumulator(netdata_publish_shm_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_publish_shm_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_publish_shm_t *w = &out[i];
@@ -349,12 +366,17 @@ static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish)
* Update cgroup
*
* Update cgroup data based on collected PID data.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_shm_cgroup()
+static void ebpf_update_shm_cgroup(int maps_per_core)
{
netdata_publish_shm_t *cv = shm_vector;
int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_shm_t) * ebpf_nprocs;
+ size_t length = sizeof(netdata_publish_shm_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
ebpf_cgroup_target_t *ect;
memset(cv, 0, length);
@@ -371,7 +393,7 @@ static void ebpf_update_shm_cgroup()
memcpy(out, in, sizeof(netdata_publish_shm_t));
} else {
if (!bpf_map_lookup_elem(fd, &pid, cv)) {
- shm_apps_accumulator(cv);
+ shm_apps_accumulator(cv, maps_per_core);
memcpy(out, cv, sizeof(netdata_publish_shm_t));
@@ -389,14 +411,19 @@ static void ebpf_update_shm_cgroup()
* Read APPS table
*
* Read the apps table and store data inside the structure.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_apps_table()
+static void read_shm_apps_table(int maps_per_core)
{
netdata_publish_shm_t *cv = shm_vector;
uint32_t key;
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_shm_t)*ebpf_nprocs;
+ size_t length = sizeof(netdata_publish_shm_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
while (pids) {
key = pids->pid;
@@ -405,7 +432,7 @@ static void read_apps_table()
continue;
}
- shm_apps_accumulator(cv);
+ shm_apps_accumulator(cv, maps_per_core);
shm_fill_pid(key, cv);
@@ -446,23 +473,29 @@ static void shm_send_global()
* Read global counter
*
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_shm_read_global_table()
+static void ebpf_shm_read_global_table(int maps_per_core)
{
netdata_idx_t *stored = shm_values;
netdata_idx_t *val = shm_hash_values;
int fd = shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd;
+ size_t length = sizeof(netdata_idx_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
uint32_t i, end = NETDATA_SHM_END;
for (i = NETDATA_KEY_SHMGET_CALL; i < end; i++) {
if (!bpf_map_lookup_elem(fd, &i, stored)) {
int j;
- int last = ebpf_nprocs;
+ int last = (maps_per_core) ? ebpf_nprocs : 1;
netdata_idx_t total = 0;
for (j = 0; j < last; j++)
total += stored[j];
val[i] = total;
+ memset(stored, 0, length);
}
}
}
@@ -831,6 +864,7 @@ static void shm_collector(ebpf_module_t *em)
heartbeat_t hb;
heartbeat_init(&hb);
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -838,14 +872,14 @@ static void shm_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_shm_read_global_table();
+ ebpf_shm_read_global_table(maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps) {
- read_apps_table();
+ read_shm_apps_table(maps_per_core);
}
if (cgroups) {
- ebpf_update_shm_cgroup();
+ ebpf_update_shm_cgroup(maps_per_core);
}
pthread_mutex_lock(&lock);
@@ -984,6 +1018,10 @@ static void ebpf_create_shm_charts(int update_every)
*/
static int ebpf_shm_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_SHMGET_CALL].mode);
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index aebc9ca12..b45dec7d9 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -27,35 +27,67 @@ static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_bandwidth",
.internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED,
.user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED,
.type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_global_sock",
.internal_input = NETDATA_SOCKET_COUNTER,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = "tbl_lports",
.internal_input = NETDATA_SOCKET_COUNTER,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_conn_ipv4",
.internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED,
.user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_conn_ipv6",
.internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED,
.user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_nv_udp",
.internal_input = NETDATA_COMPILED_UDP_CONNECTIONS_ALLOWED,
.user_input = NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "socket_ctrl", .internal_input = NETDATA_CONTROLLER_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
static netdata_idx_t *socket_hash_values = NULL;
static netdata_syscall_stat_t socket_aggregated_data[NETDATA_MAX_SOCKET_VECTOR];
@@ -362,7 +394,7 @@ static void ebpf_socket_set_hash_tables(struct socket_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_socket_adjust_map_size(struct socket_bpf *obj, ebpf_module_t *em)
+static void ebpf_socket_adjust_map(struct socket_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.tbl_bandwidth, &socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH],
em, bpf_map__name(obj->maps.tbl_bandwidth));
@@ -375,6 +407,15 @@ static void ebpf_socket_adjust_map_size(struct socket_bpf *obj, ebpf_module_t *e
ebpf_update_map_size(obj->maps.tbl_nv_udp, &socket_maps[NETDATA_SOCKET_TABLE_UDP],
em, bpf_map__name(obj->maps.tbl_nv_udp));
+
+
+ ebpf_update_map_type(obj->maps.tbl_bandwidth, &socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH]);
+ ebpf_update_map_type(obj->maps.tbl_conn_ipv4, &socket_maps[NETDATA_SOCKET_TABLE_IPV4]);
+ ebpf_update_map_type(obj->maps.tbl_conn_ipv6, &socket_maps[NETDATA_SOCKET_TABLE_IPV6]);
+ ebpf_update_map_type(obj->maps.tbl_nv_udp, &socket_maps[NETDATA_SOCKET_TABLE_UDP]);
+ ebpf_update_map_type(obj->maps.socket_ctrl, &socket_maps[NETDATA_SOCKET_TABLE_CTRL]);
+ ebpf_update_map_type(obj->maps.tbl_global_sock, &socket_maps[NETDATA_SOCKET_GLOBAL]);
+ ebpf_update_map_type(obj->maps.tbl_lports, &socket_maps[NETDATA_SOCKET_LPORTS]);
}
/**
@@ -403,14 +444,14 @@ static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_modul
ebpf_socket_disable_specific_probe(obj, em->mode);
}
+ ebpf_socket_adjust_map(obj, em);
+
int ret = socket_bpf__load(obj);
if (ret) {
fprintf(stderr, "failed to load BPF object: %d\n", ret);
return ret;
}
- ebpf_socket_adjust_map_size(obj, em);
-
if (test == EBPF_LOAD_TRAMPOLINE) {
ret = socket_bpf__attach(obj);
} else {
@@ -1988,17 +2029,23 @@ static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key
*
* @param fd the hash table with data.
* @param family the family associated to the hash table
+ * @param maps_per_core do I need to read all cores?
*
* @return it returns 0 on success and -1 otherwise.
*/
-static void ebpf_read_socket_hash_table(int fd, int family)
+static void ebpf_read_socket_hash_table(int fd, int family, int maps_per_core)
{
netdata_socket_idx_t key = {};
netdata_socket_idx_t next_key = {};
netdata_socket_t *values = socket_values;
- size_t length = ebpf_nprocs*sizeof(netdata_socket_t);
- int test, end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ size_t length = sizeof(netdata_socket_t);
+ int test, end;
+ if (maps_per_core) {
+ length *= ebpf_nprocs;
+ end = ebpf_nprocs;
+ } else
+ end = 1;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
// We need to reset the values when we are working on kernel 4.15 or newer, because kernel does not create
@@ -2122,11 +2169,13 @@ static void read_listen_table()
void *ebpf_socket_read_hash(void *ptr)
{
netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr);
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
heartbeat_t hb;
heartbeat_init(&hb);
int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd;
int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd;
+ int maps_per_core = em->maps_per_core;
// This thread is cancelled from another thread
for (;;) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -2134,8 +2183,8 @@ void *ebpf_socket_read_hash(void *ptr)
break;
pthread_mutex_lock(&nv_mutex);
- ebpf_read_socket_hash_table(fd_ipv4, AF_INET);
- ebpf_read_socket_hash_table(fd_ipv6, AF_INET6);
+ ebpf_read_socket_hash_table(fd_ipv4, AF_INET, maps_per_core);
+ ebpf_read_socket_hash_table(fd_ipv6, AF_INET6, maps_per_core);
pthread_mutex_unlock(&nv_mutex);
}
@@ -2145,23 +2194,30 @@ void *ebpf_socket_read_hash(void *ptr)
/**
* Read the hash table and store data to allocated vectors.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_hash_global_tables()
+static void read_hash_global_tables(int maps_per_core)
{
uint64_t idx;
netdata_idx_t res[NETDATA_SOCKET_COUNTER];
netdata_idx_t *val = socket_hash_values;
+ size_t length = sizeof(netdata_idx_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
int fd = socket_maps[NETDATA_SOCKET_GLOBAL].map_fd;
for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, val)) {
uint64_t total = 0;
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
for (i = 0; i < end; i++)
total += val[i];
res[idx] = total;
+ memset(socket_hash_values, 0, length);
} else {
res[idx] = 0;
}
@@ -2220,9 +2276,9 @@ void ebpf_socket_fill_publish_apps(uint32_t current_pid, ebpf_bandwidth_t *eb)
*
* @param out the vector with the values to sum
*/
-void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out)
+void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
ebpf_bandwidth_t *total = &out[0];
for (i = 1; i < end; i++) {
ebpf_bandwidth_t *move = &out[i];
@@ -2241,13 +2297,18 @@ void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out)
/**
* Update the apps data reading information from the hash table
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_socket_update_apps_data()
+static void ebpf_socket_update_apps_data(int maps_per_core)
{
int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd;
ebpf_bandwidth_t *eb = bandwidth_vector;
uint32_t key;
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
+ size_t length = sizeof(ebpf_bandwidth_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
while (pids) {
key = pids->pid;
@@ -2256,10 +2317,12 @@ static void ebpf_socket_update_apps_data()
continue;
}
- ebpf_socket_bandwidth_accumulator(eb);
+ ebpf_socket_bandwidth_accumulator(eb, maps_per_core);
ebpf_socket_fill_publish_apps(key, eb);
+ memset(eb, 0, length);
+
pids = pids->next;
}
}
@@ -2267,15 +2330,21 @@ static void ebpf_socket_update_apps_data()
/**
* Update cgroup
*
- * Update cgroup data based in
+ * Update cgroup data based on PIDs.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_socket_cgroup()
+static void ebpf_update_socket_cgroup(int maps_per_core)
{
ebpf_cgroup_target_t *ect ;
ebpf_bandwidth_t *eb = bandwidth_vector;
int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd;
+ size_t length = sizeof(ebpf_bandwidth_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
struct pid_on_target2 *pids;
@@ -2298,7 +2367,7 @@ static void ebpf_update_socket_cgroup()
publish->call_tcp_v6_connection = in->call_tcp_v6_connection;
} else {
if (!bpf_map_lookup_elem(fd, &pid, eb)) {
- ebpf_socket_bandwidth_accumulator(eb);
+ ebpf_socket_bandwidth_accumulator(eb, maps_per_core);
memcpy(out, eb, sizeof(ebpf_bandwidth_t));
@@ -2312,6 +2381,8 @@ static void ebpf_update_socket_cgroup()
publish->call_close = out->close;
publish->call_tcp_v4_connection = out->tcp_v4_connection;
publish->call_tcp_v6_connection = out->tcp_v6_connection;
+
+ memset(eb, 0, length);
}
}
}
@@ -2845,6 +2916,7 @@ static void socket_collector(ebpf_module_t *em)
int socket_global_enabled = em->global_charts;
int update_every = em->update_every;
+ int maps_per_core = em->maps_per_core;
int counter = update_every - 1;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -2855,15 +2927,15 @@ static void socket_collector(ebpf_module_t *em)
netdata_apps_integration_flags_t socket_apps_enabled = em->apps_charts;
if (socket_global_enabled) {
read_listen_table();
- read_hash_global_tables();
+ read_hash_global_tables(maps_per_core);
}
pthread_mutex_lock(&collect_data_mutex);
if (socket_apps_enabled)
- ebpf_socket_update_apps_data();
+ ebpf_socket_update_apps_data(maps_per_core);
if (cgroups)
- ebpf_update_socket_cgroup();
+ ebpf_update_socket_cgroup(maps_per_core);
if (network_connection)
calculate_nv_plot();
@@ -3855,6 +3927,10 @@ void parse_table_size_options(struct config *cfg)
*/
static int ebpf_socket_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
if (em->load & EBPF_LOAD_LEGACY) {
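
Note the reordering in ebpf_socket_load_and_attach(): ebpf_socket_adjust_map() now runs before socket_bpf__load(), because libbpf only allows map attributes such as the type to be changed while the object is open but not yet loaded. A hedged libbpf sketch of that ordering (the object path and map name are placeholders):

    #include <bpf/libbpf.h>

    /* Map attributes must be set between open and load. */
    static int load_with_map_type(const char *path, int maps_per_core)
    {
        struct bpf_object *obj = bpf_object__open_file(path, NULL);
        if (!obj)
            return -1;

        struct bpf_map *map = bpf_object__find_map_by_name(obj, "tbl_global_sock");
        if (map && !maps_per_core)
            bpf_map__set_type(map, BPF_MAP_TYPE_ARRAY); /* downgrade before load */

        int ret = bpf_object__load(obj); /* the type is frozen from here on */
        if (ret)
            bpf_object__close(obj);
        return ret;
    }

    int main(void)
    {
        return load_with_map_type("socket.o", 0) ? 1 : 0;
    }
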
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c
index 33abbdf5e..01e2d0a52 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/collectors/ebpf.plugin/ebpf_softirq.c
@@ -16,7 +16,10 @@ static ebpf_local_maps_t softirq_maps[] = {
.internal_input = NETDATA_SOFTIRQ_MAX_IRQS,
.user_input = 0,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
},
/* end */
{
@@ -24,7 +27,10 @@ static ebpf_local_maps_t softirq_maps[] = {
.internal_input = 0,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
}
};
@@ -94,10 +100,21 @@ static void softirq_cleanup(void *ptr)
* MAIN LOOP
*****************************************************************/
-static void softirq_read_latency_map()
+/**
+ * Read Latency Map
+ *
+ * Read data from the kernel table to plot for users.
+ *
+ * @param maps_per_core do I need to read all cores?
+ */
+static void softirq_read_latency_map(int maps_per_core)
{
int fd = softirq_maps[SOFTIRQ_MAP_LATENCY].map_fd;
int i;
+ size_t length = sizeof(softirq_ebpf_val_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
int test = bpf_map_lookup_elem(fd, &i, softirq_ebpf_vals);
if (unlikely(test < 0)) {
@@ -106,12 +123,13 @@ static void softirq_read_latency_map()
uint64_t total_latency = 0;
int cpu_i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
for (cpu_i = 0; cpu_i < end; cpu_i++) {
total_latency += softirq_ebpf_vals[cpu_i].latency/1000;
}
softirq_vals[i].latency = total_latency;
+ memset(softirq_ebpf_vals, 0, length);
}
}
@@ -172,6 +190,7 @@ static void softirq_collector(ebpf_module_t *em)
heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
// This will be cancelled by its parent
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -179,7 +198,7 @@ static void softirq_collector(ebpf_module_t *em)
continue;
counter = 0;
- softirq_read_latency_map();
+ softirq_read_latency_map(maps_per_core);
pthread_mutex_lock(&lock);
// write dims now for all hitherto discovered IRQs.
@@ -212,6 +231,9 @@ void *ebpf_softirq_thread(void *ptr)
goto endsoftirq;
}
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
em->probe_links = ebpf_load_program(ebpf_plugin_dir, em, running_on_kernel, isrh, &em->objects);
if (!em->probe_links) {
goto endsoftirq;
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 2352470a4..c9129a3fa 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -21,16 +21,32 @@ struct config swap_config = { .first_section = NULL,
static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0,
.type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "swap_ctrl", .internal_input = NETDATA_CONTROLLER_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = "tbl_swap", .internal_input = NETDATA_SWAP_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "swap_writepage", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -133,17 +149,21 @@ static void ebpf_swap_set_hash_tables(struct swap_bpf *obj)
}
/**
- * Adjust Map Size
+ * Adjust Map
*
* Resize maps according to input from users.
*
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_swap_adjust_map_size(struct swap_bpf *obj, ebpf_module_t *em)
+static void ebpf_swap_adjust_map(struct swap_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.tbl_pid_swap, &swap_maps[NETDATA_PID_SWAP_TABLE],
em, bpf_map__name(obj->maps.tbl_pid_swap));
+
+ ebpf_update_map_type(obj->maps.tbl_pid_swap, &swap_maps[NETDATA_PID_SWAP_TABLE]);
+ ebpf_update_map_type(obj->maps.tbl_swap, &swap_maps[NETDATA_SWAP_GLOBAL_TABLE]);
+ ebpf_update_map_type(obj->maps.swap_ctrl, &swap_maps[NETDATA_SWAP_CONTROLLER]);
}
/**
@@ -182,7 +202,7 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t
ebpf_swap_disable_trampoline(obj);
}
- ebpf_swap_adjust_map_size(obj, em);
+ ebpf_swap_adjust_map(obj, em);
if (!em->apps_charts && !em->cgroup_charts)
ebpf_swap_disable_release_task(obj);
@@ -251,10 +271,11 @@ static void ebpf_swap_exit(void *ptr)
* Sum all values read from kernel and store in the first address.
*
* @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
*/
-static void swap_apps_accumulator(netdata_publish_swap_t *out)
+static void swap_apps_accumulator(netdata_publish_swap_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_publish_swap_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_publish_swap_t *w = &out[i];
@@ -286,13 +307,17 @@ static void swap_fill_pid(uint32_t current_pid, netdata_publish_swap_t *publish)
* Update cgroup
*
* Update cgroup data based on collected PID data.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_swap_cgroup()
+static void ebpf_update_swap_cgroup(int maps_per_core)
{
ebpf_cgroup_target_t *ect ;
netdata_publish_swap_t *cv = swap_vector;
int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_swap_t)*ebpf_nprocs;
+ size_t length = sizeof(netdata_publish_swap_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
struct pid_on_target2 *pids;
@@ -306,9 +331,12 @@ static void ebpf_update_swap_cgroup()
} else {
memset(cv, 0, length);
if (!bpf_map_lookup_elem(fd, &pid, cv)) {
- swap_apps_accumulator(cv);
+ swap_apps_accumulator(cv, maps_per_core);
memcpy(out, cv, sizeof(netdata_publish_swap_t));
+
+ // We are cleaning to avoid passing data read from one process to another.
+ memset(cv, 0, length);
}
}
}
@@ -320,14 +348,18 @@ static void ebpf_update_swap_cgroup()
* Read APPS table
*
* Read the apps table and store data inside the structure.
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void read_apps_table()
+static void read_swap_apps_table(int maps_per_core)
{
netdata_publish_swap_t *cv = swap_vector;
uint32_t key;
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_swap_t)*ebpf_nprocs;
+ size_t length = sizeof(netdata_publish_swap_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
while (pids) {
key = pids->pid;
@@ -336,7 +368,7 @@ static void read_apps_table()
continue;
}
- swap_apps_accumulator(cv);
+ swap_apps_accumulator(cv, maps_per_core);
swap_fill_pid(key, cv);
@@ -365,8 +397,10 @@ static void swap_send_global()
* Read global counter
*
* Read the table with number of calls to all functions
+ *
+ * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_swap_read_global_table()
+static void ebpf_swap_read_global_table(int maps_per_core)
{
netdata_idx_t *stored = swap_values;
netdata_idx_t *val = swap_hash_values;
@@ -376,7 +410,7 @@ static void ebpf_swap_read_global_table()
for (i = NETDATA_KEY_SWAP_READPAGE_CALL; i < end; i++) {
if (!bpf_map_lookup_elem(fd, &i, stored)) {
int j;
- int last = ebpf_nprocs;
+ int last = (maps_per_core) ? ebpf_nprocs : 1;
netdata_idx_t total = 0;
for (j = 0; j < last; j++)
total += stored[j];
@@ -646,6 +680,7 @@ static void swap_collector(ebpf_module_t *em)
heartbeat_t hb;
heartbeat_init(&hb);
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -653,13 +688,13 @@ static void swap_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_swap_read_global_table();
+ ebpf_swap_read_global_table(maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
- read_apps_table();
+ read_swap_apps_table(maps_per_core);
if (cgroup)
- ebpf_update_swap_cgroup();
+ ebpf_update_swap_cgroup(maps_per_core);
pthread_mutex_lock(&lock);
@@ -752,7 +787,7 @@ static void ebpf_create_swap_charts(int update_every)
EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- 202,
+ NETDATA_CHART_PRIO_SYSTEM_SWAP_CALLS,
ebpf_create_global_dimension,
swap_publish_aggregated, NETDATA_SWAP_END,
update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
@@ -767,6 +802,10 @@ static void ebpf_create_swap_charts(int update_every)
*/
static int ebpf_swap_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_KEY_SWAP_READPAGE_CALL].mode);
if (em->load & EBPF_LOAD_LEGACY) {
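The `ebpf_define_map_type()` call added above runs before loading so the requested map type matches the `maps_per_core` setting and the running kernel. Its body is not part of this diff; the sketch below is only a plausible shape for such a helper, with the downgrade rules and the omission of the kernel-version check being assumptions, not the actual implementation:

```c
#ifdef LIBBPF_MAJOR_VERSION
// Assumed behavior: fall back from per-CPU map types to their plain
// counterparts when per-core storage is disabled. Field names follow the
// ebpf_local_maps_t initializers visible elsewhere in this diff.
static void define_map_type_sketch(ebpf_local_maps_t *maps, int maps_per_core)
{
    for (ebpf_local_maps_t *m = maps; m->name; m++) {
        if (maps_per_core)
            continue;
        if (m->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
            m->map_type = BPF_MAP_TYPE_ARRAY;
        else if (m->map_type == BPF_MAP_TYPE_PERCPU_HASH)
            m->map_type = BPF_MAP_TYPE_HASH;
    }
}
#endif
```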
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
index f838b65af..66e9c742c 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/collectors/ebpf.plugin/ebpf_sync.c
@@ -10,27 +10,95 @@ static netdata_publish_syscall_t sync_counter_publish_aggregated[NETDATA_SYNC_ID
static netdata_idx_t sync_hash_values[NETDATA_SYNC_IDX_END];
-static ebpf_local_maps_t sync_maps[] = {{.name = "tbl_sync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_syncfs", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_msync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_fsync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_fdatasync", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = "tbl_syncfr", .internal_input = NETDATA_SYNC_END,
- .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0,
- .type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+ebpf_local_maps_t sync_maps[] = {{.name = "tbl_sync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
+
+ebpf_local_maps_t syncfs_maps[] = {{.name = "tbl_syncfs", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
+
+ebpf_local_maps_t msync_maps[] = {{.name = "tbl_msync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
+
+ebpf_local_maps_t fsync_maps[] = {{.name = "tbl_fsync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
+
+ebpf_local_maps_t fdatasync_maps[] = {{.name = "tbl_fdatasync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
+
+ebpf_local_maps_t sync_file_range_maps[] = {{.name = "tbl_syncfr", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
struct config sync_config = { .first_section = NULL,
.last_section = NULL,
@@ -111,12 +179,12 @@ void ebpf_sync_disable_tracepoints(struct sync_bpf *obj, sync_syscalls_index_t i
*
* Set the values for maps according the value given by kernel.
*
- * @param obj is the main structure for bpf objects.
- * @param idx the index for the main structure
+ * @param map the map loaded.
+ * @param obj the main structure for bpf objects.
*/
-static void ebpf_sync_set_hash_tables(struct sync_bpf *obj, sync_syscalls_index_t idx)
+static void ebpf_sync_set_hash_tables(ebpf_local_maps_t *map, struct sync_bpf *obj)
{
- sync_maps[idx].map_fd = bpf_map__fd(obj->maps.tbl_sync);
+ map->map_fd = bpf_map__fd(obj->maps.tbl_sync);
}
/**
@@ -154,6 +222,8 @@ static inline int ebpf_sync_load_and_attach(struct sync_bpf *obj, ebpf_module_t
ebpf_sync_disable_tracepoints(obj, idx);
}
+ ebpf_update_map_type(obj->maps.tbl_sync, &em->maps[NETDATA_SYNC_GLOBAL_TABLE]);
+
int ret = sync_bpf__load(obj);
if (!ret) {
if (test != EBPF_LOAD_PROBE && test != EBPF_LOAD_RETPROBE) {
@@ -165,7 +235,7 @@ static inline int ebpf_sync_load_and_attach(struct sync_bpf *obj, ebpf_module_t
}
if (!ret)
- ebpf_sync_set_hash_tables(obj, idx);
+ ebpf_sync_set_hash_tables(&em->maps[NETDATA_SYNC_GLOBAL_TABLE], obj);
}
return ret;
@@ -264,11 +334,21 @@ static int ebpf_sync_load_legacy(ebpf_sync_syscalls_t *w, ebpf_module_t *em)
*/
static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(sync_maps, em->maps_per_core, running_on_kernel);
+ ebpf_define_map_type(syncfs_maps, em->maps_per_core, running_on_kernel);
+ ebpf_define_map_type(msync_maps, em->maps_per_core, running_on_kernel);
+ ebpf_define_map_type(fsync_maps, em->maps_per_core, running_on_kernel);
+ ebpf_define_map_type(fdatasync_maps, em->maps_per_core, running_on_kernel);
+ ebpf_define_map_type(sync_file_range_maps, em->maps_per_core, running_on_kernel);
+#endif
+
int i;
const char *saved_name = em->thread_name;
int errors = 0;
for (i = 0; local_syscalls[i].syscall; i++) {
ebpf_sync_syscalls_t *w = &local_syscalls[i];
+ w->sync_maps = local_syscalls[i].sync_maps;
if (w->enabled) {
if (em->load & EBPF_LOAD_LEGACY) {
if (ebpf_sync_load_legacy(w, em))
@@ -317,17 +397,25 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
* Read global table
*
* Read the table with number of calls for all functions
+ *
+ * @param maps_per_core whether the map stores one value per core (read and sum all cores when set).
*/
-static void ebpf_sync_read_global_table()
+static void ebpf_sync_read_global_table(int maps_per_core)
{
- netdata_idx_t stored;
+ netdata_idx_t stored[ebpf_nprocs];
uint32_t idx = NETDATA_SYNC_CALL;
int i;
for (i = 0; local_syscalls[i].syscall; i++) {
- if (local_syscalls[i].enabled) {
- int fd = sync_maps[i].map_fd;
+ ebpf_sync_syscalls_t *w = &local_syscalls[i];
+ if (w->enabled) {
+ int fd = w->sync_maps[NETDATA_SYNC_GLOBAL_TABLE].map_fd;
if (!bpf_map_lookup_elem(fd, &idx, &stored)) {
- sync_hash_values[i] = stored;
+ int j, end = (maps_per_core) ? ebpf_nprocs : 1;
+ netdata_idx_t total = 0;
+ for (j = 0; j < end; j++)
+ total += stored[j];
+
+ sync_hash_values[i] = total;
}
}
}
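With `sync_maps` split into one two-entry array per syscall, the global-table file descriptor is now reached through the syscall descriptor instead of a shared index into a single array. A one-line illustration, using only names that appear in this diff:

```c
// Before: sync_maps[i].map_fd indexed all six tables in one shared array.
// After: each syscall entry carries its own maps, indexed by table role.
int fd = local_syscalls[NETDATA_SYNC_SYNC_IDX].sync_maps[NETDATA_SYNC_GLOBAL_TABLE].map_fd;
```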
@@ -352,7 +440,7 @@ static void ebpf_send_sync_chart(char *id,
while (move && idx <= end) {
if (local_syscalls[idx].enabled)
- write_chart_dimension(move->name, sync_hash_values[idx]);
+ write_chart_dimension(move->name, (long long)sync_hash_values[idx]);
move = move->next;
idx++;
@@ -396,13 +484,14 @@ static void sync_collector(ebpf_module_t *em)
heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
continue;
counter = 0;
- ebpf_sync_read_global_table();
+ ebpf_sync_read_global_table(maps_per_core);
pthread_mutex_lock(&lock);
sync_send_data();
@@ -498,6 +587,22 @@ static void ebpf_sync_parse_syscalls()
}
/**
+ * Set sync maps
+ *
+ * When the thread is initialized, the sync_maps pointers are NULL;
+ * this function fills them before they are used.
+ */
+static void ebpf_set_sync_maps()
+{
+ local_syscalls[NETDATA_SYNC_SYNC_IDX].sync_maps = sync_maps;
+ local_syscalls[NETDATA_SYNC_SYNCFS_IDX].sync_maps = syncfs_maps;
+ local_syscalls[NETDATA_SYNC_MSYNC_IDX].sync_maps = msync_maps;
+ local_syscalls[NETDATA_SYNC_FSYNC_IDX].sync_maps = fsync_maps;
+ local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].sync_maps = fdatasync_maps;
+ local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].sync_maps = sync_file_range_maps;
+}
+
+/**
* Sync thread
*
 * Thread used to run the sync collector.
@@ -513,6 +618,7 @@ void *ebpf_sync_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = sync_maps;
+ ebpf_set_sync_maps();
ebpf_sync_parse_syscalls();
#ifdef LIBBPF_MAJOR_VERSION
diff --git a/collectors/ebpf.plugin/ebpf_unittest.c b/collectors/ebpf.plugin/ebpf_unittest.c
new file mode 100644
index 000000000..3e1443ad3
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_unittest.c
@@ -0,0 +1,83 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf_unittest.h"
+
+ebpf_module_t test_em;
+
+/**
+ * Initialize structure
+ *
+ * Initialize structure used to run unittests
+ */
+void ebpf_ut_initialize_structure(netdata_run_mode_t mode)
+{
+ memset(&test_em, 0, sizeof(ebpf_module_t));
+ test_em.thread_name = strdupz("process");
+ test_em.config_name = test_em.thread_name;
+ test_em.kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 |
+ NETDATA_V5_14;
+ test_em.pid_map_size = ND_EBPF_DEFAULT_PID_SIZE;
+ test_em.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT;
+ test_em.mode = mode;
+}
+
+/**
+ * Clean Up Memory
+ *
+ * Clean up data allocated during the unit tests.
+ */
+void ebpf_ut_cleanup_memory()
+{
+ freez((void *)test_em.thread_name);
+}
+
+/**
+ * Load Binary
+ *
+ * Test load of legacy eBPF programs.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+static int ebpf_ut_load_binary()
+{
+ test_em.probe_links = ebpf_load_program(ebpf_plugin_dir, &test_em, running_on_kernel, isrh, &test_em.objects);
+ if (!test_em.probe_links)
+ return -1;
+
+ ebpf_unload_legacy_code(test_em.objects, test_em.probe_links);
+
+ return 0;
+}
+
+/**
+ * Load Real Binary
+ *
+ * Load an existing binary from the plugin directory.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+int ebpf_ut_load_real_binary()
+{
+ return ebpf_ut_load_binary();
+}
+
+/**
+ * Load fake Binary
+ *
+ * Try to load a binary not generated by netdata.
+ *
+ * @return It returns 0 on success and a non-zero value otherwise. Success here means the fake
+ * binary failed to load, exactly as expected.
+ */
+int ebpf_ut_load_fake_binary()
+{
+ const char *original = test_em.thread_name;
+
+ test_em.thread_name = strdupz("I_am_not_here");
+ int ret = ebpf_ut_load_binary();
+
+ ebpf_ut_cleanup_memory();
+
+ test_em.thread_name = original;
+
+ return !ret;
+}
diff --git a/collectors/ebpf.plugin/ebpf_unittest.h b/collectors/ebpf.plugin/ebpf_unittest.h
new file mode 100644
index 000000000..429cbe628
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_unittest.h
@@ -0,0 +1,10 @@
+#ifndef NETDATA_EBPF_PLUGIN_UNITTEST_H_
+# define NETDATA_EBPF_PLUGIN_UNITTEST_H_ 1
+
+#include "ebpf.h"
+
+void ebpf_ut_initialize_structure(netdata_run_mode_t mode);
+int ebpf_ut_load_real_binary();
+int ebpf_ut_load_fake_binary();
+void ebpf_ut_cleanup_memory();
+#endif
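A hypothetical driver for the new unit-test API, showing the intended call order. The mode constant `MODE_ENTRY` is an assumption here; use whatever `netdata_run_mode_t` actually defines in `ebpf.h`:

```c
#include "ebpf_unittest.h"

// Sketch only: exercise the legacy-load tests and report failures.
int run_ebpf_load_tests(void)
{
    int errors = 0;
    ebpf_ut_initialize_structure(MODE_ENTRY); // assumed netdata_run_mode_t value

    if (ebpf_ut_load_real_binary())  // 0 on success
        errors++;
    if (ebpf_ut_load_fake_binary())  // 0 when the bad binary fails to load, as expected
        errors++;

    ebpf_ut_cleanup_memory();
    return errors;
}
```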
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
index e2d87fd52..bfc7ee8f7 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/collectors/ebpf.plugin/ebpf_vfs.c
@@ -17,15 +17,31 @@ netdata_publish_vfs_t *vfs_vector = NULL;
static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
.user_input = 0, .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_HASH
+#endif
+ },
{.name = "tbl_vfs_stats", .internal_input = NETDATA_VFS_COUNTER,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
{.name = "vfs_ctrl", .internal_input = NETDATA_CONTROLLER_END,
.user_input = 0,
.type = NETDATA_EBPF_MAP_CONTROLLER,
- .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
- {.name = NULL, .internal_input = 0, .user_input = 0}};
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ },
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+#ifdef LIBBPF_MAJOR_VERSION
+ .map_type = BPF_MAP_TYPE_PERCPU_ARRAY
+#endif
+ }};
struct config vfs_config = { .first_section = NULL,
.last_section = NULL,
@@ -293,17 +309,21 @@ static int ebpf_vfs_attach_probe(struct vfs_bpf *obj)
}
/**
- * Adjust Map Size
+ * Adjust Size
*
* Resize maps according input from users.
*
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*/
-static void ebpf_vfs_adjust_map_size(struct vfs_bpf *obj, ebpf_module_t *em)
+static void ebpf_vfs_adjust_map(struct vfs_bpf *obj, ebpf_module_t *em)
{
ebpf_update_map_size(obj->maps.tbl_vfs_pid, &vfs_maps[NETDATA_VFS_PID],
em, bpf_map__name(obj->maps.tbl_vfs_pid));
+
+ ebpf_update_map_type(obj->maps.tbl_vfs_pid, &vfs_maps[NETDATA_VFS_PID]);
+ ebpf_update_map_type(obj->maps.tbl_vfs_stats, &vfs_maps[NETDATA_VFS_ALL]);
+ ebpf_update_map_type(obj->maps.vfs_ctrl, &vfs_maps[NETDATA_VFS_CTRL]);
}
/**
@@ -356,7 +376,7 @@ static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *e
ebpf_vfs_disable_trampoline(obj);
}
- ebpf_vfs_adjust_map_size(obj, em);
+ ebpf_vfs_adjust_map(obj, em);
if (!em->apps_charts && !em->cgroup_charts)
ebpf_vfs_disable_release_task(obj);
@@ -475,23 +495,30 @@ static void ebpf_vfs_send_data(ebpf_module_t *em)
/**
* Read the hash table and store data to allocated vectors.
+ *
+ * @param maps_per_core whether the map stores one value per core (read and sum all cores when set).
*/
-static void ebpf_vfs_read_global_table()
+static void ebpf_vfs_read_global_table(int maps_per_core)
{
uint64_t idx;
netdata_idx_t res[NETDATA_VFS_COUNTER];
netdata_idx_t *val = vfs_hash_values;
+ size_t length = sizeof(netdata_idx_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
int fd = vfs_maps[NETDATA_VFS_ALL].map_fd;
for (idx = 0; idx < NETDATA_VFS_COUNTER; idx++) {
uint64_t total = 0;
if (!bpf_map_lookup_elem(fd, &idx, val)) {
int i;
- int end = ebpf_nprocs;
+ int end = (maps_per_core) ? ebpf_nprocs : 1;
for (i = 0; i < end; i++)
total += val[i];
}
res[idx] = total;
+ memset(val, 0, length);
}
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].ncall = res[NETDATA_KEY_CALLS_VFS_UNLINK];
@@ -723,9 +750,9 @@ void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
*
* @param out the vector with read values.
*/
-static void vfs_apps_accumulator(netdata_publish_vfs_t *out)
+static void vfs_apps_accumulator(netdata_publish_vfs_t *out, int maps_per_core)
{
- int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_publish_vfs_t *total = &out[0];
for (i = 1; i < end; i++) {
netdata_publish_vfs_t *w = &out[i];
@@ -771,12 +798,15 @@ static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
/**
* Read the hash table and store data to allocated vectors.
*/
-static void ebpf_vfs_read_apps()
+static void ebpf_vfs_read_apps(int maps_per_core)
{
struct ebpf_pid_stat *pids = ebpf_root_of_pids;
netdata_publish_vfs_t *vv = vfs_vector;
int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
- size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
+ size_t length = sizeof(netdata_publish_vfs_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
while (pids) {
uint32_t key = pids->pid;
@@ -785,7 +815,7 @@ static void ebpf_vfs_read_apps()
continue;
}
- vfs_apps_accumulator(vv);
+ vfs_apps_accumulator(vv, maps_per_core);
vfs_fill_pid(key, vv);
@@ -799,14 +829,18 @@ static void ebpf_vfs_read_apps()
/**
* Update cgroup
*
- * Update cgroup data based in
+ * Update cgroup data based on the PID.
+ *
+ * @param maps_per_core whether the map stores one value per core (read and sum all cores when set).
*/
-static void read_update_vfs_cgroup()
+static void read_update_vfs_cgroup(int maps_per_core)
{
ebpf_cgroup_target_t *ect ;
netdata_publish_vfs_t *vv = vfs_vector;
int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
- size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
+ size_t length = sizeof(netdata_publish_vfs_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
@@ -821,7 +855,7 @@ static void read_update_vfs_cgroup()
} else {
memset(vv, 0, length);
if (!bpf_map_lookup_elem(fd, &pid, vv)) {
- vfs_apps_accumulator(vv);
+ vfs_apps_accumulator(vv, maps_per_core);
memcpy(out, vv, sizeof(netdata_publish_vfs_t));
}
@@ -1458,6 +1492,7 @@ static void vfs_collector(ebpf_module_t *em)
heartbeat_init(&hb);
int update_every = em->update_every;
int counter = update_every - 1;
+ int maps_per_core = em->maps_per_core;
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -1465,21 +1500,21 @@ static void vfs_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_vfs_read_global_table();
+ ebpf_vfs_read_global_table(maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
- ebpf_vfs_read_apps();
+ ebpf_vfs_read_apps(maps_per_core);
+
+ if (cgroups)
+ read_update_vfs_cgroup(maps_per_core);
+
+ pthread_mutex_lock(&lock);
#ifdef NETDATA_DEV_MODE
if (ebpf_aral_vfs_pid)
ebpf_send_data_aral_chart(ebpf_aral_vfs_pid, em);
#endif
- if (cgroups)
- read_update_vfs_cgroup();
-
- pthread_mutex_lock(&lock);
-
ebpf_vfs_send_data(em);
fflush(stdout);
@@ -1843,6 +1878,10 @@ static void ebpf_vfs_allocate_global_vectors(int apps)
*/
static int ebpf_vfs_load_bpf(ebpf_module_t *em)
{
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_define_map_type(em->maps, em->maps_per_core, running_on_kernel);
+#endif
+
int ret = 0;
ebpf_adjust_apps_cgroup(em, em->targets[NETDATA_EBPF_VFS_WRITE].mode);
if (em->load & EBPF_LOAD_LEGACY) {
diff --git a/collectors/freeipmi.plugin/freeipmi_plugin.c b/collectors/freeipmi.plugin/freeipmi_plugin.c
index bcc5139f3..a2251891a 100644
--- a/collectors/freeipmi.plugin/freeipmi_plugin.c
+++ b/collectors/freeipmi.plugin/freeipmi_plugin.c
@@ -1833,6 +1833,21 @@ int main (int argc, char **argv) {
for(iteration = 0; 1 ; iteration++) {
usec_t dt = heartbeat_next(&hb, step);
+ if (iteration) {
+ if (iteration == 1) {
+ fprintf(
+ stdout,
+ "CHART netdata.freeipmi_availability_status '' 'Plugin availability status' 'status' plugins netdata.plugin_availability_status line 146000 %d\n"
+ "DIMENSION available '' absolute 1 1\n",
+ netdata_update_every);
+ }
+ fprintf(
+ stdout,
+ "BEGIN netdata.freeipmi_availability_status\n"
+ "SET available = 1\n"
+ "END\n");
+ }
+
if(debug && iteration)
fprintf(stderr, "freeipmi.plugin: iteration %zu, dt %llu usec, sensors collected %zu, sensors sent to netdata %zu \n"
, iteration
@@ -1852,6 +1867,11 @@ int main (int argc, char **argv) {
fflush(stdout);
// restart check (14400 seconds)
- if(now_monotonic_sec() - started_t > 14400) exit(0);
+ if (now_monotonic_sec() - started_t > 14400) {
+ fprintf(stdout, "EXIT\n");
+ fflush(stdout);
+ exit(0);
+ }
}
}
+
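The availability heartbeat added above uses the plugins.d text protocol: the chart and its dimension are declared once with CHART/DIMENSION, then each iteration emits a BEGIN/SET/END triple so the agent can tell the plugin is alive. A trimmed standalone version of the same idea; the chart id and priority below are illustrative, not the ones used by freeipmi.plugin:

```c
#include <stdio.h>

// Declare the chart on the first data iteration, then report one sample.
static void emit_availability(size_t iteration, int update_every)
{
    if (iteration == 1)
        printf("CHART netdata.plugin_availability '' 'Plugin availability status' 'status' "
               "plugins netdata.plugin_availability line 146000 %d\n"
               "DIMENSION available '' absolute 1 1\n",
               update_every);

    printf("BEGIN netdata.plugin_availability\n"
           "SET available = 1\n"
           "END\n");
    fflush(stdout); // the agent reads the plugin's stdout over a pipe
}
```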
diff --git a/collectors/nfacct.plugin/README.md b/collectors/nfacct.plugin/README.md
index e8502236f..ae6597a40 100644
--- a/collectors/nfacct.plugin/README.md
+++ b/collectors/nfacct.plugin/README.md
@@ -13,6 +13,11 @@ learn_rel_path: "Integrations/Monitor/Networking"
## Prerequisites
+If you are using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), install the
+`netdata-plugin-nfacct` package using your system package manager.
+
+If you built Netdata locally:
+
1. install `libmnl-dev` and `libnetfilter-acct-dev` using the package manager of your system.
2. re-install Netdata from source. The installer will detect that the required libraries are now available and will also build `netdata.plugin`.
diff --git a/collectors/perf.plugin/README.md b/collectors/perf.plugin/README.md
index e519be9c4..a8bd4b0e5 100644
--- a/collectors/perf.plugin/README.md
+++ b/collectors/perf.plugin/README.md
@@ -14,6 +14,9 @@ the `perf_event_open()` system call.
## Important Notes
+If you are using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), you will need to install
+the `netdata-plugin-perf` package using your system package manager.
+
Accessing hardware PMUs requires root permissions, so the plugin is setuid to root.
Keep in mind that the number of PMUs in a system is usually quite limited and every hardware monitoring
diff --git a/collectors/plugins.d/plugins_d.c b/collectors/plugins.d/plugins_d.c
index dc13cd2ee..da5226a5c 100644
--- a/collectors/plugins.d/plugins_d.c
+++ b/collectors/plugins.d/plugins_d.c
@@ -72,7 +72,7 @@ static void pluginsd_worker_thread_cleanup(void *arg)
info("PLUGINSD: 'host:%s', waiting for data collection child process pid %d to exit...",
rrdhost_hostname(cd->host), pid);
- waitid(P_PID, (id_t)pid, &info, WEXITED);
+ netdata_waitid(P_PID, (id_t)pid, &info, WEXITED);
}
}
}
diff --git a/collectors/plugins.d/plugins_d.h b/collectors/plugins.d/plugins_d.h
index 68ed4940f..fe43a19f5 100644
--- a/collectors/plugins.d/plugins_d.h
+++ b/collectors/plugins.d/plugins_d.h
@@ -87,6 +87,7 @@ struct plugind {
extern struct plugind *pluginsd_root;
size_t pluginsd_process(RRDHOST *host, struct plugind *cd, FILE *fp_plugin_input, FILE *fp_plugin_output, int trust_durations);
+void pluginsd_process_thread_cleanup(void *ptr);
size_t pluginsd_initialize_plugin_directories();
diff --git a/collectors/plugins.d/pluginsd_parser.c b/collectors/plugins.d/pluginsd_parser.c
index 28fc0bd49..097e5ea60 100644
--- a/collectors/plugins.d/pluginsd_parser.c
+++ b/collectors/plugins.d/pluginsd_parser.c
@@ -11,10 +11,10 @@ static int send_to_plugin(const char *txt, void *data) {
return 0;
#ifdef ENABLE_HTTPS
- struct netdata_ssl *ssl = parser->ssl_output;
+ NETDATA_SSL *ssl = parser->ssl_output;
if(ssl) {
- if(ssl->conn && ssl->flags == NETDATA_SSL_HANDSHAKE_COMPLETE)
- return (int)netdata_ssl_write(ssl->conn, (void *)txt, strlen(txt));
+ if(SSL_connection(ssl))
+ return (int)netdata_ssl_write(ssl, (void *)txt, strlen(txt));
error("PLUGINSD: cannot send command (SSL)");
return -1;
@@ -108,11 +108,12 @@ void pluginsd_rrdset_cleanup(RRDSET *st) {
st->pluginsd.pos = 0;
}
-static inline void pluginsd_set_chart_from_parent(void *user, RRDSET *st, const char *keyword) {
+static inline void pluginsd_unlock_previous_chart(void *user, const char *keyword, bool stale) {
PARSER_USER_OBJECT *u = (PARSER_USER_OBJECT *) user;
if(unlikely(pluginsd_unlock_rrdset_data_collection(user))) {
- error("PLUGINSD: 'host:%s/chart:%s/' stale data collection lock found during %s; it has been unlocked",
+ if(stale)
+ error("PLUGINSD: 'host:%s/chart:%s/' stale data collection lock found during %s; it has been unlocked",
rrdhost_hostname(u->st->rrdhost), rrdset_id(u->st), keyword);
}
@@ -120,9 +121,16 @@ static inline void pluginsd_set_chart_from_parent(void *user, RRDSET *st, const
ml_chart_update_end(u->st);
u->v2.ml_locked = false;
- error("PLUGINSD: 'host:%s/chart:%s/' stale ML lock found during %s, it has been unlocked",
+ if(stale)
+ error("PLUGINSD: 'host:%s/chart:%s/' stale ML lock found during %s, it has been unlocked",
rrdhost_hostname(u->st->rrdhost), rrdset_id(u->st), keyword);
}
+}
+
+static inline void pluginsd_set_chart_from_parent(void *user, RRDSET *st, const char *keyword) {
+ PARSER_USER_OBJECT *u = (PARSER_USER_OBJECT *) user;
+
+ pluginsd_unlock_previous_chart(user, keyword, true);
if(st) {
size_t dims = dictionary_entries(st->rrddim_root_index);
@@ -1459,9 +1467,11 @@ PARSER_RC pluginsd_replay_end(char **words, size_t num_words, void *user)
time_t started = st->rrdhost->receiver->replication_first_time_t;
time_t current = ((PARSER_USER_OBJECT *) user)->replay.end_time;
- if(started && current > started)
+ if(started && current > started) {
+ host->rrdpush_receiver_replication_percent = (NETDATA_DOUBLE) (current - started) * 100.0 / (NETDATA_DOUBLE) (now - started);
worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION,
- (NETDATA_DOUBLE)(current - started) * 100.0 / (NETDATA_DOUBLE)(now - started));
+ host->rrdpush_receiver_replication_percent);
+ }
}
((PARSER_USER_OBJECT *) user)->replay.start_time = 0;
@@ -1501,7 +1511,8 @@ PARSER_RC pluginsd_replay_end(char **words, size_t num_words, void *user)
pluginsd_set_chart_from_parent(user, NULL, PLUGINSD_KEYWORD_REPLAY_END);
- worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, 100.0);
+ host->rrdpush_receiver_replication_percent = 100.0;
+ worker_set_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, host->rrdpush_receiver_replication_percent);
return PARSER_RC_OK;
}
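For reference, the completion metric stored above is plain linear progress over the window that still needs replaying. A worked example with round numbers:

```c
// started = 1000, now = 1100, current = 1050 (all unix timestamps):
// 50 of the 100 seconds to replay are done, so the metric is 50%.
NETDATA_DOUBLE pct = (NETDATA_DOUBLE)(1050 - 1000) * 100.0 / (NETDATA_DOUBLE)(1100 - 1000); // == 50.0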
@@ -1783,12 +1794,7 @@ PARSER_RC pluginsd_end_v2(char **words __maybe_unused, size_t num_words __maybe_
// ------------------------------------------------------------------------
// unblock data collection
- ml_chart_update_end(st);
- u->v2.ml_locked = false;
-
- timing_step(TIMING_STEP_END2_ML);
-
- pluginsd_unlock_rrdset_data_collection(user);
+ pluginsd_unlock_previous_chart(user, PLUGINSD_KEYWORD_END_V2, false);
rrdcontext_collected_rrdset(st);
store_metric_collection_completed();
@@ -1823,13 +1829,14 @@ PARSER_RC pluginsd_end_v2(char **words __maybe_unused, size_t num_words __maybe_
return PARSER_RC_OK;
}
-static void pluginsd_process_thread_cleanup(void *ptr) {
+void pluginsd_process_thread_cleanup(void *ptr) {
PARSER *parser = (PARSER *)ptr;
pluginsd_cleanup_v2(parser->user);
pluginsd_host_define_cleanup(parser->user);
rrd_collector_finished();
+
parser_destroy(parser);
}
diff --git a/collectors/proc.plugin/proc_self_mountinfo.c b/collectors/proc.plugin/proc_self_mountinfo.c
index 0483749c3..194791603 100644
--- a/collectors/proc.plugin/proc_self_mountinfo.c
+++ b/collectors/proc.plugin/proc_self_mountinfo.c
@@ -360,6 +360,18 @@ struct mountinfo *mountinfo_read(int do_statvfs) {
else {
mi->st_dev = 0;
}
+
+ // Try to detect devices with the same major and minor numbers. Among these,
+ // the mount with the longer root path is considered a bind mount.
+ struct mountinfo *mt;
+ for(mt = root; mt; mt = mt->next) {
+ if(unlikely(mt->major == mi->major && mt->minor == mi->minor && !(mi->flags & MOUNTINFO_IS_BIND))) {
+ if(strlen(mi->root) < strlen(mt->root))
+ mt->flags |= MOUNTINFO_IS_BIND;
+ else
+ mi->flags |= MOUNTINFO_IS_BIND;
+ }
+ }
}
else {
mi->filesystem = NULL;
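The bind-mount heuristic added above can be illustrated with a toy version that keeps only the fields involved; the struct below is a stand-in, not netdata's `struct mountinfo`. Among entries sharing the same major:minor pair, the one whose root path is longer is flagged as the bind:

```c
#include <string.h>

struct toy_mi { unsigned major, minor; const char *root; int is_bind; };

// Compare every pair; the longer root of two same-device entries is the bind.
static void flag_binds(struct toy_mi *m, int n)
{
    for (int i = 1; i < n; i++)
        for (int j = 0; j < i; j++) {
            if (m[i].major != m[j].major || m[i].minor != m[j].minor)
                continue;
            if (strlen(m[i].root) < strlen(m[j].root))
                m[j].is_bind = 1;
            else
                m[i].is_bind = 1;
        }
}
// e.g. {8,1,"/"} and {8,1,"/home"} share 8:1, so the "/home" entry is flagged.
```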
diff --git a/collectors/python.d.plugin/oracledb/README.md b/collectors/python.d.plugin/oracledb/README.md
index 722c77b75..315816de0 100644
--- a/collectors/python.d.plugin/oracledb/README.md
+++ b/collectors/python.d.plugin/oracledb/README.md
@@ -13,8 +13,7 @@ Monitors the performance and health metrics of the Oracle database.
## Requirements
-- `cx_Oracle` package.
-- Oracle Client (using `cx_Oracle` requires Oracle Client libraries to be installed).
+- `oracledb` package.
It produces following charts:
@@ -53,18 +52,13 @@ It produces following charts:
To use the Oracle module do the following:
-1. Install `cx_Oracle` package ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html)).
+1. Install `oracledb` package ([link](https://python-oracledb.readthedocs.io/en/latest/user_guide/installation.html)).
-2. Install Oracle Client libraries
- ([link](https://cx-oracle.readthedocs.io/en/latest/user_guide/installation.html#install-oracle-client)).
-
-3. Create a read-only `netdata` user with proper access to your Oracle Database Server.
+2. Create a read-only `netdata` user with proper access to your Oracle Database Server.
Connect to your Oracle database with an administrative user and execute:
-```
-ALTER SESSION SET "_ORACLE_SCRIPT"=true;
-
+```SQL
CREATE USER netdata IDENTIFIED BY <PASSWORD>;
GRANT CONNECT TO netdata;
@@ -88,6 +82,7 @@ local:
server: 'localhost:1521'
service: 'XE'
+
remote:
user: 'netdata'
password: 'secret'
diff --git a/collectors/python.d.plugin/oracledb/oracledb.chart.py b/collectors/python.d.plugin/oracledb/oracledb.chart.py
index 28ef8db10..455cf270e 100644
--- a/collectors/python.d.plugin/oracledb/oracledb.chart.py
+++ b/collectors/python.d.plugin/oracledb/oracledb.chart.py
@@ -8,11 +8,18 @@ from copy import deepcopy
from bases.FrameworkServices.SimpleService import SimpleService
try:
- import cx_Oracle
+ import oracledb as cx_Oracle
- HAS_ORACLE = True
+ HAS_ORACLE_NEW = True
+ HAS_ORACLE_OLD = False
except ImportError:
- HAS_ORACLE = False
+ HAS_ORACLE_NEW = False
+ try:
+ import cx_Oracle
+
+ HAS_ORACLE_OLD = True
+ except ImportError:
+ HAS_ORACLE_OLD = False
ORDER = [
'session_count',
@@ -187,7 +194,7 @@ CHARTS = {
},
}
-CX_CONNECT_STRING = "{0}/{1}@//{2}/{3}"
+CX_CONNECT_STRING_OLD = "{0}/{1}@//{2}/{3}"
QUERY_SYSTEM = '''
SELECT
@@ -322,6 +329,7 @@ class Service(SimpleService):
self.password = configuration.get('password')
self.server = configuration.get('server')
self.service = configuration.get('service')
+ self.protocol = configuration.get('protocol', 'tcps')
self.alive = False
self.conn = None
self.active_tablespaces = set()
@@ -330,18 +338,25 @@ class Service(SimpleService):
if self.conn:
self.conn.close()
self.conn = None
-
- try:
- self.conn = cx_Oracle.connect(
- CX_CONNECT_STRING.format(
- self.user,
- self.password,
- self.server,
- self.service,
- ))
- except cx_Oracle.DatabaseError as error:
- self.error(error)
- return False
+ if HAS_ORACLE_NEW:
+ try:
+ self.conn = cx_Oracle.connect(
+ f'{self.user}/{self.password}@{self.protocol}://{self.server}/{self.service}')
+ except cx_Oracle.DatabaseError as error:
+ self.error(error)
+ return False
+ else:
+ try:
+ self.conn = cx_Oracle.connect(
+ CX_CONNECT_STRING_OLD.format(
+ self.user,
+ self.password,
+ self.server,
+ self.service,
+ ))
+ except cx_Oracle.DatabaseError as error:
+ self.error(error)
+ return False
self.alive = True
return True
@@ -350,15 +365,15 @@ class Service(SimpleService):
return self.connect()
def check(self):
- if not HAS_ORACLE:
- self.error("'cx_Oracle' package is needed to use oracledb module")
+ if not HAS_ORACLE_NEW and not HAS_ORACLE_OLD:
+ self.error("'oracledb' package is needed to use oracledb module")
return False
if not all([
self.user,
self.password,
self.server,
- self.service,
+ self.service
]):
self.error("one of these parameters is not specified: user, password, server, service")
return False
@@ -812,7 +827,7 @@ class Service(SimpleService):
'absolute',
1,
1000,
- ])
+ ])
self.charts['allocated_usage'].add_dimension(
[
'{0}_allocated_used'.format(name),
@@ -820,7 +835,7 @@ class Service(SimpleService):
'absolute',
1,
1000,
- ])
+ ])
self.charts['allocated_usage_in_percent'].add_dimension(
[
'{0}_allocated_used_in_percent'.format(name),
diff --git a/collectors/python.d.plugin/oracledb/oracledb.conf b/collectors/python.d.plugin/oracledb/oracledb.conf
index 625717299..027215dad 100644
--- a/collectors/python.d.plugin/oracledb/oracledb.conf
+++ b/collectors/python.d.plugin/oracledb/oracledb.conf
@@ -63,9 +63,11 @@
#
# user: username # the username for the user account. Required.
# password: password # the password for the user account. Required.
-# server: localhost:1521 # the IP address or hostname of the Oracle Database Server. Required.
+# server: localhost:1521 # the IP address or hostname (and port) of the Oracle Database Server. Required.
# service: XE # the Oracle Database service name. Required. To view the services available on your server,
-# run this query: `SELECT value FROM v$parameter WHERE name='service_names'`.
+# run this query: `select SERVICE_NAME from gv$session where sid in (select sid from V$MYSTAT)`.
+# protocol: tcp/tcps # one of the strings "tcp" or "tcps" indicating whether to use unencrypted network traffic
+# or encrypted network traffic.
#
# ----------------------------------------------------------------------
# AUTO-DETECTION JOBS
@@ -76,9 +78,11 @@
# password: 'secret'
# server: 'localhost:1521'
# service: 'XE'
+# protocol: 'tcps'
#remote:
# user: 'netdata'
# password: 'secret'
# server: '10.0.0.1:1521'
-# service: 'XE' \ No newline at end of file
+# service: 'XE'
+# protocol: 'tcps'
diff --git a/collectors/python.d.plugin/smartd_log/smartd_log.conf b/collectors/python.d.plugin/smartd_log/smartd_log.conf
index 6c01d953b..3e81317f1 100644
--- a/collectors/python.d.plugin/smartd_log/smartd_log.conf
+++ b/collectors/python.d.plugin/smartd_log/smartd_log.conf
@@ -63,6 +63,7 @@
#
# log_path: '/path/to/smartd_logs' # path to smartd log files. Default is /var/log/smartd
# exclude_disks: 'PATTERN1 PATTERN2' # space separated patterns. If the pattern is in the drive name, the module will not collect data for it.
+# age: 30 # time in minutes since the last dump to file. If smartd has not dumped data within this time, the job exits.
#
# ----------------------------------------------------------------------
diff --git a/collectors/python.d.plugin/tor/tor.chart.py b/collectors/python.d.plugin/tor/tor.chart.py
index 8dc021a63..f7bc2d79b 100644
--- a/collectors/python.d.plugin/tor/tor.chart.py
+++ b/collectors/python.d.plugin/tor/tor.chart.py
@@ -17,6 +17,7 @@ except ImportError:
STEM_AVAILABLE = False
DEF_PORT = 'default'
+DEF_ADDR = '127.0.0.1'
ORDER = [
'traffic',
@@ -41,6 +42,7 @@ class Service(SimpleService):
self.order = ORDER
self.definitions = CHARTS
self.port = self.configuration.get('control_port', DEF_PORT)
+ self.addr = self.configuration.get('control_addr', DEF_ADDR)
self.password = self.configuration.get('password')
self.use_socket = isinstance(self.port, str) and self.port != DEF_PORT and not self.port.isdigit()
self.conn = None
@@ -78,7 +80,7 @@ class Service(SimpleService):
def connect_via_port(self):
try:
- self.conn = stem.control.Controller.from_port(port=self.port)
+ self.conn = stem.control.Controller.from_port(address=self.addr, port=self.port)
except (stem.SocketError, ValueError) as error:
self.error(error)
diff --git a/collectors/python.d.plugin/tor/tor.conf b/collectors/python.d.plugin/tor/tor.conf
index bf09b21fe..c7c98dc0b 100644
--- a/collectors/python.d.plugin/tor/tor.conf
+++ b/collectors/python.d.plugin/tor/tor.conf
@@ -61,6 +61,7 @@
#
# Additionally to the above, tor plugin also supports the following:
#
+# control_addr: 'address' # tor control IP address (defaults to '127.0.0.1')
# control_port: 'port' # tor control port
# password: 'password' # tor control password
#
@@ -71,6 +72,7 @@
# local_tcp:
# name: 'local'
# control_port: 9051
+# control_addr: 127.0.0.1
# password: <password>
#
# local_socket:
diff --git a/collectors/slabinfo.plugin/README.md b/collectors/slabinfo.plugin/README.md
index e0abaff80..abcbe1e3f 100644
--- a/collectors/slabinfo.plugin/README.md
+++ b/collectors/slabinfo.plugin/README.md
@@ -18,6 +18,9 @@ Each internal structure (process, file descriptor, inode...) is stored within a
The plugin is disabled by default because it collects and displays a huge amount of metrics.
To enable it set `slabinfo = yes` in the `plugins` section of the `netdata.conf` configuration file.
+If you are using [our official native DEB/RPM packages](https://github.com/netdata/netdata/blob/master/packaging/installer/methods/packages.md), you will additionally need to install the `netdata-plugin-slabinfo`
+package using your system package manager.
+
There is currently no configuration needed for the plugin itself.
As `/proc/slabinfo` is only readable by root, this plugin is setuid root.
diff --git a/collectors/tc.plugin/plugin_tc.c b/collectors/tc.plugin/plugin_tc.c
index b7e493b69..b833fd3c2 100644
--- a/collectors/tc.plugin/plugin_tc.c
+++ b/collectors/tc.plugin/plugin_tc.c
@@ -864,7 +864,7 @@ static void tc_main_cleanup(void *ptr) {
siginfo_t info;
collector_info("TC: waiting for tc plugin child process pid %d to exit...", tc_child_pid);
- waitid(P_PID, (id_t) tc_child_pid, &info, WEXITED);
+ netdata_waitid(P_PID, (id_t) tc_child_pid, &info, WEXITED);
}
tc_child_pid = 0;
diff --git a/config.cmake.h.in b/config.cmake.h.in
index e0faaa4cf..c1bebc1ba 100644
--- a/config.cmake.h.in
+++ b/config.cmake.h.in
@@ -35,7 +35,6 @@
#cmakedefine ENABLE_PROMETHEUS_REMOTE_WRITE
/* they are defined as REQUIRED in CMakeLists.txt */
-#define NETDATA_WITH_ZLIB 1
#define ENABLE_JSONC 1
#cmakedefine ENABLE_ML
diff --git a/configs.signatures b/configs.signatures
deleted file mode 100644
index 994b62a80..000000000
--- a/configs.signatures
+++ /dev/null
@@ -1,764 +0,0 @@
-declare -A configs_signatures=(
- ['00049600c2f9237e6c46b8b0f703c13c']='health.d/bcache.conf'
- ['00403e687213f3b7db9bf4563a5a92cc']='python.d/isc_dhcpd.conf'
- ['0056936ce99788ed9ae1c611c87aa6d8']='apps_groups.conf'
- ['007fc019fb32e952b509d455c016a002']='health.d/tcp_resets.conf'
- ['0083884a6cec6a48ee61665fbe131142']='charts.d/sensors.conf'
- ['0102351817595a85d01ebd54a5f2f36b']='python.d/ovpn_status_log.conf'
- ['01302e01162d465614276de43fad7546']='python.d.conf'
- ['0147c7e8f8f57e37c5dade4e8aacacf9']='python.d/example.conf'
- ['017036c1dc32c9312b2704b839bd078f']='python.d/haproxy.conf'
- ['01c54057e0ca55b5bb49df1662d6b8c3']='python.d/web_log.conf'
- ['024f4a6a431bcbc6acdb4184aa9661f3']='python.d/httpcheck.conf'
- ['02fa10fa85ab88e9723998de48d1aca0']='health.d/disks.conf'
- ['0314f0f1f88773c0ed9e9a908335e7ca']='health.d/tcp_mem.conf'
- ['032ee2b3b6cb200bfdf1a0698c2457d6']='health.d/boinc.conf'
- ['03510c8b3a2a6e8535320cfb9ebef06a']='python.d/httpcheck.conf'
- ['036dc300bd7b0e0ef229b9822686d63e']='python.d/isc_dhcpd.conf'
- ['0388b873d0d7e47c19005b7241db77d8']='python.d/tomcat.conf'
- ['04138a3d8e907c75329fe60ce2e27c1c']='health.d/tcp_resets.conf'
- ['0433d98a19d3b08e6f13884e46d39b47']='health.d/disks.conf'
- ['043f0a35dde85837fabeb85b990a41c1']='health.d/swap.conf'
- ['044496086420b531487d3c57600ca673']='apps_groups.conf'
- ['0529b679d3c0e7e6332753c7f6484731']='health.d/net.conf'
- ['054a2eece27ee2f5928b8167f5989b65']='python.d/dockerd.conf'
- ['057d12aaff0467e64529e839a258806b']='health.d/entropy.conf'
- ['05809c6662ba39f19cbec90234433d62']='health_alarm_notify.conf'
- ['059d98d0c562e1c81653d1e64673deab']='python.d/web_log.conf'
- ['05a8f39f134850c1e8d6267dbe706273']='health.d/web_log.conf'
- ['061c45b0e34170d357e47883166ecf40']='python.d/nginx.conf'
- ['06f055543a6b4d038d92c226952d777f']='health.d/isc_dhcpd.conf'
- ['074d618a7e9c72f9bdfda7611e01e0ca']='python.d/redis.conf'
- ['074df527cc70b5f38c0714f08f20e57c']='health.d/apache.conf'
- ['0787e67357804b934d2866f1b7c60b14']='health.d/ipc.conf'
- ['08042325ab27256b938575deafee8ecf']='python.d/nginx.conf'
- ['0847d54a7a0c7e0381c52e9d4d3fa7db']='health.d/mdstat.conf'
- ['084ee72d64760f2641b0720e79c922f3']='health.d/cpu.conf'
- ['0856124b1eecf01681b4fdf4e21efb3f']='health.d/net.conf'
- ['0862e7cf3d32ef48795702c6aefd27e0']='python.d/fail2ban.conf'
- ['08de9ae0765a4161abe24c09e47b3454']='python.d/go_expvar.conf'
- ['08ff5218f938fc48e09e718821169d14']='health.d/redis.conf'
- ['091572888425bc3b8b559c3c53367ec7']='apps_groups.conf'
- ['09225283977a6584f8063016091cc4f2']='health.d/tcp_resets.conf'
- ['09264cec953ae1c4c2985e6446abb386']='health.d/mysql.conf'
- ['093540fdc2a228e976ce5d48a3adf9fc']='health.d/disks.conf'
- ['09e030d26be08a13fa3560e47fa27825']='apps_groups.conf'
- ['0a5bc649295ba08f7e22e175901c1380']='python.d/unbound.conf'
- ['0a7039ecc7a86b480d9d499b12b02763']='python.d/freeradius.conf'
- ['0ad10fa896346202aee99384b0ec968d']='health.d/cpu.conf'
- ['0b6903f981cdb018c17802ac4e295609']='health.d/btrfs.conf'
- ['0bd66be0e8d99abc3a1d816036343f0a']='health_alarm_notify.conf'
- ['0c5e0fa364d7bdf7c16e8459a0544572']='health.d/netfilter.conf'
- ['0cd4e1fb57497e4d4c2451a9e58f724d']='python.d/redis.conf'
- ['0d29fe9919a2975107db1f2583344e7a']='health.d/mdstat.conf'
- ['0dd38dcd2473ddb9f8b1b41147432d10']='health_alarm_notify.conf'
- ['0e59bc11d0a869ea0247c04c08c8d72e']='python.d/ipfs.conf'
- ['0ee63c201892b21abc8bf6c712e815e3']='python.d/mysql.conf'
- ['0ef8af1f358741afa7fd5d0ffabefaac']='charts.d/mysql.conf'
- ['0f65b08edebedd06e376274021196a6b']='health.d/lighttpd.conf'
- ['0ff6d725acde27a53de4646d69e9e3d1']='health.d/net.conf'
- ['107de0cfeafcb6ab22fe7dd4a25d200d']='health.d/udp_errors.conf'
- ['107e6ac69b30fb9837ac64c35f891ec7']='health.d/tcp_resets.conf'
- ['10ac8106a109fdabdcc0405e9f43dbe1']='charts.d/mem_apps.conf'
- ['10c3b525850a1cb9de760a8ee96fbc6e']='charts.d/opensips.conf'
- ['1112c848ef91ebb9c622020d09712d67']='health.d/net.conf'
- ['111401c47f94015cd12ad0b8a4393c4f']='health.d/softnet.conf'
- ['111ead4b350593dd69b6f7ac0307b49b']='python.d/httpcheck.conf'
- ['12a4c7803ae79506a14ea784fea60dce']='health.d/net.conf'
- ['12d27b9f4d1696c2d49a77ed71d68e6f']='python.d/w1sensor.conf'
- ['12e57bea1127933a4fe49ce2e9674f4d']='statsd.d/example.conf'
- ['13141998a5d71308d9c119834c27bfd3']='python.d.conf'
- ['13ccf65fd879795f0fcea89ade27c2d0']='health.d/swap.conf'
- ['13e861a3d2f3075de883994ab54df658']='health.d/megacli.conf'
- ['1423e4f8c25a66c316be37da0d5c9c54']='health.d/btrfs.conf'
- ['142a5b693d34b0308bb0b8aec71fad79']='python.d/postfix.conf'
- ['14783e051650442ec9e2ed38d81d667e']='charts.d/exim.conf'
- ['156fe032bfd9da822060d0f515b326d9']='health.d/isc_dhcpd.conf'
- ['15d8401b56a74120f9f832873ec9c578']='health.d/postgres.conf'
- ['15e32114994b92be7853b88091e7c6fb']='python.d/exim.conf'
- ['167a2ce21035ac6b5a8720c7c2b4413c']='health.d/web_log.conf'
- ['174c21a6ce5de97bda83d502aa47a9f8']='health.d/apache.conf'
- ['17555c7418c801ceb6c93adbe485d6f9']='apps_groups.conf'
- ['178281aa2241d4a3e6b798bb9c4ae577']='python.d/haproxy.conf'
- ['17dc745a76ab4c37ee31a3224f644fc1']='charts.d/postfix.conf'
- ['18710ef6523cef8630d644ab270bfe02']='health.d/varnish.conf'
- ['18c46f55d45a17d60c72877807d9a3d2']='health.d/udp_errors.conf'
- ['18ee1c6197a4381b1c1631ef6129824f']='apps_groups.conf'
- ['1972e48345e6c3f0d65f94a03317622b']='health_alarm_notify.conf'
- ['1bc518219377499d1d05d6ee770f1a87']='python.d/go_expvar.conf'
- ['1be2d92f2934601e18e6d709590569b7']='charts.d/apcupsd.conf'
- ['1c12b678ab65f271a96da1bbd0a1ab1c']='health.d/softnet.conf'
- ['1c3168c95b53e999df3d45162b3f50b8']='health.d/fping.conf'
- ['1c71a8792c5c0ed035dd97af93a04838']='health_alarm_notify.conf'
- ['1d6efba856acaaaf3b50bc6d66611b92']='python.d/web_log.conf'
- ['1e09f326178acf07d361c08a44d8b1f3']='python.d/rabbitmq.conf'
- ['1e0bc6a0ff701d16225383e5de76585b']='python.d/spigotmc.conf'
- ['1ea8e8ef1fa8a3a0fcdfba236f4cb195']='python.d/mysql.conf'
- ['1eb0bc80934a3166fcde4d153c476d14']='health.d/fping.conf'
- ['1ef0fd38e7969c023bc3fa6d89eaf6d6']='python.d/mdstat.conf'
- ['1f43a9a820c02e0525de594299b55b15']='python.d.conf'
- ['1f5545b3ff52b3eb75ee05401f67a9bc']='fping.conf'
- ['1fa47f32ab52a22f8e7087cae85dd25e']='health.d/net.conf'
- ['203678a5be09d65993dcb3a9d475d187']='health.d/ipfs.conf'
- ['20be73f473e59bc7de1fe61d53466aba']='health.d/ram.conf'
- ['21913b96f540333094a972614f5da8f1']='charts.d/tomcat.conf'
- ['21924a6ab8008d16ffac340f226ebad9']='python.d/nginx.conf'
- ['219c5bb81965fa17d4940d4aa343c282']='health.d/mysql.conf'
- ['225792e33ddeea72992ffa5ab36d505f']='python.d/ntpd.conf'
- ['22952dbf42647c583b005054b23b545f']='health.d/disks.conf'
- ['22ceb822983134a7ca67343241f30341']='health.d/disks.conf'
- ['2320314191d0f8e7548b9273b77ac5e3']='apps_groups.conf'
- ['2385e5d35b440619621c4af62492d91b']='health.d/disks.conf'
- ['23989e04f9c7694b7eab646f8949cd52']='python.d/portcheck.conf'
- ['23a5afe5260a7ad388e447709cb009df']='python.d/web_log.conf'
- ['23ae815aefa221b1929f96752a1f7556']='health.d/squid.conf'
- ['243503ceee1d5b4e1e55a28768a116ae']='health.d/net.conf'
- ['2472e49550326f7142e2c425ccbca005']='health.d/softnet.conf'
- ['24d02e4086fd60943c45d8de2e52a4fb']='python.d/springboot.conf'
- ['254de8ec49602bea2da3631676d7cfec']='health.d/cpu.conf'
- ['256a7f06f7e579a61752fc64418cffe5']='charts.d/nut.conf'
- ['25a35a7c3c6092a839865e9be250c024']='health.d/ram.conf'
- ['262f98b3d88b98978cb08d566ce85a9d']='charts.d/squid.conf'
- ['27a1dbd43abc7394dcd72efe797ee9af']='python.d.conf'
- ['2827de41cf34a91b7a8e4d8724f59668']='health.d/net.conf'
- ['28df44a90e8ea4c6156314c03e88bf44']='health.d/softnet.conf'
- ['292c6cbbb5c819bb91f87c02a45890c1']='health.d/swap.conf'
- ['29485dc362202095d3d80e4f744d0538']='health_alarm_notify.conf'
- ['297160ae7ee01a547ed14f857b4f2c8d']='health.d/memcached.conf'
- ['298504f331c55dff4055321ff3a7b5cc']='health.d/web_log.conf'
- ['29c37d59e8801dffd18617738c1b4b71']='python.d.conf'
- ['29f97e10b92333790fbe0d2a3617b736']='health_alarm_notify.conf'
- ['2a0794fd43eadf30a51805bc9ba2c64d']='python.d/hddtemp.conf'
- ['2acae80dbdbe536a160f5b216bac84bc']='python.d/samba.conf'
- ['2ad55a5d1e885cf142849a78d4b00401']='health.d/net.conf'
- ['2b0106e89ce622da2869cb0d201246d1']='python.d/unbound.conf'
- ['2bbbebf52f84fd27fbefecd2a8a8076f']='health.d/memcached.conf'
- ['2c2b7e8df922b2bf121fb7db32bbc3bd']='health.d/udp_errors.conf'
- ['2d1d7498c72f4245cf32902c2b7e71e0']='health.d/entropy.conf'
- ['2da0a2e7117292ece11d69723a294bd7']='python.d/mongodb.conf'
- ['2ee5df033fe9c65a45566b6760b856e3']='python.d/web_log.conf'
- ['2f05e09b69ea20cda56d8f8b6fd3e86d']='health.d/couchdb.conf'
- ['2f13a6b7d11eda826ff26569b2a77080']='health.d/apcupsd.conf'
- ['2f3a8e33df83f14e0af8ca2465697215']='python.d/exim.conf'
- ['2f4a85fedecce1bf425fa1039f6b021e']='apps_groups.conf'
- ['2fa8fb929fd597f2ab97b6efc540a043']='health_alarm_notify.conf'
- ['307ac41f6c67fcf007d6f7135fac314c']='stream.conf'
- ['312b4b8e2805e19cf9be554b319567d6']='health.d/softnet.conf'
- ['31471ee7eb6cfbb412587a837ffcfe6f']='python.d.conf'
- ['3161290af7c1909768253e714ea2c3de']='python.d/ceph.conf'
- ['318bb45755726a25120bb33413d4b582']='health.d/net.conf'
- ['318db50a701442890c269ab547041e97']='health.d/tcp_orphans.conf'
- ['31e4058cfe0a01dd9ce4ae425fd7b4f1']='python.d/web_log.conf'
- ['322ec5e7095912221110623c9d7130cf']='health_alarm_notify.conf'
- ['325617412a628e3bc776e3fbb777a2a6']='health.d/redis.conf'
- ['326e1477131e0f73304711135f70a2a5']='health.d/memcached.conf'
- ['32fde0057c790964f2c743cb3c9aad29']='health.d/nginx.conf'
- ['33486497112127badc4c47ed2008969c']='python.d/freeradius.conf'
- ['33b135e28aeaef2b8224ba69a0fde245']='health.d/cpu.conf'
- ['343bc919a2fbc93f687f9d1339ec5f79']='health.d/net.conf'
- ['34f6cf10f8da18ddd7edd2a28a4fe5f9']='python.d/sensors.conf'
- ['35024ebd94542484c0866e6ee6b950cb']='health.d/net.conf'
- ['35ac63a2f08b2c6dd901c542629ae5df']='python.d/postgres.conf'
- ['35eb9785c844afd43fa7931915e2d566']='python.d/elasticsearch.conf'
- ['3634d5eddc46fb0d50cf47f370670c2c']='health.d/redis.conf'
- ['364b6e0081b116c9ec073b4d329a6dcc']='health_alarm_notify.conf'
- ['367d1463e520eb9dc89223bab161c6d1']='python.d/postgres.conf'
- ['36fdd55665cf10b0db164c2a0cca5e57']='health.d/qos.conf'
- ['373160658e7d5f1a129de397b9347365']='health.d/entropy.conf'
- ['373c1276dc9e65884ff2b26e1f08afe7']='health.d/named.conf'
- ['3798445a7faaf45c7a8047908678e690']='python.d/varnish.conf'
- ['37a5218f42e0ffd1becfb7db14cae568']='health.d/fronius.conf'
- ['37bc2b50ade9f334da4775dfea59f785']='python.d.conf'
- ['3807c37ac57046ae867e34dcfe6dbfd9']='health.d/httpcheck.conf'
- ['3848172053221b95279ba9bf789cd4e0']='health.d/apache.conf'
- ['3866efafd38e161136428d0f818cac43']='health.d/net.conf'
- ['38d1bf04fe9901481dd6febcc0404a86']='python.d.conf'
- ['392ab65af875a8daf0041113b1b40c2f']='python.d.conf'
- ['39304b2570611c3acb35b72762b46778']='charts.d/sensors.conf'
- ['394b7e91c97b7adb776460d309b335ff']='python.d/nginx.conf'
- ['39571e9fad9b759200c5d5b2ee13feb4']='python.d/redis.conf'
- ['39b65042cafdd9b849a44ec81aa66cac']='health_alarm_notify.conf'
- ['39f9422b0f0c3eec11a31aff79d89514']='health.d/retroshare.conf'
- ['3a04a3bc66c49d0c24f65a44fd9caa80']='python.d/postgres.conf'
- ['3a0f1f988d2111ba003199deca722d9b']='health_alarm_notify.conf'
- ['3a278ef6c66c122a407a0236c251119d']='python.d/nginx_plus.conf'
- ['3af522d65b50a5e447607ffb28c81ff5']='apps_groups.conf'
- ['3b1bfa40a4ff6a200bb2fc00bc51a664']='apps_groups.conf'
- ['3b535da82cf2ff53e03d735d02fb2357']='python.d/squid.conf'
- ['3bc2776623889744a98178bad6fb3b79']='health.d/disks.conf'
- ['3bc2c4423b19779d49ee7935b2ea1431']='health.d/stiebeleltron.conf'
- ['3bc65e997ab59b9de390fdf63d77f5e1']='python.d/postgres.conf'
- ['3c60691eb05d4d5bf78a41ed46303bb6']='python.d.conf'
- ['3c9c47163e9d4dbcb0079b6232398f2f']='apps_groups.conf'
- ['3ca696189911fb38a0319ddd71e9a395']='python.d/phpfpm.conf'
- ['3cc6255457d4cba881ae0554ae5d9190']='health.d/squid.conf'
- ['3d974ac9fdaa44d4527d6503bec35e34']='stream.conf'
- ['3d9b33da0f40c2ceecd006ddfd44fd14']='python.d.conf'
- ['3f170e3343cd784983b019163393f5af']='health.d/nginx.conf'
- ['3f7b669fde5c63bd55cb6dd88866d306']='python.d/ceph.conf'
- ['3fbe85671efd5d07e51584ab8262b48b']='health.d/tcp_listen.conf'
- ['3fc45cc18e884c22482524dff6d27833']='python.d/hddtemp.conf'
- ['3fcc3c449ce8e0388f9c23ca07cab608']='health.d/backend.conf'
- ['40225ee41bc3a85ce9b7f7af4d90e3e9']='charts.d/cpu_apps.conf'
- ['4063a01bffb43b0423425d1ba3004967']='health.d/tcp_resets.conf'
- ['41fa6bb109763561be59d7bcd07bbe82']='python.d/dnsdist.conf'
- ['421d5dc6c2fce22d0816b6e6363bea57']='python.d/hddtemp.conf'
- ['42ad0e70b1365b6e7244cc305dbaa529']='health_alarm_notify.conf'
- ['42bf1c7c64ed77038a0aa094d792a9e2']='python.d/mysql.conf'
- ['4332dee96e4f38fc73c962df3494ab7c']='health_alarm_notify.conf'
- ['43739017b6195a6abec14a70fe0df224']='python.d/rethinkdbs.conf'
- ['43ebb7f224c3b232d8ad044d7e9508b6']='health.d/net.conf'
- ['43ef8c1e77054f53f9be9f381eb6cd67']='python.d/portcheck.conf'
- ['4401f0c6a101d35d2cb833e7b0aeb421']='health.d/qos.conf'
- ['444e20cf75e2cd019e8d412d5d1f4a7f']='charts.d/cpu_apps.conf'
- ['4461bfacf9a3da47770fb3ca31f4c91f']='health.d/net.conf'
- ['450667c552ab7a7d8d4a2c214fdacca5']='health.d/entropy.conf'
- ['459e57e6acb389f4243f695a1e53ab2b']='health.d/boinc.conf'
- ['45a77ac36ba9f1898144b902de17204b']='health.d/memcached.conf'
- ['46798cda21e1a5faa769abf4e5d27c48']='health.d/disks.conf'
- ['46dfa2b6a7e7c76532e00c1344d5d171']='python.d/logind.conf'
- ['46ef6c1b638e40a7dfd62defdc5f99a3']='health.d/retroshare.conf'
- ['47180421d580baeaedf8c0ef3d647fb5']='python.d/hddtemp.conf'
- ['48195c5c8c0476a49b714b4c76bdb570']='python.d/squid.conf'
- ['48eef63bcf744bae114b502b6dacb4a1']='charts.d/phpfpm.conf'
- ['4960852f8951b54ca2fe10065752143e']='python.d.conf'
- ['4a448831776de8acf2e0bdc4cc994cb4']='apps_groups.conf'
- ['4aba3b6a28ccd75faf5326aca997ee0d']='health_alarm_notify.conf'
- ['4b775fb31342f1478b3773d041a72911']='python.d.conf'
- ['4ccb06fff1ce06dc5bc80e0a9f568f6e']='charts.d.conf'
- ['4cd585f5dfdacaf287413ad037b4e60a']='apps_groups.conf'
- ['4d13684cadfa90e73ab465409bf7263b']='health.d/mysql.conf'
- ['4d91ee6fe4c887ea3865ef36ac63da3c']='health.d/mysql.conf'
- ['4da1c0f009d87995ed66d84fae07f09a']='health.d/memory.conf'
- ['4dee2390e0bc89938dafa34a390dcf36']='charts.d/squid.conf'
- ['4e07ea46dd54eb0bbb4f1c0982a71973']='python.d/cpuidle.conf'
- ['4e37502fdf1944d094dd8be1e1f5e9e6']='health.d/cpu.conf'
- ['4e59e91d800059183028bbb44cf5afd2']='health.d/httpcheck.conf'
- ['4e995acb0d6fd77403a2a9dca984b55b']='charts.d.conf'
- ['4f6a5b47a13f5912cc89e9286701dd08']='health.d/redis.conf'
- ['4f6f4d39c19d7d954f769d3f9d3b4da5']='health.d/memcached.conf'
- ['4fc3fa3dc89b789c8820ce109ea6e385']='python.d/httpcheck.conf'
- ['4fdf72784296326e0b46cb526a5d77a1']='python.d.conf'
- ['4fef19afccd9a591165b72f0b1a2ac2e']='python.d/freeradius.conf'
- ['501eb2484b459b410b3f792c2dbaa955']='health.d/swap.conf'
- ['5050b5963599f13ad5dc0263fa39a906']='python.d/fail2ban.conf'
- ['508771d8e4611a058991a1bc11039dea']='health.d/disks.conf'
- ['5120492fa26be3749192607f62dc05f8']='health.d/mdstat.conf'
- ['5271cf9fc0fd10915a9759add70f7d78']='health.d/swap.conf'
- ['5278ebbae19c60db600f0a119cb3664e']='python.d/apache.conf'
- ['52d230aff57850a5aacc4e0420fcd8f5']='python.d.conf'
- ['52d4131cf9df84e2550b1a5d899ec61d']='health.d/swap.conf'
- ['5306b64a1e6baacd9de721e1f56961a8']='health_alarm_notify.conf'
- ['53160707fdc6ce46c195b1b55bb0bcb1']='health.d/swap.conf'
- ['535e5113b07b0fc6f3abd59546c276f6']='charts.d.conf'
- ['5379cdc26d7725e2b0d688d785816cef']='python.d/mysql.conf'
- ['5452eccad2f220d1191411737f6f4b2b']='python.d/isc_dhcpd.conf'
- ['54614490a14e1a4b7b3d9fecb6b4cfa5']='python.d/exim.conf'
- ['547779cdc460a926980de1590294b96b']='health.d/softnet.conf'
- ['54c3934a03453453b8d7d0e8b84a7cd8']='health_alarm_notify.conf'
- ['5523c092be7d667b228c70aeda6f44eb']='stream.conf'
- ['55608bdd908a3806df1468f6ee318b2b']='health.d/qos.conf'
- ['5598b83e915e31f68027afe324a427cd']='apps_groups.conf'
- ['55cc7e3fe365a77f8e92d01d7a428276']='health.d/ram.conf'
- ['56324751bae48308f155c079ee7ed43f']='python.d/megacli.conf'
- ['565f11c38ae6bd5cc9d3c2adb542bc1b']='health.d/softnet.conf'
- ['5664a814f9351b55da76edd472169a73']='health_alarm_notify.conf'
- ['56b689031cdcf138064825f31474b37d']='apps_groups.conf'
- ['56d072c4756b898d0c91f143b89e366b']='python.d.conf'
- ['573398335c0c71c075fa57f702bce287']='health.d/disks.conf'
- ['579c7c41c756745f57afd11c94a879d7']='python.d.conf'
- ['57be306944cb09b7f024079728fd04b9']='apps_groups.conf'
- ['5829812db29598db5857c9f433e96fef']='python.d/apache.conf'
- ['58439d9c1253e33c74b399e6853143ab']='health.d/apcupsd.conf'
- ['5855dd70d71c8497e5591b0690162c9c']='health.d/tcp_resets.conf'
- ['58660dfcc260f77deec94b328b3838e8']='health_alarm_notify.conf'
- ['58e835b7176865ec5a6f59f7aba832bf']='health.d/named.conf'
- ['598f9814966a9e2fe48e8218151d3fa6']='stream.conf'
- ['59dded33e3adfe622f36c557a4f4bed7']='health.d/net.conf'
- ['59dea5e3872e5fe4e6c535b216c516b4']='health.d/disks.conf'
- ['5b5588b00d6829908c2c5ea3220cfa1c']='health.d/load.conf'
- ['5b917d894bb6a755d59264e9d48e9d56']='fping.conf'
- ['5bbef0708f5eff4d4a53aaf35fc48a62']='health.d/disks.conf'
- ['5bf51bb24fb41db9b1e448bd060d3f8c']='apps_groups.conf'
- ['5c1694184557813a6948db0872556bf0']='charts.d/libreswan.conf'
- ['5da15d6e17a15213a720749045e5d419']='health.d/disks.conf'
- ['5dddb6c9670f4aa605abe4b0d901acc4']='python.d/bind_rndc.conf'
- ['5e6fd588ef6934cf04ddb5e662aa02ea']='health.d/postgres.conf'
- ['5eb670b6fe39da5fec2523d910b0dd1e']='health.d/cpu.conf'
- ['5f05d4b248ab2637ada319b4e8c4e4c3']='python.d/varnish.conf'
- ['5f109df927d5f20409c81f4bfca0c83e']='python.d/web_log.conf'
- ['5ff1bcaa58695754e2f6980bfe19f579']='health.d/entropy.conf'
- ['609c6c57605033da96ea65e50c90201c']='charts.d/apache.conf'
- ['60a13375b3072300bd7552cb5ee9762b']='health.d/netfilter.conf'
- ['611130db85bad90f966b52055147c81e']='python.d/httpcheck.conf'
- ['61b7ed36f35e7bd930f5f7f91694a112']='charts.d/postfix.conf'
- ['621f10b257a11add5ff5aff41e9662e3']='health.d/memcached.conf'
- ['623771eecb3c277fc728b5304793f93b']='health.d/cpu.conf'
- ['6265b7465e38839c3543190e638156aa']='python.d/ntpd.conf'
- ['6319e4ae3810e9eabb61e852e1305785']='python.d.conf'
- ['632c28d714c87a4969d11cf36a5edaa8']='health.d/web_log.conf'
- ['636d032928ea0f4741eab264fb49c099']='apps_groups.conf'
- ['6398ef37a15cb6a0bc921f58948d2b39']='health.d/softnet.conf'
- ['63c626bc64b3d7bc46a72fbccf9b1926']='health.d/net.conf'
- ['64070d856ab1b47a18ec871e49bbc13b']='python.d/squid.conf'
- ['647361e99b5f4e0d73470c569bb9461c']='apps_groups.conf'
- ['64ac37868097a462e5ee6905c350267e']='python.d/postgres.conf'
- ['64c48f9726ab987baec9c617a9fef7a6']='health.d/nginx.conf'
- ['64ffc1b6878c81b87564b0f48642c790']='health.d/elasticsearch.conf'
- ['650b5fc9da23b25ee7ee1481e4aa2851']='health_alarm_notify.conf'
- ['653e0c014c8fcfb4db6cd3351d87d720']='python.d.conf'
- ['6546909d10cc5efcef9dd873bea85956']='python.d/mysql.conf'
- ['65a59d96c039d0180603ffd945a8968c']='apps_groups.conf'
- ['65c6933a17fb6b7f8e6baeab73431c17']='charts.d/apcupsd.conf'
- ['6608c6546b3c6bde084fc1d34b1163c1']='health.d/retroshare.conf'
- ['66628e70f70c6e991f4fe641b8e9bdde']='python.d/nginx_plus.conf'
- ['669ebef43ee341f6889d382e86d0e200']='health.d/named.conf'
- ['66c068eaa3672fbe4e2448e330b3511c']='python.d/web_log.conf'
- ['66dfe138058ca26a31a118007eb31f35']='health.d/nginx.conf'
- ['6814b9bc84483db428f6a479ba221855']='python.d/mysql.conf'
- ['6848e78a5a1c349c6c42d6245d6530ad']='python.d/boinc.conf'
- ['68607aef1802ed3dc0cd593bf6073beb']='python.d/postfix.conf'
- ['6a18f61a595c0d48c3363bcc0dbfa6b9']='health_alarm_notify.conf'
- ['6a47af861ad3dd112124c37fbf09672b']='apps_groups.conf'
- ['6aa4507f86657383917a0407f2a9cc0d']='python.d.conf'
- ['6acad8ce5c33e642742825db0eb9bb56']='python.d/web_log.conf'
- ['6b39de5d85db45115db236347a6896d4']='health.d/named.conf'
- ['6b598533309e08d71023e46801d45d7e']='apps_groups.conf'
- ['6bb278bd9e171c4cb5c0fe639231288b']='python.d/web_log.conf'
- ['6bf0de6e3b251b765b10a71d8c5c319d']='python.d/apache.conf'
- ['6c9f2f0abe49a6f1a69db052ebcef1bf']='python.d/elasticsearch.conf'
- ['6ca08ea2a238cad26578b8b85edae160']='health.d/udp_errors.conf'
- ['6d02c2dd0863e09ad9dbba53e3b58116']='health.d/mysql.conf'
- ['6df13c6ad582ef339a2a93901b6f0196']='health_alarm_notify.conf'
- ['6e8366993709652fe7fc00e5d6a0a136']='charts.d/mysql.conf'
- ['6ea958ca521e0514af57c08b518d8c5c']='health.d/backend.conf'
- ['6f303ccfdc21c7b122758cea8c15e249']='python.d.conf'
- ['6f54474c885234af0c792d135644d230']='python.d.conf'
- ['7005feb3eb5d06416d07cdf7e7c54425']='python.d/ntpd.conf'
- ['70105b1744a8e13f49083d7f1981aea2']='python.d/ipfs.conf'
- ['707a63f53f4b32e01d134ae90ba94aad']='health_alarm_notify.conf'
- ['707a63f53f4b32e01d134ae90ba94aad']='health_email_recipients.conf'
- ['70d82dabecb09a1da4684f293abef0c9']='health_alarm_notify.conf'
- ['7117b7067ac2b712aa4c9e92a6cdbf5a']='python.d/couchdb.conf'
- ['7120cba2f55b1c0a97a0e10d4f6ef751']='health.d/ipmi.conf'
- ['72246c32511197d87b004e67e4c8da36']='python.d/portcheck.conf'
- ['729b3e24a72f7d566fd429617d51a21b']='health.d/web_log.conf'
- ['72ea87f658483f47c38994291af488e8']='health_alarm_notify.conf'
- ['73125ae64d5c6e9361944cd9bd14844e']='python.d/exim.conf'
- ['731a1fcfe9b2da1b9d685056a59541b8']='python.d/hddtemp.conf'
- ['73a8e10dfe4183aca751e9e2a80dabe3']='node.d.conf'
- ['7454ed74511d7b9819dfe173f9020786']='python.d/redis.conf'
- ['749fe31362969d75f1ea66d15231d98d']='python.d/retroshare.conf'
- ['74e5e8d3a4b324f1770f61f78ee4b0e6']='health.d/beanstalkd.conf'
- ['7502c931aa9acbb92f54c67978d75983']='stream.conf'
- ['751f15371d0987018abc4d4ad60819f5']='apps_groups.conf'
- ['7596ae54d46ce199ac599429ef753caf']='health.d/cpu.conf'
- ['75a9c4b0b1c73956df55585eb0619f6c']='charts.d/ap.conf'
- ['75ddb2b9bc38a5306bceb5acb0422fe3']='python.d/icecast.conf'
- ['76205037196767f6877392862eb00d7b']='health.d/ram.conf'
- ['763e24621c63f5aa05fd6dddf0c855ba']='health.d/nginx_plus.conf'
- ['7673ea6afe0a286a77a390b9d042c191']='python.d/httpcheck.conf'
- ['769aa4cdcdc3d78d0328d1f9e4edcdf9']='python.d/mysql.conf'
- ['76a0c1b21e49850442a43efddb15a81e']='health.d/tcp_orphans.conf'
- ['76a31091f42f2be1fab3bb56bb7ea400']='health_alarm_notify.conf'
- ['76edb4cc11935aadaff53129c63457aa']='python.d.conf'
- ['777f4da70f461ef675bde07fb3644312']='python.d/redis.conf'
- ['777f55a95c5c25cf6176fece1ebbf4b8']='apps_groups.conf'
- ['77b256144293ebfabad31779a5326948']='python.d/phpfpm.conf'
- ['7808ba2ca26bd0642270740cf6a8ee59']='charts.d/mem_apps.conf'
- ['7830066c46a7e5f9682b8d3f4566b4e5']='python.d/cpufreq.conf'
- ['78bb08809dffcb62e9bc493840f9c039']='python.d/squid.conf'
- ['78e0065738394f5bf15023f41d66ed4b']='python.d/squid.conf'
- ['79a37756869d9b4629285922572d6b9b']='apps_groups.conf'
- ['7a21ccc76be2968ce5d0b52ec1166788']='python.d.conf'
- ['7a985528cc9176564640001aa73e3492']='health.d/nginx.conf'
- ['7aa209fa287c95b3ca04c23681b40770']='health.d/disks.conf'
- ['7ad46e684775d186251eb71b1e9be530']='charts.d/ap.conf'
- ['7b3281b6dbdbbc0b48c53fd76033e0db']='health.d/disks.conf'
- ['7bac18d8d5ff8f117be8d489a21c0c65']='python.d/mysql.conf'
- ['7cf6402b51e5070f2be3ad6fe059ff89']='charts.d.conf'
- ['7d8bd884ec26cb35d16c4fc05f969799']='python.d/squid.conf'
- ['7deb236ec68a512b9bdd18e6a51d76f7']='python.d/mysql.conf'
- ['7e5fc1644aa7a54f9dbb1bd102521b09']='health.d/memcached.conf'
- ['7f13631183fbdf79c21c8e5a171e9b34']='health.d/zfs.conf'
- ['82f1dc0a477a175ae31d7b815411e44e']='health.d/dbengine.conf'
- ['7fb8184d56a27040e73261ed9c6fc76f']='health_alarm_notify.conf'
- ['80266bddd3df374923c750a6de91d120']='health.d/apache.conf'
- ['803a7f9dcb942eeac0fd764b9e3e38ca']='fping.conf'
- ['80d242d619eb7e91cebfdbf58d79b0f8']='health.d/disks.conf'
- ['80df37b89e852d585209b8c02bb94312']='python.d/bind_rndc.conf'
- ['80f109ff293ac94222bf3959432751bd']='health.d/qos.conf'
- ['81255035f6d53534938085df72cdef23']='health.d/nginx.conf'
- ['8170ba3ae507cf9322bd60350348552e']='health.d/net.conf'
- ['81af92c7050873c7de2fb42e0c3f04f4']='python.d/tomcat.conf'
- ['81f7c857a9a7bcf12f500166bd6c7499']='health.d/linux_power_supply.conf'
- ['81fd16f29d5f3d422fe1cee82dc8ed9d']='health.d/cpu.conf'
- ['8213d921b6a8382e27052fb42d81db3d']='python.d/freeradius.conf'
- ['8214bb8f4b005aa4691fcd38f7331e8f']='health.d/swap.conf'
- ['830c4e7307b58a4c4bb5034f091e008d']='charts.d/nginx.conf'
- ['8320bf7600afcaa3d419d268d5563133']='python.d/web_log.conf'
- ['837480f77ba1a85677a36747fbc2cd2e']='python.d/sensors.conf'
- ['8422e71761d22e817e3cfcb1befc6080']='health.d/mongodb.conf'
- ['8425a60ea3d28ed40bb0bac4c3f182e8']='python.d/sensors.conf'
- ['842b1ad5b89bfa5f421d9c5b72e001a4']='health.d/apache.conf'
- ['845023f9b4a526aa0e6493756dbe6034']='health.d/squid.conf'
- ['846ce94bfeeb90c0dc6a89e8d25f1a68']='health.d/named.conf'
- ['846f6039460aa317f165d91a54cd8b07']='health.d/stiebeleltron.conf'
- ['8490f690d97adacc4e2096df82e7e8a5']='charts.d/cpufreq.conf'
- ['86a0d8d5619b5134e2d050805b45d6c3']='python.d/unbound.conf'
- ['87155bea7383028b0c1846c802cfdd81']='python.d/mdstat.conf'
- ['871bbeea33b83ea9755600b6d574919c']='python.d/web_log.conf'
- ['87224d2f2b87646f3c0d38cc1eb30112']='python.d/nsd.conf'
- ['87615ae5ac2412d853c717383fa53781']='python.d/chrony.conf'
- ['87642c568093daf3b2c30c5beffe2225']='python.d/elasticsearch.conf'
- ['8810140ce9c09af1d18b9602c4003904']='health_alarm_notify.conf'
- ['886975ecb9a4e856151dc71024b122e6']='health.d/apcupsd.conf'
- ['8891fb423f6b987281d7913bb6c1c024']='health.d/ipc.conf'
- ['88e3b51b6b3fe8f317df82a2d4fbb990']='python.d.conf'
- ['88f77865f75c9fb61c97d700bd4561ee']='python.d/mysql.conf'
- ['8989b5e2f4ef9cd278ef58be0fae4074']='health.d/disks.conf'
- ['899bcb0b3f4375b0a1280296be930201']='health.d/named.conf'
- ['89fb3cbb223be4fa0cb676cfa3b07055']='health.d/backend.conf'
- ['8a1b95d375992d7b11330a0ac46f369c']='health.d/disks.conf'
- ['8a66a3085ad8892a002ff39b18b2cb07']='python.d/fail2ban.conf'
- ['8abc7f66746b201b5b0af45c419d53bc']='health.d/bind_rndc.conf'
- ['8b834a0f343a8e620dbb639270a84cce']='health.d/mdstat.conf'
- ['8c0f037f8ad506c41acdbc4f9f6cead6']='health_alarm_notify.conf'
- ['8c1d41e2c88aeca78bc319ed74c8748c']='python.d/phpfpm.conf'
- ['8d0552371a7c9725a04196fa560813d1']='health.d/cpu.conf'
- ['8d24873bb25c195026918f15626310ea']='health.d/softnet.conf'
- ['8d736f551571675244d853bb2f53b3da']='health.d/load.conf'
- ['8dc0bd0a70b5117454bd5f5b98f91c2c']='health.d/disks.conf'
- ['8dc6a32b8e2995cbdd527c621a72c4fb']='health.d/ram.conf'
- ['8ec636a4f96158044d2cec0fd1ff8452']='python.d/rabbitmq.conf'
- ['8ed596c4f6f85b24a890cfe95f10ce9a']='python.d/ntpd.conf'
- ['8f4f925c1e97dd164007495ec5135ffc']='health.d/fping.conf'
- ['8f520e787d995943e61a777c826bddf7']='python.d/litespeed.conf'
- ['8f7b734ea0f89abf8acbb47c50234477']='health.d/web_log.conf'
- ['8fd472a854b0996327e8ed3562161182']='health_alarm_notify.conf'
- ['919911d13901d60a7580f5dfd7fc87bb']='health.d/ram.conf'
- ['91c377e7d26a1120cfbbd488332f0398']='python.d/dns_query_time.conf'
- ['91c757ef6be3abdb86906d9dbb9c217a']='fping.conf'
- ['91cf3b3d42cac969b8b3fd4f531ecfb3']='python.d/squid.conf'
- ['91e1a9703debbdc64edf124419fdc14b']='python.d/elasticsearch.conf'
- ['91f0a626c19f76241cadf9dbf28fb5a7']='health.d/beanstalkd.conf'
- ['92024bbe088e55251665fb666305ff66']='python.d/mysql.conf'
- ['920574fcfe56d5c9c11a583905e9db62']='health.d/tcp_conn.conf'
- ['9347bcce0b3574ac5193d43248d2e3cc']='python.d/chrony.conf'
- ['93c7c00103f63ea3b4eea1951dd16c95']='health_alarm_notify.conf'
- ['94bb961f83ec724cf86239328f73a3db']='health.d/redis.conf'
- ['94e567bbefd37db0c55d880ff61188a6']='health_alarm_notify.conf'
- ['9542f80def48ba105190f6cdaa18248e']='health.d/mysql.conf'
- ['95a27691df972832a5e7626ae59b0af6']='python.d/portcheck.conf'
- ['96997c8bf3a65b9eac848cafa8c127d2']='python.d/portcheck.conf'
- ['978daf0777ffe774e5a9576d33972e97']='python.d/smartd_log.conf'
- ['97eee7a30e6419df4537242e9d4a719d']='health.d/mysql.conf'
- ['97f337eb96213f3ede05e522e3743a6c']='python.d/memcached.conf'
- ['98e4dd6ba71bf76767bc59c63a51b617']='apps_groups.conf'
- ['98f6f917138949228b9fb88c61e5aea8']='charts.d/cpufreq.conf'
- ['9962e20641036566279ac800861b7963']='python.d.conf'
- ['99a3de85d1e7826ed64a5f8576712e5d']='python.d.conf'
- ['99b06e68f1da5917ae4cf60e901439f6']='health.d/ram.conf'
- ['99b6030ce25c8fee4598179c0f95fb0b']='health.d/redis.conf'
- ['99c1617448abbdc493976ab9bda5ce02']='apps_groups.conf'
- ['9a525125e705ca5a3146b3399be4510a']='python.d/nginx_plus.conf'
- ['9a89bbdf5d9732b9a29a0aa82714059b']='health.d/dockerd.conf'
- ['9a8a459a3841b78d4c6ef07428ad2fe1']='health.d/entropy.conf'
- ['9b6eee7f2febb29efac2b7ea9fcab9be']='charts.d/nut.conf'
- ['9c0185ceff15415bc59b2ce2c1f04367']='apps_groups.conf'
- ['9c457056c9ee0d50f9717da647bbd444']='health_alarm_notify.conf'
- ['9c8ddfa810d83ae58c8614ee5229e66b']='health.d/disks.conf'
- ['9c981c75bdf4b1637f7113e7e45eb2bf']='health.d/memcached.conf'
- ['9d304e41e32721224a743f25534263d9']='python.d/retroshare.conf'
- ['9e0553ebdc21b64295873fc104cfa79d']='python.d.conf'
- ['9e07d51bb83a38dcc37a39ca92fe4865']='charts.d/opensips.conf'
- ['9e33f51e56d258e7f4336048edde2f5c']='health.d/httpcheck.conf'
- ['9eb3326ae2ee9badeaad31d8dd2eaa2b']='python.d/isc_dhcpd.conf'
- ['a02d14124b19c635c1426cee2e98bac5']='charts.d.conf'
- ['a03f3e38378385bf87d4c0f81eb1f108']='health.d/tcp_resets.conf'
- ['a09714b5942cf25a89ec3da1dbc18063']='health.d/ram.conf'
- ['a0b3a12389c9c56dfe35964b20b59836']='health.d/bind_rndc.conf'
- ['a0c0ef7ca9671f4b5e797d4276e5c0dd']='health.d/disks.conf'
- ['a0ee8f351f213c0e8af9eb7a4a09cb95']='apps_groups.conf'
- ['a1b53a225f225911cd8ac892bba8118b']='python.d/powerdns.conf'
- ['a1b6dfe312b896b0b1ba471e8ac07f95']='python.d/isc_dhcpd.conf'
- ['a1bb5823c4926b65ef4b2dae467fc847']='python.d/couchdb.conf'
- ['a250e12f1ab4c18796fdaff5b0ba8968']='python.d/varnish.conf'
- ['a2944a309f8ce1a3195451856478d6ae']='python.d.conf'
- ['a2a647dc492dc2d6ed1f5c0fdc97a96e']='python.d/mongodb.conf'
- ['a305b400378d6492efd15f9940c2779b']='health.d/softnet.conf'
- ['a41885acf112563e3446f9d937362c9b']='python.d/chrony.conf'
- ['a4407787e4beb23a701a8a614dca461d']='health.d/disks.conf'
- ['a44899a5795bed2863c1d11aa3e85586']='health.d/swap.conf'
- ['a4a8660728c6afcb528cc6b378897d6b']='health.d/squid.conf'
- ['a4be524cc5b7192878c292a17c767c28']='health.d/redis.conf'
- ['a4e8c35f8973049f4db5c8900e9a2354']='health_alarm_notify.conf'
- ['a5114d5b0d3816dba75024b9444f4b40']='health.d/disks.conf'
- ['a5134d7cfbe27f5791e788c2add51abb']='apps_groups.conf'
- ['a55133f1b0be0a4255057849dd451b09']='health_alarm_notify.conf'
- ['a6d5ce2572bf7a1dce9e545fcd29273e']='health.d/apache.conf'
- ['a70e14bda17b076d2486232355652ae6']='apps_groups.conf'
- ['a71d9082410200bf92e823675d78121c']='python.d/retroshare.conf'
- ['a731b7b164f42717c1c9a778ee637ff3']='health.d/memcached.conf'
- ['a7320c6f26191b9599ec3bc4be007a93']='health.d/swap.conf'
- ['a752e51d923e15add4a11fa8f3be935a']='health_email_recipients.conf'
- ['a78d59c2ad14a17b9b8c7fa5d796b427']='python.d.conf'
- ['a7cceeafb1e6ef1ead503ab65f687902']='apps_groups.conf'
- ['a8167dafeac0b66696a1d9b08e815cda']='health.d/disks.conf'
- ['a837986be634fd7648bcdf939019424a']='apps_groups.conf'
- ['a89c516a1144435a88decf25509318ac']='health_alarm_notify.conf'
- ['a8bb4e1d0525f59692778ad8f675a77a']='python.d/example.conf'
- ['a8feb36776005bf419c90278787a1be8']='health.d/entropy.conf'
- ['a9150a0c61e1b360cf8c265ea2413d02']='python.d/couchdb.conf'
- ['a94af1c808aafdf00537d85ff2197ec8']='python.d/exim.conf'
- ['a9827518560ae7e811ef74e08cd4d3a6']='charts.d/load_average.conf'
- ['a9ab68845db2fb695b7060273a6ac68e']='health_alarm_notify.conf'
- ['a9cd91675467c5426f5b51c47602c889']='apps_groups.conf'
- ['aa4bee249bfc0c4a88ac8c2ffb97aa0d']='health.d/squid.conf'
- ['aa620b7017c8b864d80aa6c8acab01cf']='python.d/smartd_log.conf'
- ['aa6c4a270e6276f2deddf127ee1a24f6']='statsd.d/example.conf'
- ['aa8b57a733c2035917acf81a8ebdfbe7']='health.d/haproxy.conf'
- ['aac44691a1cf95fa8f8990a79bab4ce1']='python.d/web_log.conf'
- ['ab3902bf769ed35219691c95a3954ebb']='python.d/portcheck.conf'
- ['abaf2e021f9f6ee5d1c4e4726f47348e']='health.d/ipc.conf'
- ['abe1a80ac6d6f97bd324e72f31e8256e']='health.d/ram.conf'
- ['ac8a91f0297bf7ebb8970f8cae4b3477']='health.d/ipc.conf'
- ['acaa6731a272f6d251afb357e99b518f']='apps_groups.conf'
- ['ad15b251b93f8b16bb33ec508f44a598']='health.d/netfilter.conf'
- ['ade389c1b6efe0cff47c33e662731f0a']='python.d/squid.conf'
- ['adf69efd83cb5079d0a5746e3568032f']='charts.d/exim.conf'
- ['ae5ac0a3521e50aa6f6eda2a330b4075']='python.d/example.conf'
- ['aee501b7f9b122b962521c45893371bb']='python.d/smartd_log.conf'
- ['af12051cf57dd4e484ef8e64502b7549']='health.d/net.conf'
- ['af14667ee7993acea810f6d50923bdc9']='health.d/web_log.conf'
- ['af44cc53aa2bc5cc8935667119567522']='python.d.conf'
- ['afdae4646c755ff2d117527fbf761c8e']='health.d/disks.conf'
- ['b06d1063bc2200bb2d864021fa1a9cbd']='python.d.conf'
- ['b07eebc6f58d19721ac069171b911d2a']='health_alarm_notify.conf'
- ['b0c59b2bd7a10f6a3f2be6b4b27857db']='health.d/haproxy.conf'
- ['b0f0a0ac415e4b1a82187b80d211e83b']='python.d/mysql.conf'
- ['b181dcca01a258d9792ad703583baed2']='statsd.d/example.conf'
- ['b185914d4f795e1732273dc4c7a35845']='health.d/memory.conf'
- ['b210982cac9accfe43173cef5f46b361']='health.d/beanstalkd.conf'
- ['b27f10a38a95edbbec20f44a4728b7c4']='python.d.conf'
- ['b28c77dceeb398ca4ceec44c646f5431']='stream.conf'
- ['b32164929eda7449a9677044e11151bf']='python.d.conf'
- ['b3d48935ab7f44a57d40ad349df0033d']='python.d/postgres.conf'
- ['b3fc4749b132e55ac0d3a0f92859237e']='health.d/tcp_resets.conf'
- ['b44e33ba5c7a7306b467ac9c9b698895']='health.d/bcache.conf'
- ['b4825f731cc7eb03b374eade14a453c1']='health.d/net.conf'
- ['b5246eed059e33e0903a819fa5460ce0']='python.d/ipfs.conf'
- ['b544e5934ac79a9548b5af6756c042a6']='apps_groups.conf'
- ['b5b5a8d6d991fb1cef8d80afa23ba114']='python.d/cpufreq.conf'
- ['b636e5e603f9d93e52c7577ac8c6bf0c']='health.d/entropy.conf'
- ['b68706bb8101ef85192db92f865a5d80']='health_alarm_notify.conf'
- ['b6ee82968de8fbf974c0d35b55fe6fae']='python.d/web_log.conf'
- ['b735732fbe993d8191d6b3317082efa2']='health.d/qos.conf'
- ['b75e2d3e69c1fe89c2f900bc201f7390']='health_alarm_notify.conf'
- ['b7d769ce86a7aebba01315da5c0799e6']='health.d/ram.conf'
- ['b81b8f331161b0d48e03f6fbf6b6d062']='health.d/memcached.conf'
- ['b846ca1f99fa6a65303b58186f47d7a4']='python.d/squid.conf'
- ['b854fcb711ee4d052741de5fc888682e']='health.d/backend.conf'
- ['b8969be5b3ceb4a99477937119bd4323']='python.d.conf'
- ['b8aff60806fb6829a4e72a824e655375']='health.d/beanstalkd.conf'
- ['b8b87574fd496a66ede884c5336493bd']='python.d/phpfpm.conf'
- ['b8ca1449d142b7f1cd202d875d400882']='health.d/apcupsd.conf'
- ['b915126262d08aa9da81de539a58a3fb']='python.d/redis.conf'
- ['ba11ea2d2f632b2de4b1224bcdc54f07']='python.d/smartd_log.conf'
- ['bb51112d01ff20053196a57632df8962']='apps_groups.conf'
- ['bba2f3886587f137ea08a6e63dd3d376']='python.d.conf'
- ['bcaba2347951b301127fd502a219b26a']='python.d/apache.conf'
- ['bcd94c4fa2f89c710ff807de061ab11c']='health.d/net.conf'
- ['bd12233b529e3066d5b4a78da20c495e']='python.d/ntpd.conf'
- ['bda5517ea01640cfdfa0a27549619d6a']='health.d/memcached.conf'
- ['bdec19a255367f22b6fb652d0bef6bad']='python.d/httpcheck.conf'
- ['bf66f113b2dd8d8fb444cbd5650f284c']='health_alarm_notify.conf'
- ['bfa2f469e83cf2961963841e143049e6']='health.d/tcp_listen.conf'
- ['bfd35a87c77c3a1dbe218fd02b529208']='charts.d/example.conf'
- ['bff38dfe6c879f93ac49b77990fce1cc']='python.d/ipfs.conf'
- ['c004430f55310ae9ed489c4905ed02cb']='charts.d/apache.conf'
- ['c0385cdf9e87aca01f5dee2a5d89c467']='health_alarm_notify.conf'
- ['c080e006f544c949baca33cc24a9c126']='health_alarm_notify.conf'
- ['c0c4c63384ef408f0715331e7615aa60']='python.d/ceph.conf'
- ['c132d2e257fc4df2925be7ad75100d5b']='health.d/entropy.conf'
- ['c1a7e634b5b8aad523a0d115a93379cd']='health.d/memcached.conf'
- ['c1d014ffaebfa0952968aeaf330e5337']='python.d.conf'
- ['c30ee008173ba9f77adfcacbf138143e']='python.d/ovpn_status_log.conf'
- ['c3296c08260bcd556e74711c820817be']='health.d/cpu.conf'
- ['c3661b68232e06de90bb5e63e725b8b6']='health_alarm_notify.conf'
- ['c45ab106725e94615bccf8be4b136d0f']='python.d.conf'
- ['c482676558420c4b47162651a24b8baf']='python.d/httpcheck.conf'
- ['c4f203b4b12c40640dd578af16a49bb1']='health.d/portcheck.conf'
- ['c61948101e0e6846679682794ee48c5b']='python.d/nginx.conf'
- ['c6403d8b1bcfa52d3abb941be155fc03']='python.d.conf'
- ['c6b9f31e14adca433f82054f62388c47']='python.d/web_log.conf'
- ['c84fd3292710091802e443c8e688dee1']='health_alarm_notify.conf'
- ['c878060687b85c46006e9041f3632d88']='health_alarm_notify.conf'
- ['c88fb430f35b7d8f08775d84debffbd2']='python.d/phpfpm.conf'
- ['c8e339491a83df22decbdf5f1f8a037f']='python.d.conf'
- ['c94cb4f4eeaa13c1dcee6248deb01829']='python.d/postgres.conf'
- ['c9a16df512b4a9ce7fa65f5a69bda20a']='python.d/web_log.conf'
- ['c9b792755de59d842ba95f8c315d94c8']='health.d/swap.conf'
- ['c9d102c49da0cb57886c42d1016fa163']='python.d/httpcheck.conf'
- ['ca026d7c779f0a7cb7787713c5be5c47']='charts.d.conf'
- ['ca08a9b18d38ae0a0f5081a7cdc96863']='health.d/swap.conf'
- ['ca0eb92bdd3de67582ea6db37462895f']='health.d/tcp_resets.conf'
- ['ca249db7a0637d55abb938d969f9b486']='python.d/postfix.conf'
- ['ca761cbf8a28317abe526ab3c2428472']='health.d/portcheck.conf'
- ['ca9e52b3ee3c71d3d042dc531753a1fd']='apps_groups.conf'
- ['cad263c67f779029a663b620d6c34704']='charts.d/libreswan.conf'
- ['cb178b15427274d7def5b14bc4c09441']='health.d/net.conf'
- ['cb60badf376d246ad8ec9d3f524db430']='health.d/disks.conf'
- ['cb7f80cd2768c649d7448e01f8aa6579']='python.d.conf'
- ['cc4d31a0d1ff9c339892c1f8a0c5fcd3']='charts.d/load_average.conf'
- ['cca26b4d2384043f1737e0ed4a995600']='python.d/bind_rndc.conf'
- ['ccde91d209aeb02c4a6be0e43a8d92b3']='health.d/apache.conf'
- ['cce5176664d29d137fa7575b77de01e4']='health.d/tcp_resets.conf'
- ['cd08e5534c94bf1f2cd28396c76b8bbc']='health.d/ram.conf'
- ['cd15a9a77a46d66ca0beb55f2acb7538']='health.d/mysql.conf'
- ['cd9a7de356d6424c4a71d87053726c86']='python.d/bind_rndc.conf'
- ['cdd504812ff93073c57d02209d4d0f69']='health.d/cpu.conf'
- ['cde652b15742e377e98e79fb9eb2acab']='health_alarm_notify.conf'
- ['ce0fa3485a0d8d3aa80b25ab0c70cc5a']='charts.d/apcupsd.conf'
- ['ce2e8768964a936f58c4c2144aee8a01']='health_alarm_notify.conf'
- ['ce3b65eac6c472b21905f7f72104f4c9']='python.d/nginx.conf'
- ['ce937f8b9ab7820b61ce9fcde6b946e8']='charts.d/nut.conf'
- ['cf2c9096b3a8c506a3ec76fa52574395']='charts.d/phpfpm.conf'
- ['cf46545065f7698c4d529fdc77955274']='python.d/puppet.conf'
- ['cf48dfd828af70bea04db7a809f94358']='health.d/haproxy.conf'
- ['cf8b87ede2d3233b6f55f4690af7fb08']='python.d/smartd_log.conf'
- ['cfecf298bdafaa7e0a3a263548e82132']='python.d/sensors.conf'
- ['d11711b3647bc2bdd0292dd7deebbeb1']='health.d/net.conf'
- ['d1596fe068c8674efade49a4a8e22b5d']='health.d/isc_dhcpd.conf'
- ['d162b7465a56151312e60151c1d74fba']='health.d/squid.conf'
- ['d1e79707cd9b51a14288e8dd40694fcc']='fping.conf'
- ['d297104e43ce2b544003271181e26ff6']='python.d/cpufreq.conf'
- ['d29c5fa5faf74b86d01c2270a79388d8']='health.d/disks.conf'
- ['d2b2ad30e277a69d8713e620dabc18bc']='python.d/phpfpm.conf'
- ['d3bccdfe06c099673592d5375994c329']='charts.d/hddtemp.conf'
- ['d3f397ead7f2ac8f88a99d7c5b8cff1d']='python.d/dovecot.conf'
- ['d41d8cd98f00b204e9800998ecf8427e']='python.d/portcheck.conf'
- ['d4adcebadc4c86332df247922b85aadc']='python.d/freeradius.conf'
- ['d54f9652d6510d04339682d97cd7b6e4']='python.d/httpcheck.conf'
- ['d55bdb83b9ff606852f6a97c1430258c']='health.d/ram.conf'
- ['d55be5bb5e108da1e7645da007c53cd4']='python.d.conf'
- ['d56c28ece8354850011f213d94d02fe0']='python.d/hddtemp.conf'
- ['d5dab509d8792f795bece27de39dd476']='health.d/mysql.conf'
- ['d69eba15d3e968187a938a7b98e22dda']='python.d.conf'
- ['d6cd34c96e47a8a63732a6f1512f5c39']='python.d/ovpn_status_log.conf'
- ['d712df81b17971884443a4a9bc996c9e']='health_alarm_notify.conf'
- ['d74dc63fbe631dab9a2ff1b0f5d71719']='python.d/hddtemp.conf'
- ['d7e0bd12d4a60a761dcab3531a841711']='python.d/phpfpm.conf'
- ['d86e0502e394e0a16c0ca574db462653']='health.d/megacli.conf'
- ['d8dc489e32f7114c6298fce94e86a8ef']='health.d/entropy.conf'
- ['d9036091e2232fc2b8bfa8c7484dea28']='apps_groups.conf'
- ['d9258e671d0d0b6498af1ce16ef030d2']='apps_groups.conf'
- ['d9fa0290cdfe4153188bb52dd31191df']='apps_groups.conf'
- ['da29d2ab1ab7b8fda189960c840e5144']='health.d/swap.conf'
- ['dad303c5cca7a69345811a01a74f5892']='health.d/net.conf'
- ['db305937e884c9f871bb076d5ed2946f']='health_alarm_notify.conf'
- ['dc0d2b96378f290eec3fcf98b89ad824']='python.d/cpufreq.conf'
- ['dc9c2a66778623a759706c14c3d91983']='health.d/net.conf'
- ['dd220677c42c487549952048ee1f7750']='python.d/postgres.conf'
- ['dd221c29dfb7c5586fc906748aa7c831']='health.d/tcp_listen.conf'
- ['dd7764507804a2296bfd091a58ad4ad7']='health.d/memcached.conf'
- ['dd8254ef74509a3e38cb2838e30f7e63']='health.d/disks.conf'
- ['ddda2bb1c88be03b637d3285406f7910']='health.d/named.conf'
- ['dddc4f93e6187fe4220eb6bf5e20f095']='health.d/ram.conf'
- ['de02f899a61f21b86adb646940f0bcae']='health.d/net.conf'
- ['de581daa11e267a583776bd8b8179884']='python.d/postgres.conf'
- ['de5fe159e14b481d6bd69856eaddd242']='health_alarm_notify.conf'
- ['def883f35986c9d25de63b1a8e7d0f46']='health.d/entropy.conf'
- ['df381f3a7ca9fb2b4b43ae7cb7a4c492']='python.d/mysql.conf'
- ['df7e8044902b5e155fad8430c2ddcfa8']='health.d/fping.conf'
- ['dfd5431b11cf2f3852a40d390c1d5a92']='python.d/varnish.conf'
- ['e0242003fd2e3f9ac1b9314e802ada79']='python.d/hddtemp.conf'
- ['e0ba3bc216ffc9933b4741dbb6b1f8c8']='health.d/web_log.conf'
- ['e0ffc0c34424b35666fddf7f61e05def']='health.d/tcp_resets.conf'
- ['e100d98f3ed1eff59678f035b3b8daf2']='python.d/beanstalk.conf'
- ['e12ab150198467aaa56b1091ba219587']='charts.d/nut.conf'
- ['e1822c48067954e26649f7ad5fdb71f5']='health.d/softnet.conf'
- ['e1a8bf99d36683c10225100f207a2b59']='python.d/web_log.conf'
- ['e22f30680148a29d9738bd4bfe8b252c']='health_alarm_notify.conf'
- ['e2e7adf66a28b8277f55e246b007f25a']='python.d/ntpd.conf'
- ['e2f3388c06726154c10ec22bad5bc7ec']='fping.conf'
- ['e3023092e3b2bbb5351e0fe6682f4fe9']='health_alarm_notify.conf'
- ['e3112d8e06fa77888aab02e8fcd22e25']='apps_groups.conf'
- ['e3996f70a4b09315b4a64e3df7d34d43']='python.d/rabbitmq.conf'
- ['e3d100c2d0347c08efbf6245e05620c6']='python.d/fail2ban.conf'
- ['e3e0c742427c9609ce923e845a0c8532']='health.d/ceph.conf'
- ['e3e5bc57335c489f01b8559f5c70e112']='python.d/squid.conf'
- ['e40947d22f7ed5359f12fc89e3512963']='python.d/dovecot.conf'
- ['e445de5a4d6953bddec36d85b1b2771e']='python.d/linux_power_supply.conf'
- ['e449e5582279742496550df14b6fca95']='health.d/entropy.conf'
- ['e4ed13f996434ac17b40a2228c96283b']='python.d/tomcat.conf'
- ['e5f32f54d6d6728f21f9ac26f37d6573']='python.d/example.conf'
- ['e707ad89a146004ae281d66a4e01e5c1']='health.d/load.conf'
- ['e70a7ee4999f30c6ceb75f31088a3a34']='python.d/powerdns.conf'
- ['e734c5951a8764d4d9de046dd7cf7407']='health.d/softnet.conf'
- ['e7ae3f2b00b9e5178acfe4f5e46228b7']='health.d/tcp_resets.conf'
- ['e7bc22a1942cffbd2b1b0cfd119ee328']='health.d/ipfs.conf'
- ['e8656d72dbd3b6fe603048ded751499a']='python.d/memcached.conf'
- ['e8ec8046c7007af6ca3e8c51e62c99f8']='health.d/disks.conf'
- ['ea031c1c0c36edee3bd08fae559c4203']='health_alarm_notify.conf'
- ['ea1a96c42ad464c354fb250e3408c3e8']='stream.conf'
- ['eaa7beb935cae9c48a40fb934eb105a7']='health.d/web_log.conf'
- ['eb5168f0b516bc982aac45e59da6e52e']='health.d/nginx.conf'
- ['eb748d6fb69d11b0d29c5794657e206c']='health.d/qos.conf'
- ['eb9fedc3c1dface77312d9bf48f673a8']='stream.conf'
- ['ebd0612ccc5807524ebb2b647e3e56c9']='apps_groups.conf'
- ['eca875b2e4402ee07972589bad003e01']='python.d/traefik.conf'
- ['ecb6c01fae255d369748406945a50435']='apps_groups.conf'
- ['ecd3aa97e2581f88eb466d6612690ef2']='charts.d/nginx.conf'
- ['ed43efac299c31f8fd5e2abccff30071']='python.d/samba.conf'
- ['ed80e6b2cfc8b08adea7027fc03daa68']='python.d.conf'
- ['edb48efc8f446624001e07d04f6cad1a']='apps_groups.conf'
- ['ee5343881744e6a97e6ee5cdd329cfb8']='health.d/retroshare.conf'
- ['eee974cea7534aeed2d38bcf0edf3f9e']='python.d/springboot.conf'
- ['ef067629c7456cb934f110ce15200131']='stream.conf'
- ['ef1861bf5725d91e773cbdba05687597']='python.d.conf'
- ['ef9916ea144878a9f37cbb6b1b29da10']='health.d/squid.conf'
- ['f075be84c5bfac7e34de2a091841360c']='statsd.d/example.conf'
- ['f0a86c5bae3c4b32b266dacbf74ca4a3']='python.d/web_log.conf'
- ['f1446cb3f1a905ee06defa2aa15ee806']='python.d/web_log.conf'
- ['f1682835e3414f60284c13bf1662e50f']='health.d/web_log.conf'
- ['f1f114647ed185c4812c361b1d870b44']='python.d/sensors.conf'
- ['f2622abcee86b514976a053b528553d4']='python.d/web_log.conf'
- ['f2f1b8656f5011e965ac45b818cf668d']='apps_groups.conf'
- ['f3c56a769ffdf811ec13467d8f7cd3c0']='python.d/apache.conf'
- ['f42389e5497a28205ba6fef4f716db4f']='python.d/nginx.conf'
- ['f42df9f13abfae2426519c6728b34882']='charts.d/example.conf'
- ['f4609cbd5e748f41ad4f1ea3d2dcfde0']='python.d.conf'
- ['f4c5d88c34d3fb853498124177cc77f1']='python.d.conf'
- ['f5736e0b2945182cb659cb0713eff923']='apps_groups.conf'
- ['f66e5236ba1245bb2e5fd99191f114c6']='charts.d/hddtemp.conf'
- ['f68ac0fca6b4ffc96097779344cabac6']='health.d/tcp_listen.conf'
- ['f6c6656f900ff52d159dca12d624016a']='python.d/postgres.conf'
- ['f72e44a305567c9b21a244ebd6da6800']='health_alarm_notify.conf'
- ['f7401a6e7c7d4fe2e0e2be7f7f523275']='health.d/web_log.conf'
- ['f7a99e94231beda85c6254912d8d31c1']='python.d/tomcat.conf'
- ['f82924563e41d99cdae5431f0af69155']='python.d.conf'
- ['f8c30f22df92765e2c0fab3c8174e2fc']='health.d/memcached.conf'
- ['f8dade4484f1b6a48655388502df7d5a']='health_alarm_notify.conf'
- ['f8e7d23a83fc8ee58f403da7bdbe7f8a']='python.d/phpfpm.conf'
- ['f96acba4b14b0c1b50d0187a04416151']='health_alarm_notify.conf'
- ['f9be549a849d023595d19d5d74263e0f']='health.d/tcp_resets.conf'
- ['fa4396513b358d6ec6a7f5bfb08439b8']='health.d/net.conf'
- ['fb1c75159f855f4b4671835f5bca1ef6']='python.d.conf'
- ['fbdb6f5d3906d3d8ea4e28f6ba6965a6']='python.d/go_expvar.conf'
- ['fc11bd9255ac382f442f31c1f1a32532']='health_alarm_notify.conf'
- ['fc40b83f173bc4676d686867a8369a62']='python.d/dns_query_time.conf'
- ['fc64f44eb19a8b5a88ba8dc28de355f6']='python.d/puppet.conf'
- ['fc987459f82e251e31c41d822e5e8202']='python.d/nsd.conf'
- ['fd3164e6e8cb6726706267eae49aa082']='health_alarm_notify.conf'
- ['fdd11640ba626cc2064c2fe3ea3eee4c']='health.d/cpu.conf'
- ['fde44f62c8d7e52f09705cd273fae6b1']='charts.d/tomcat.conf'
- ['fdea185e0e52b459b48852aa37f20e0f']='apps_groups.conf'
- ['fe069e4d6579ecdda7f36ac2318ffefc']='python.d/exim.conf'
- ['fe2b15369de13b83b18e2ff5c7594a57']='python.d/monit.conf'
- ['fe478efe2e721724edb1fe2ef1addf93']='health_alarm_notify.conf'
- ['feb8bcf828aa2529a7ee4a140feeb12d']='health.d/net.conf'
- ['ff1b3d8ae8b2149c711d8da9b7a9c4bd']='health_alarm_notify.conf'
- ['ff3f0a9b1bf488a5075850cc16de3c26']='python.d/monit.conf'
- ['ff940c5396f16d05deb5c5859832ee48']='health.d/swap.conf'
-)
diff --git a/configure.ac b/configure.ac
index 3c4a75ae3..7477e8159 100644
--- a/configure.ac
+++ b/configure.ac
@@ -130,12 +130,6 @@ AC_ARG_WITH(
[with_libcap="detect"]
)
AC_ARG_WITH(
- [zlib],
- [AS_HELP_STRING([--without-zlib], [build without zlib @<:@default enabled@:>@])],
- ,
- [with_zlib="yes"]
-)
-AC_ARG_WITH(
[math],
[AS_HELP_STRING([--without-math], [build without math @<:@default enabled@:>@])],
,
@@ -213,6 +207,12 @@ AC_ARG_ENABLE(
[aclk_ssl_debug="yes"],
[aclk_ssl_debug="no"]
)
+AC_ARG_ENABLE(
+ [httpd],
+ [AS_HELP_STRING([--disable-httpd], [Disable webserver (h2o based) @<:@default autodetect@:>@])],
+ ,
+ [enable_httpd="detect"]
+)
# -----------------------------------------------------------------------------
# Enforce building with C99, bail early if we can't.
@@ -475,19 +475,7 @@ PKG_CHECK_MODULES(
[have_zlib=yes],
[have_zlib=no]
)
-test "${with_zlib}" = "yes" -a "${have_zlib}" != "yes" && AC_MSG_ERROR([zlib required but not found. Try installing 'zlib1g-dev' or 'zlib-devel'.])
-
-AC_MSG_CHECKING([if zlib should be used])
-if test "${with_zlib}" != "no" -a "${have_zlib}" = "yes"; then
- with_zlib="yes"
- AC_DEFINE([NETDATA_WITH_ZLIB], [1], [zlib usability])
- OPTIONAL_ZLIB_CFLAGS="${ZLIB_CFLAGS}"
- OPTIONAL_ZLIB_LIBS="${ZLIB_LIBS}"
-else
- with_zlib="no"
-fi
-AC_MSG_RESULT([${with_zlib}])
-
+test "${have_zlib}" != "yes" && AC_MSG_ERROR([zlib required but not found. Try installing 'zlib1g-dev' or 'zlib-devel'.])
# -----------------------------------------------------------------------------
# libuuid
@@ -800,6 +788,38 @@ AC_MSG_RESULT([${with_libcap}])
AM_CONDITIONAL([ENABLE_CAPABILITY], [test "${with_libcap}" = "yes"])
# -----------------------------------------------------------------------------
+# HTTPD and h2o related
+
+can_build_httpd="no"
+if test "${enable_httpd}" != "no"; then
+ can_build_httpd="yes"
+ AC_MSG_CHECKING([can build HTTPD])
+ if test -z "${UV_LIBS}"; then
+ can_build_httpd="no"
+ fi
+ if test -n "${SSL_LIBS}"; then
+ OPTIONAL_SSL_CFLAGS="${SSL_CFLAGS}"
+ OPTIONAL_SSL_LIBS="${SSL_LIBS}"
+ else
+ can_build_httpd="no"
+ fi
+ if test "${with_zlib}" != "yes"; then
+ can_build_httpd="no"
+ fi
+ AC_MSG_RESULT([${can_build_httpd}])
+
+ if test "${can_build_httpd}" = "no" -a "${enable_httpd}" = "yes"; then
+ AC_MSG_ERROR([HTTPD was requested but it cannot be built])
+ fi
+
+ if test "${can_build_httpd}" = "yes"; then
+ AC_DEFINE([ENABLE_HTTPD], [1], [HTTPD (h2o based web server)])
+ HTTPD_CFLAGS="-I\$(abs_top_srcdir)/httpd/h2o/include -I\$(abs_top_srcdir)/httpd/h2o/deps/picotls/include -I\$(abs_top_srcdir)/httpd/h2o/deps/quicly/include -DH2O_USE_LIBUV=0"
+ fi
+fi
+AM_CONDITIONAL([ENABLE_HTTPD], [test "${can_build_httpd}" = "yes"])
+
+# -----------------------------------------------------------------------------
# ACLK
bundled_proto_avail="no"
@@ -1237,7 +1257,7 @@ fi
# Check if uuid is available. Fail if ML was explicitly requested.
if test "${enable_ml}" = "yes" -a "${have_uuid}" != "yes"; then
- AC_MSG_ERROR([You have explicitly requested --enable-ml functionality but libuuid can not be found."])
+ AC_MSG_ERROR([You have explicitly requested --enable-ml functionality but libuuid can not be found.])
fi
# Check if submodules have not been fetched. Fail if ML was explicitly requested.
@@ -1274,6 +1294,21 @@ fi
# -----------------------------------------------------------------------------
+# debugfs.plugin
+
+if test "${build_target}" = "linux"; then
+ AC_DEFINE([ENABLE_DEBUGFS_PLUGIN], [1], [debugfs.plugin])
+ enable_plugin_debugfs="yes"
+else
+ enable_plugin_debugfs="no"
+fi
+
+AC_MSG_CHECKING([if debugfs.plugin should be enabled])
+AC_MSG_RESULT([${enable_plugin_debugfs}])
+AM_CONDITIONAL([ENABLE_PLUGIN_DEBUGFS], [test "${enable_plugin_debugfs}" = "yes"])
+
+
+# -----------------------------------------------------------------------------
# ebpf.plugin
if test "${build_target}" = "linux" -a "${enable_ebpf}" != "no"; then
@@ -1685,11 +1720,11 @@ AC_SUBST([netdata_user])
AC_SUBST([libsysdir])
CFLAGS="${originalCFLAGS} ${OPTIONAL_LTO_CFLAGS} ${OPTIONAL_PROTOBUF_CFLAGS} ${OPTIONAL_MATH_CFLAGS} ${OPTIONAL_NFACCT_CFLAGS} \
- ${OPTIONAL_ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \
+ ${ZLIB_CFLAGS} ${OPTIONAL_UUID_CFLAGS} \
${OPTIONAL_LIBCAP_CFLAGS} ${OPTIONAL_IPMIMONITORING_CFLAGS} ${OPTIONAL_CUPS_CFLAGS} ${OPTIONAL_XENSTAT_FLAGS} \
${OPTIONAL_KINESIS_CFLAGS} ${OPTIONAL_PUBSUB_CFLAGS} ${OPTIONAL_PROMETHEUS_REMOTE_WRITE_CFLAGS} \
${OPTIONAL_MONGOC_CFLAGS} ${LWS_CFLAGS} ${OPTIONAL_JSONC_STATIC_CFLAGS} ${OPTIONAL_YAML_STATIC_CFLAGS} ${OPTIONAL_BPF_CFLAGS} ${JUDY_CFLAGS} \
- ${OPTIONAL_ACLK_CFLAGS} ${OPTIONAL_ML_CFLAGS} ${OPTIONAL_OS_DEP_CFLAGS}"
+ ${OPTIONAL_ACLK_CFLAGS} ${OPTIONAL_ML_CFLAGS} ${OPTIONAL_OS_DEP_CFLAGS} ${HTTPD_CFLAGS}"
CXXFLAGS="${CFLAGS} ${OPTIONAL_KINESIS_CXXFLAGS} ${CPP_STD_FLAG}"
@@ -1714,8 +1749,8 @@ AC_SUBST([OPTIONAL_JSONC_LIBS])
AC_SUBST([OPTIONAL_YAML_LIBS])
AC_SUBST([OPTIONAL_NFACCT_CFLAGS])
AC_SUBST([OPTIONAL_NFACCT_LIBS])
-AC_SUBST([OPTIONAL_ZLIB_CFLAGS])
-AC_SUBST([OPTIONAL_ZLIB_LIBS])
+AC_SUBST([ZLIB_CFLAGS])
+AC_SUBST([ZLIB_LIBS])
AC_SUBST([OPTIONAL_UUID_CFLAGS])
AC_SUBST([OPTIONAL_UUID_LIBS])
AC_SUBST([OPTIONAL_BPF_CFLAGS])
@@ -1788,6 +1823,7 @@ AC_CONFIG_FILES([
collectors/apps.plugin/Makefile
collectors/cgroups.plugin/Makefile
collectors/charts.d.plugin/Makefile
+ collectors/debugfs.plugin/Makefile
collectors/diskspace.plugin/Makefile
collectors/timex.plugin/Makefile
collectors/ioping.plugin/Makefile
@@ -1896,4 +1932,3 @@ AC_CONFIG_FILES([
AC_OUTPUT
test "${with_math}" != "yes" && AC_MSG_WARN([You are building without math. math allows accurate calculations. It should be enabled.]) || :
-test "${with_zlib}" != "yes" && AC_MSG_WARN([You are building without zlib. zlib allows netdata to transfer a lot less data with web clients. It should be enabled.]) || :
diff --git a/contrib/debian/control b/contrib/debian/control
index eeeb8d25c..f326c4b90 100644
--- a/contrib/debian/control
+++ b/contrib/debian/control
@@ -32,17 +32,28 @@ Homepage: https://netdata.cloud
Package: netdata
Architecture: any
-Depends: adduser,
- libcap2-bin (>= 1:2.0),
- lsb-base (>= 3.1-23.2),
- openssl,
+Depends: openssl,
${misc:Depends},
- ${shlibs:Depends}
+ ${shlibs:Depends},
+ netdata-plugin-ebpf [amd64],
+ netdata-plugin-apps,
+ netdata-plugin-pythond,
+ netdata-plugin-go,
+ netdata-plugin-debugfs,
+ netdata-plugin-nfacct,
+ netdata-plugin-chartsd,
+ netdata-plugin-slabinfo,
+ netdata-plugin-perf
+Pre-Depends: adduser,
+ dpkg (>= 1.17.14),
+             libcap2-bin (>= 1:2.0),
+ lsb-base (>= 3.1-23.2)
Conflicts: netdata-core,
netdata-plugins-bash,
netdata-plugins-python,
netdata-web
-Pre-Depends: dpkg (>= 1.17.14)
+Suggests: netdata-plugin-cups,
+ netdata-plugin-freeipmi
Description: real-time charts for system monitoring
Netdata is a daemon that collects data in realtime (per second)
and presents a web site to view and analyze them. The presentation
@@ -53,15 +64,128 @@ Package: netdata-plugin-cups
Architecture: any
Depends: cups,
${shlibs:Depends},
- netdata (>= ${source:Version})
-Description: The Common Unix Printing System plugin for metrics collection from cupsd
+ netdata (= ${source:Version})
+Pre-Depends: adduser
+Description: The CUPS metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from the Common
+ UNIX Printing System.
Package: netdata-plugin-freeipmi
Architecture: any
Depends: freeipmi,
${shlibs:Depends},
netdata (= ${source:Version})
-Description: FreeIPMI - The Intelligent Platform Management System.
- The IPMI specification defines a set of interfaces for platform management.
- It is implemented by a number vendors for system management. The features of IPMI that most users will be interested in
- are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
+Pre-Depends: adduser
+Description: The FreeIPMI metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from hardware
+ using FreeIPMI.
+
+Package: netdata-plugin-nfacct
+Architecture: any
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version})
+Pre-Depends: adduser
+Conflicts: netdata (<< ${source:Version})
+Description: The NFACCT metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from the firewall
+ using NFACCT objects.
+
+Package: netdata-plugin-chartsd
+Architecture: all
+Depends: bash,
+ netdata (= ${source:Version})
+Pre-Depends: adduser
+Conflicts: netdata (<< ${source:Version})
+Suggests: apcupsd, nut, iw, sudo
+Description: The charts.d metrics collection plugin for the Netdata Agent
+ This plugin adds a selection of additional collectors written in shell
+ script to the Netdata Agent. It includes collectors for NUT, APCUPSD,
+ LibreSWAN, OpenSIPS, and Wireless access point statistics.
+
+Package: netdata-plugin-ebpf
+Architecture: amd64
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version}),
+ netdata-ebpf-code-legacy (= ${source:Version})
+Pre-Depends: adduser
+Recommends: netdata-plugin-apps (= ${source:Version})
+Conflicts: netdata (<< ${source:Version})
+Description: The eBPF metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to use eBPF code to collect more
+ detailed kernel-level metrics for the system.
+
+Package: netdata-ebpf-code-legacy
+Architecture: amd64
+Depends: netdata-plugin-ebpf (= ${source:Version})
+Pre-Depends: adduser
+Conflicts: netdata (<< ${source:Version})
+Description: Compiled eBPF legacy code for the Netdata eBPF plugin
+ This package provides the pre-compiled eBPF legacy code for use by
+ the Netdata eBPF plugin. This code is only needed when using the eBPF
+ plugin with kernels that do not include BTF support (mostly kernel
+ versions lower than 5.10).
+
+Package: netdata-plugin-pythond
+Architecture: all
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version})
+Pre-Depends: adduser
+Suggests: sudo
+Conflicts: netdata (<< ${source:Version})
+Description: The python.d metrics collection plugin for the Netdata Agent
+ Many of the collectors provided by this package are also available
+ in netdata-plugin-go. In most cases, you probably want to use those
+ versions instead of the Python versions.
+
+Package: netdata-plugin-go
+Architecture: any
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version})
+Pre-Depends: libcap2-bin, adduser
+Suggests: nvme-cli, sudo
+Conflicts: netdata (<< ${source:Version})
+Description: The go.d metrics collection plugin for the Netdata Agent
+ This plugin adds a selection of additional collectors written in Go to
+ the Netdata Agent. A significant percentage of the application specific
+ collectors provided by Netdata are part of this plugin, so most users
+ will want it installed.
+
+Package: netdata-plugin-apps
+Architecture: any
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version})
+Pre-Depends: libcap2-bin, adduser
+Conflicts: netdata (<< ${source:Version})
+Description: The per-application metrics collection plugin for the Netdata Agent
+ This plugin allows the Netdata Agent to collect per-application and
+ per-user metrics without using cgroups.
+
+Package: netdata-plugin-slabinfo
+Architecture: any
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version})
+Pre-Depends: libcap2-bin, adduser
+Conflicts: netdata (<< ${source:Version})
+Description: The slabinfo metrics collector for the Netdata Agent
+ This plugin allows the Netdata Agent to collect performance and
+ utilization metrics for the Linux kernel’s SLAB allocator.
+
+Package: netdata-plugin-perf
+Architecture: any
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version})
+Pre-Depends: libcap2-bin, adduser
+Conflicts: netdata (<< ${source:Version})
+Description: The perf metrics collector for the Netdata Agent
+ This plugin allows the Netdata Agent to collect metrics from the Linux perf
+ subsystem.
+
+Package: netdata-plugin-debugfs
+Architecture: any
+Depends: ${shlibs:Depends},
+ netdata (= ${source:Version})
+Pre-Depends: libcap2-bin, adduser
+Conflicts: netdata (<< ${source:Version})
+Description: The debugfs metrics collector for the Netdata Agent
+ This plugin allows the Netdata Agent to collect Linux kernel metrics
+ exposed through debugfs.
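The control changes split every bundled collector into its own binary package, each pinned to the daemon with a strict netdata (= ${source:Version}) dependency and fenced off from older monolithic installs with Conflicts: netdata (<< ${source:Version}). If the pinning works, the daemon and all installed plugin packages always share one version string; a quick post-install sanity check one could run (package names taken from the stanzas above):

    #!/bin/sh
    # Every line should print the same version as the netdata daemon.
    for pkg in netdata netdata-plugin-go netdata-plugin-apps \
               netdata-plugin-pythond netdata-plugin-chartsd \
               netdata-plugin-debugfs; do
        dpkg-query -W -f='${Package} ${Version}\n' "$pkg" 2>/dev/null
    done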
diff --git a/contrib/debian/netdata-ebpf-code-legacy.postinst b/contrib/debian/netdata-ebpf-code-legacy.postinst
new file mode 100644
index 000000000..b82532286
--- /dev/null
+++ b/contrib/debian/netdata-ebpf-code-legacy.postinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata-ebpf-code-legacy.list | xargs -n 30 chown root:netdata
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
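This postinst (like the chartsd and pythond ones below) has no single binary to fix up, so it walks dpkg's installed-file manifest for the package (/var/lib/dpkg/info/<package>.list), keeps only paths under /usr/libexec/netdata, and chowns them in batches of 30 via xargs to keep each command line bounded. The same idiom in isolation, with an extra -r guard against empty input (a GNU xargs extension the script above does without):

    #!/bin/sh
    pkg=netdata-ebpf-code-legacy
    # dpkg records every path the package installed, one per line.
    grep /usr/libexec/netdata "/var/lib/dpkg/info/${pkg}.list" \
        | xargs -r -n 30 chown root:netdata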
diff --git a/contrib/debian/netdata-ebpf-code-legacy.preinst b/contrib/debian/netdata-ebpf-code-legacy.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-ebpf-code-legacy.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-apps.postinst b/contrib/debian/netdata-plugin-apps.postinst
new file mode 100644
index 000000000..04f914538
--- /dev/null
+++ b/contrib/debian/netdata-plugin-apps.postinst
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/apps.plugin
+ setcap "cap_dac_read_search=eip cap_sys_ptrace=eip" /usr/libexec/netdata/plugins.d/apps.plugin
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
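Rather than being setuid root, apps.plugin is shipped 0750 root:netdata and granted exactly the two file capabilities it needs: cap_dac_read_search to read files regardless of permission bits, and cap_sys_ptrace to inspect other processes; =eip raises them in the effective, inheritable, and permitted sets. A simple way to verify the result on an installed system (getcap ships with the libcap tools; its exact output format varies slightly between versions):

    #!/bin/sh
    plugin=/usr/libexec/netdata/plugins.d/apps.plugin
    # Expected: cap_dac_read_search and cap_sys_ptrace in the granted
    # sets, plus mode 0750 root:netdata from the packaging rules.
    getcap "$plugin"
    ls -l "$plugin"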
diff --git a/contrib/debian/netdata-plugin-apps.preinst b/contrib/debian/netdata-plugin-apps.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-apps.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-chartsd.postinst b/contrib/debian/netdata-plugin-chartsd.postinst
new file mode 100644
index 000000000..1871bfef6
--- /dev/null
+++ b/contrib/debian/netdata-plugin-chartsd.postinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata-plugin-chartsd.list | xargs -n 30 chown root:netdata
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-chartsd.preinst b/contrib/debian/netdata-plugin-chartsd.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-chartsd.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-debugfs.postinst b/contrib/debian/netdata-plugin-debugfs.postinst
new file mode 100644
index 000000000..75d08fd17
--- /dev/null
+++ b/contrib/debian/netdata-plugin-debugfs.postinst
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/debugfs.plugin
+ setcap "cap_dac_read_search=eip" /usr/libexec/netdata/plugins.d/debugfs.plugin
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-debugfs.preinst b/contrib/debian/netdata-plugin-debugfs.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-debugfs.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-ebpf.postinst b/contrib/debian/netdata-plugin-ebpf.postinst
new file mode 100644
index 000000000..2458d6d6c
--- /dev/null
+++ b/contrib/debian/netdata-plugin-ebpf.postinst
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/ebpf.plugin
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/ebpf.plugin
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-ebpf.preinst b/contrib/debian/netdata-plugin-ebpf.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-ebpf.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-freeipmi.postinst b/contrib/debian/netdata-plugin-freeipmi.postinst
new file mode 100644
index 000000000..9e88d406a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-freeipmi.postinst
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/freeipmi.plugin
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/freeipmi.plugin
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-freeipmi.preinst b/contrib/debian/netdata-plugin-freeipmi.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-freeipmi.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-go.postinst b/contrib/debian/netdata-plugin-go.postinst
new file mode 100644
index 000000000..9cfce16f6
--- /dev/null
+++ b/contrib/debian/netdata-plugin-go.postinst
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/go.d.plugin
+ setcap "cap_net_admin=eip cap_net_raw=eip" /usr/libexec/netdata/plugins.d/go.d.plugin
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-go.preinst b/contrib/debian/netdata-plugin-go.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-go.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-nfacct.postinst b/contrib/debian/netdata-plugin-nfacct.postinst
new file mode 100644
index 000000000..3fa37641f
--- /dev/null
+++ b/contrib/debian/netdata-plugin-nfacct.postinst
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/nfacct.plugin
+ chmod -f 4750 /usr/libexec/netdata/plugins.d/nfacct.plugin
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-nfacct.preinst b/contrib/debian/netdata-plugin-nfacct.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-nfacct.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-perf.postinst b/contrib/debian/netdata-plugin-perf.postinst
new file mode 100644
index 000000000..5250275cc
--- /dev/null
+++ b/contrib/debian/netdata-plugin-perf.postinst
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/perf.plugin
+ if capsh --supports=cap_perfmon 2>/dev/null; then
+ setcap cap_perfmon+ep /usr/libexec/netdata/plugins.d/perf.plugin
+ else
+ setcap cap_sys_admin+ep /usr/libexec/netdata/plugins.d/perf.plugin
+ fi
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
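cap_perfmon only exists on Linux 5.8+ (and needs a libcap whose capsh understands --supports), so the script probes at install time and falls back to the far broader cap_sys_admin on older systems. The probe in isolation, printing the capability it would grant rather than granting it:

    #!/bin/sh
    # Prefer the narrowly scoped cap_perfmon (Linux >= 5.8); otherwise
    # fall back to cap_sys_admin, as the postinst above does.
    if capsh --supports=cap_perfmon 2>/dev/null; then
        cap=cap_perfmon
    else
        cap=cap_sys_admin
    fi
    echo "would run: setcap ${cap}+ep /usr/libexec/netdata/plugins.d/perf.plugin"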
diff --git a/contrib/debian/netdata-plugin-perf.preinst b/contrib/debian/netdata-plugin-perf.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-perf.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-pythond.postinst b/contrib/debian/netdata-plugin-pythond.postinst
new file mode 100644
index 000000000..fc4ac504e
--- /dev/null
+++ b/contrib/debian/netdata-plugin-pythond.postinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata-plugin-pythond.list | xargs -n 30 chown root:netdata
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-pythond.preinst b/contrib/debian/netdata-plugin-pythond.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-pythond.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata-plugin-slabinfo.postinst b/contrib/debian/netdata-plugin-slabinfo.postinst
new file mode 100644
index 000000000..b697e724e
--- /dev/null
+++ b/contrib/debian/netdata-plugin-slabinfo.postinst
@@ -0,0 +1,14 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ configure|reconfigure)
+ chown root:netdata /usr/libexec/netdata/plugins.d/slabinfo.plugin
+ setcap "cap_dac_read_search=eip" /usr/libexec/netdata/plugins.d/slabinfo.plugin
+ ;;
+esac
+
+#DEBHELPER#
+
+exit 0
diff --git a/contrib/debian/netdata-plugin-slabinfo.preinst b/contrib/debian/netdata-plugin-slabinfo.preinst
new file mode 100644
index 000000000..fcabb415a
--- /dev/null
+++ b/contrib/debian/netdata-plugin-slabinfo.preinst
@@ -0,0 +1,13 @@
+#!/bin/sh
+
+set -e
+
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/contrib/debian/netdata.postinst b/contrib/debian/netdata.postinst
index 5cce2c3d1..15ffeaf7b 100644
--- a/contrib/debian/netdata.postinst
+++ b/contrib/debian/netdata.postinst
@@ -17,25 +17,6 @@ dpkg-maintscript-helper dir_to_symlink \
case "$1" in
configure|reconfigure)
- if ! getent group netdata > /dev/null; then
- addgroup --quiet --system netdata
- fi
-
- if ! getent passwd netdata > /dev/null; then
- adduser --quiet --system --ingroup netdata --home /var/lib/netdata --no-create-home netdata
- fi
-
- for item in docker nginx varnish haproxy adm nsd proxy squid ceph nobody I2C; do
- if getent group $item > /dev/null 2>&1; then
- usermod -a -G $item netdata
- fi
- done
- # Netdata must be able to read /etc/pve/qemu-server/* and /etc/pve/lxc/*
- # for reading VMs/containers names, CPU and memory limits on Proxmox.
- if [ -d "/etc/pve" ] && getent group "www-data" > /dev/null 2>&1; then
- usermod -a -G www-data netdata
- fi
-
if ! dpkg-statoverride --list /var/lib/netdata > /dev/null 2>&1; then
dpkg-statoverride --update --add netdata netdata 0755 /var/lib/netdata
fi
@@ -58,28 +39,11 @@ case "$1" in
dpkg-statoverride --force --update --add root netdata 0775 /var/lib/netdata/registry > /dev/null 2>&1
- chown -R root:netdata /usr/libexec/netdata/plugins.d
- setcap cap_dac_read_search,cap_sys_ptrace+ep /usr/libexec/netdata/plugins.d/apps.plugin
- setcap cap_dac_read_search+ep /usr/libexec/netdata/plugins.d/slabinfo.plugin
-
- if capsh --supports=cap_perfmon 2>/dev/null; then
- setcap cap_perfmon+ep /usr/libexec/netdata/plugins.d/perf.plugin
- else
- setcap cap_sys_admin+ep /usr/libexec/netdata/plugins.d/perf.plugin
- fi
-
- if [ -f "/usr/libexec/netdata/plugins.d/go.d.plugin" ]; then
- setcap "cap_net_admin+epi cap_net_raw=eip" /usr/libexec/netdata/plugins.d/go.d.plugin
- fi
+ grep /usr/libexec/netdata /var/lib/dpkg/info/netdata.list | xargs -n 30 chown root:netdata
chmod 4750 /usr/libexec/netdata/plugins.d/cgroup-network
- chmod 4750 /usr/libexec/netdata/plugins.d/nfacct.plugin
-
- # Workaround if system does not have ebpf.plugin
- chmod -f 4750 /usr/libexec/netdata/plugins.d/ebpf.plugin || true
# Workaround for other plugins not installed directly by this package
- chmod -f 4750 /usr/libexec/netdata/plugins.d/freeipmi.plugin || true
chmod -f 4750 /usr/libexec/netdata/plugins.d/ioping || true
;;
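With the per-plugin packages now carrying their own permission fixes, the daemon's postinst shrinks to the pieces it still owns: the statoverrides for its directories and a manifest-driven chown. dpkg-statoverride matters here because it records the ownership and mode in dpkg's own database, so the override is re-applied on every upgrade, whereas a bare chown/chmod would be undone whenever dpkg unpacks new files. The registration idiom on its own:

    #!/bin/sh
    dir=/var/lib/netdata
    # Register once; dpkg re-applies the override on later upgrades.
    if ! dpkg-statoverride --list "$dir" > /dev/null 2>&1; then
        dpkg-statoverride --update --add netdata netdata 0755 "$dir"
    fi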
diff --git a/contrib/debian/netdata.preinst b/contrib/debian/netdata.preinst
index 3bbdea0b3..a5dc9107e 100644
--- a/contrib/debian/netdata.preinst
+++ b/contrib/debian/netdata.preinst
@@ -15,4 +15,27 @@ dpkg-maintscript-helper dir_to_symlink \
dpkg-maintscript-helper dir_to_symlink \
/var/lib/netdata/www/static /usr/share/netdata/www/static 1.18.1~ netdata -- "$@"
+case "$1" in
+ install)
+ if ! getent group netdata > /dev/null; then
+ addgroup --quiet --system netdata
+ fi
+
+ if ! getent passwd netdata > /dev/null; then
+ adduser --quiet --system --ingroup netdata --home /var/lib/netdata --no-create-home netdata
+ fi
+
+ for item in docker nginx varnish haproxy adm nsd proxy squid ceph nobody I2C; do
+ if getent group $item > /dev/null 2>&1; then
+ usermod -a -G $item netdata
+ fi
+ done
+ # Netdata must be able to read /etc/pve/qemu-server/* and /etc/pve/lxc/*
+ # for reading VMs/containers names, CPU and memory limits on Proxmox.
+ if [ -d "/etc/pve" ] && getent group "www-data" > /dev/null 2>&1; then
+ usermod -a -G www-data netdata
+ fi
+ ;;
+esac
+
#DEBHELPER#
diff --git a/contrib/debian/rules b/contrib/debian/rules
index 149b19ec7..314424fcb 100755
--- a/contrib/debian/rules
+++ b/contrib/debian/rules
@@ -15,7 +15,7 @@ else
SYSTEMD_UNIT = system/systemd/netdata.service
endif
-ifeq ($(shell test `uname -m` != "x86_64" && echo "1"), 1)
+ifeq ($(shell test ${DEB_TARGET_ARCH} != "amd64" && echo "1"), 1)
HAVE_EBPF = 0
EBPF_CONFIG = --disable-ebpf
else
@@ -60,6 +60,71 @@ override_dh_install:
mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/freeipmi.plugin \
$(TOP)-plugin-freeipmi/usr/libexec/netdata/plugins.d/freeipmi.plugin
+	# Add NFACCT plugin install rules
+ #
+ mkdir -p $(TOP)-plugin-nfacct/usr/libexec/netdata/plugins.d
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/nfacct.plugin \
+ $(TOP)-plugin-nfacct/usr/libexec/netdata/plugins.d/nfacct.plugin
+
+ # Add charts.d plugin install rules
+ #
+ mkdir -p $(TOP)-plugin-chartsd/usr/libexec/netdata/plugins.d/
+ mkdir -p $(TOP)-plugin-chartsd/usr/lib/netdata/conf.d/
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/charts.d.plugin \
+ $(TOP)-plugin-chartsd/usr/libexec/netdata/plugins.d/charts.d.plugin
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/charts.d.dryrun-helper.sh \
+ $(TOP)-plugin-chartsd/usr/libexec/netdata/plugins.d/charts.d.dryrun-helper.sh
+ mv -f $(TEMPTOP)/usr/libexec/netdata/charts.d \
+ $(TOP)-plugin-chartsd/usr/libexec/netdata/charts.d
+ mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/charts.d.conf \
+ $(TOP)-plugin-chartsd/usr/lib/netdata/conf.d/charts.d.conf
+ mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/charts.d \
+ $(TOP)-plugin-chartsd/usr/lib/netdata/conf.d/charts.d
+
+ # Add ebpf plugin install rules
+ if [ $(HAVE_EBPF) -eq 1 ]; then \
+ mkdir -p $(TOP)-plugin-ebpf/usr/libexec/netdata/plugins.d/; \
+ mkdir -p $(TOP)-plugin-ebpf/usr/lib/netdata/conf.d/; \
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/ebpf.plugin $(TOP)-plugin-ebpf/usr/libexec/netdata/plugins.d/ebpf.plugin; \
+ mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/ebpf.d.conf $(TOP)-plugin-ebpf/usr/lib/netdata/conf.d/ebpf.d.conf; \
+ mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/ebpf.d $(TOP)-plugin-ebpf/usr/lib/netdata/conf.d/ebpf.d; \
+ fi
+
+ # Add python plugin install rules
+ mkdir -p $(TOP)-plugin-pythond/usr/libexec/netdata/plugins.d/
+ mkdir -p $(TOP)-plugin-pythond/usr/lib/netdata/conf.d/
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/python.d.plugin \
+ $(TOP)-plugin-pythond/usr/libexec/netdata/plugins.d/python.d.plugin
+ mv -f $(TEMPTOP)/usr/libexec/netdata/python.d \
+ $(TOP)-plugin-pythond/usr/libexec/netdata/python.d
+ mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/python.d.conf \
+ $(TOP)-plugin-pythond/usr/lib/netdata/conf.d/python.d.conf
+ mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/python.d \
+ $(TOP)-plugin-pythond/usr/lib/netdata/conf.d/python.d
+
+ # Add apps plugin install rules
+ mkdir -p $(TOP)-plugin-apps/usr/libexec/netdata/plugins.d/
+ mkdir -p $(TOP)-plugin-apps/usr/lib/netdata/conf.d/
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/apps.plugin \
+ $(TOP)-plugin-apps/usr/libexec/netdata/plugins.d/apps.plugin
+ mv -f $(TEMPTOP)/usr/lib/netdata/conf.d/apps_groups.conf \
+ $(TOP)-plugin-apps/usr/lib/netdata/conf.d/apps_groups.conf
+
+ # Add slabinfo plugin install rules
+ mkdir -p $(TOP)-plugin-slabinfo/usr/libexec/netdata/plugins.d/
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/slabinfo.plugin \
+ $(TOP)-plugin-slabinfo/usr/libexec/netdata/plugins.d/slabinfo.plugin
+
+ # Add perf plugin install rules
+ mkdir -p $(TOP)-plugin-perf/usr/libexec/netdata/plugins.d/
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/perf.plugin \
+ $(TOP)-plugin-perf/usr/libexec/netdata/plugins.d/perf.plugin
+
+ # Add debugfs plugin install rules
+ mkdir -p $(TOP)-plugin-debugfs/usr/libexec/netdata/plugins.d/
+ mv -f $(TEMPTOP)/usr/libexec/netdata/plugins.d/debugfs.plugin \
+ $(TOP)-plugin-debugfs/usr/libexec/netdata/plugins.d/debugfs.plugin
+
# Set the rest of the software in the main package
#
cp -rp $(TEMPTOP)/usr $(TOP)
@@ -82,13 +147,20 @@ override_dh_install:
ln -s "/usr/share/netdata/www/$$D" "$(TOP)/var/lib/netdata/www/$$D"; \
done
+ # Handle eBPF code
+ #
if [ $(HAVE_EBPF) -eq 1 ]; then \
- packaging/bundle-ebpf.sh . ${TOP}/usr/libexec/netdata/plugins.d; \
+ mkdir -p $(TOP)-ebpf-code-legacy/usr/libexec/netdata/plugins.d/; \
+ packaging/bundle-ebpf.sh . ${TOP}-ebpf-code-legacy/usr/libexec/netdata/plugins.d/ force; \
fi
- # Install go
+ # Install go to its own package directory
#
- debian/install_go.sh $$(cat ${CURDIR}/packaging/go.d.version) $(TOP)/usr/lib/netdata $(TOP)/usr/libexec/netdata
+ mkdir -p $(TOP)-plugin-go/usr/lib/netdata/conf.d
+ mkdir -p $(TOP)-plugin-go/usr/libexec/netdata/plugins.d
+ debian/install_go.sh $$(cat ${CURDIR}/packaging/go.d.version) \
+ $(TOP)-plugin-go/usr/lib/netdata \
+ $(TOP)-plugin-go/usr/libexec/netdata
override_dh_installdocs:
dh_installdocs
@@ -109,13 +181,22 @@ override_dh_fixperms:
#
chmod 0755 $(TOP)/usr/libexec/netdata/netdata-updater.sh
+ # debugfs plugin
+ chmod 0750 $(TOP)-plugin-debugfs/usr/libexec/netdata/plugins.d/debugfs.plugin
+
# apps.plugin should only be runnable by the netdata user. It will be
# given extra capabilities in the postinst script.
#
- chmod 0750 $(TOP)/usr/libexec/netdata/plugins.d/apps.plugin
- chmod 0750 $(TOP)/usr/libexec/netdata/plugins.d/perf.plugin
- chmod 0750 $(TOP)/usr/libexec/netdata/plugins.d/slabinfo.plugin
- chmod 0750 $(TOP)/usr/libexec/netdata/plugins.d/go.d.plugin
+ chmod 0750 $(TOP)-plugin-apps/usr/libexec/netdata/plugins.d/apps.plugin
+
+ # slabinfo package
+ chmod 0750 $(TOP)-plugin-slabinfo/usr/libexec/netdata/plugins.d/slabinfo.plugin
+
+ # perf package
+ chmod 0750 $(TOP)-plugin-perf/usr/libexec/netdata/plugins.d/perf.plugin
+
+ # Go plugin package
+ chmod 0750 $(TOP)-plugin-go/usr/libexec/netdata/plugins.d/go.d.plugin
# CUPS plugin package
chmod 0750 $(TOP)-plugin-cups/usr/libexec/netdata/plugins.d/cups.plugin
@@ -123,6 +204,9 @@ override_dh_fixperms:
# freeIPMI plugin package
chmod 4750 $(TOP)-plugin-freeipmi/usr/libexec/netdata/plugins.d/freeipmi.plugin
+ # NFACCT plugin package
+ chmod 4750 $(TOP)-plugin-nfacct/usr/libexec/netdata/plugins.d/nfacct.plugin
+
override_dh_installlogrotate:
cp system/logrotate/netdata debian/netdata.logrotate
dh_installlogrotate
@@ -137,5 +221,5 @@ override_dh_clean:
# Tidy up copied/generated files
#
- -[ -r $(CURDIR)/debian/netdata.logrotate ] && rm $(CURDIR)/debian/netdata.logrotate
- -[ -r $(CURDIR)/debian/netdata.conffiles ] && rm $(CURDIR)/debian/netdata.conffiles
+ [ -r $(CURDIR)/debian/netdata.logrotate ] && rm $(CURDIR)/debian/netdata.logrotate ; true
+ [ -r $(CURDIR)/debian/netdata.conffiles ] && rm $(CURDIR)/debian/netdata.conffiles ; true
diff --git a/daemon/analytics.c b/daemon/analytics.c
index b3c802b86..2689886bd 100644
--- a/daemon/analytics.c
+++ b/daemon/analytics.c
@@ -375,8 +375,12 @@ void analytics_https(void)
BUFFER *b = buffer_create(30, NULL);
#ifdef ENABLE_HTTPS
analytics_exporting_connectors_ssl(b);
- buffer_strcat(b, netdata_ssl_client_ctx && rrdhost_flag_check(localhost, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED) && localhost->sender->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE ? "streaming|" : "|");
- buffer_strcat(b, netdata_ssl_srv_ctx ? "web" : "");
+
+ buffer_strcat(b, netdata_ssl_streaming_sender_ctx &&
+ rrdhost_flag_check(localhost, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED) &&
+ SSL_connection(&localhost->sender->ssl) ? "streaming|" : "|");
+
+ buffer_strcat(b, netdata_ssl_web_server_ctx ? "web" : "");
#else
buffer_strcat(b, "||");
#endif
diff --git a/daemon/anonymous-statistics.sh.in b/daemon/anonymous-statistics.sh.in
index 8676ffbe7..32cbc71db 100755
--- a/daemon/anonymous-statistics.sh.in
+++ b/daemon/anonymous-statistics.sh.in
@@ -74,7 +74,6 @@ NETDATA_PREBUILT_DISTRO="${42}"
# define body of request to be sent
REQ_BODY="$(cat << EOF
{
- "api_key": "mqkwGT0JNFqO-zX2t0mW6Tec9yooaVu7xCBlXtHnt5Y",
"event": "${ACTION} ${ACTION_RESULT}",
"properties": {
"distinct_id": "${NETDATA_REGISTRY_UNIQUE_ID}",
@@ -164,9 +163,9 @@ REQ_BODY="$(cat << EOF
EOF
)"
-# send the anonymous statistics to the Netdata PostHog
+# send the anonymous statistics to Netdata
if [ -n "$(command -v curl 2> /dev/null)" ]; then
- curl --silent -o /dev/null --write-out '%{http_code}' -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" https://app.posthog.com/capture/
+ curl --silent -o /dev/null --write-out '%{http_code}' -X POST --max-time 2 --header "Content-Type: application/json" -d "${REQ_BODY}" https://us-east1-netdata-analytics-bi.cloudfunctions.net/ingest_agent_events
else
wget -q -O - --no-check-certificate \
--server-response \
@@ -174,5 +173,5 @@ else
--timeout=1 \
--header 'Content-Type: application/json' \
--body-data "${REQ_BODY}" \
- 'https://app.posthog.com/capture/' 2>&1 | awk '/^ HTTP/{print $2}'
+ 'https://us-east1-netdata-analytics-bi.cloudfunctions.net/ingest_agent_events' 2>&1 | awk '/^ HTTP/{print $2}'
fi
diff --git a/daemon/buildinfo.c b/daemon/buildinfo.c
index ef813a961..d277d2b3c 100644
--- a/daemon/buildinfo.c
+++ b/daemon/buildinfo.c
@@ -20,6 +20,12 @@
#endif
#endif
+#ifdef ENABLE_HTTPD
+#define FEAT_HTTPD 1
+#else
+#define FEAT_HTTPD 0
+#endif
+
#ifdef ENABLE_DBENGINE
#define FEAT_DBENGINE 1
#else
@@ -89,12 +95,6 @@
#define FEAT_LIBCAP 0
#endif
-#ifdef NETDATA_WITH_ZLIB
-#define FEAT_ZLIB 1
-#else
-#define FEAT_ZLIB 0
-#endif
-
#ifdef STORAGE_WITH_MATH
#define FEAT_LIBM 1
#else
@@ -115,6 +115,12 @@
#define FEAT_APPS_PLUGIN 0
#endif
+#ifdef ENABLE_DEBUGFS_PLUGIN
+#define FEAT_DEBUGFS_PLUGIN 1
+#else
+#define FEAT_DEBUGFS_PLUGIN 0
+#endif
+
#ifdef HAVE_FREEIPMI
#define FEAT_IPMI 1
#else
@@ -275,6 +281,7 @@ void print_build_info(void) {
printf(" TLS Host Verification: %s\n", FEAT_YES_NO(FEAT_TLS_HOST_VERIFY));
printf(" Machine Learning: %s\n", FEAT_YES_NO(FEAT_ML));
printf(" Stream Compression: %s\n", FEAT_YES_NO(FEAT_STREAM_COMPRESSION));
+ printf(" HTTPD (h2o): %s\n", FEAT_YES_NO(FEAT_HTTPD));
printf("Libraries:\n");
printf(" protobuf: %s%s\n", FEAT_YES_NO(FEAT_PROTOBUF), FEAT_PROTOBUF_BUNDLED);
@@ -284,12 +291,13 @@ void print_build_info(void) {
printf(" libcrypto: %s\n", FEAT_YES_NO(FEAT_CRYPTO));
printf(" libm: %s\n", FEAT_YES_NO(FEAT_LIBM));
printf(" tcalloc: %s\n", FEAT_YES_NO(FEAT_TCMALLOC));
- printf(" zlib: %s\n", FEAT_YES_NO(FEAT_ZLIB));
+ printf(" zlib: %s\n", FEAT_YES_NO(1));
printf("Plugins:\n");
printf(" apps: %s\n", FEAT_YES_NO(FEAT_APPS_PLUGIN));
printf(" cgroup Network Tracking: %s\n", FEAT_YES_NO(FEAT_CGROUP_NET));
printf(" CUPS: %s\n", FEAT_YES_NO(FEAT_CUPS));
+ printf(" debugfs: %s\n", FEAT_YES_NO(FEAT_DEBUGFS_PLUGIN));
printf(" EBPF: %s\n", FEAT_YES_NO(FEAT_EBPF));
printf(" IPMI: %s\n", FEAT_YES_NO(FEAT_IPMI));
printf(" NFACCT: %s\n", FEAT_YES_NO(FEAT_NFACCT));
@@ -328,7 +336,8 @@ void print_build_info_json(void) {
printf(" \"tls-host-verify\": %s,\n", FEAT_JSON_BOOL(FEAT_TLS_HOST_VERIFY));
printf(" \"machine-learning\": %s\n", FEAT_JSON_BOOL(FEAT_ML));
- printf(" \"stream-compression\": %s\n", FEAT_JSON_BOOL(FEAT_STREAM_COMPRESSION));
+ printf(" \"stream-compression\": %s\n", FEAT_JSON_BOOL(FEAT_STREAM_COMPRESSION));
+ printf(" \"httpd-h2o\": %s\n", FEAT_JSON_BOOL(FEAT_HTTPD));
printf(" },\n");
printf(" \"libs\": {\n");
@@ -340,13 +349,14 @@ void print_build_info_json(void) {
printf(" \"libcrypto\": %s,\n", FEAT_JSON_BOOL(FEAT_CRYPTO));
printf(" \"libm\": %s,\n", FEAT_JSON_BOOL(FEAT_LIBM));
printf(" \"tcmalloc\": %s,\n", FEAT_JSON_BOOL(FEAT_TCMALLOC));
- printf(" \"zlib\": %s\n", FEAT_JSON_BOOL(FEAT_ZLIB));
+ printf(" \"zlib\": %s\n", FEAT_JSON_BOOL(1));
printf(" },\n");
printf(" \"plugins\": {\n");
printf(" \"apps\": %s,\n", FEAT_JSON_BOOL(FEAT_APPS_PLUGIN));
printf(" \"cgroup-net\": %s,\n", FEAT_JSON_BOOL(FEAT_CGROUP_NET));
printf(" \"cups\": %s,\n", FEAT_JSON_BOOL(FEAT_CUPS));
+ printf(" \"debugfs\": %s,\n", FEAT_JSON_BOOL(FEAT_DEBUGFS_PLUGIN));
printf(" \"ebpf\": %s,\n", FEAT_JSON_BOOL(FEAT_EBPF));
printf(" \"ipmi\": %s,\n", FEAT_JSON_BOOL(FEAT_IPMI));
printf(" \"nfacct\": %s,\n", FEAT_JSON_BOOL(FEAT_NFACCT));
@@ -417,13 +427,14 @@ void analytics_build_info(BUFFER *b) {
#ifdef ENABLE_TCMALLOC
add_to_bi(b, "tcalloc");
#endif
-#ifdef NETDATA_WITH_ZLIB
add_to_bi(b, "zlib");
-#endif
#ifdef ENABLE_APPS_PLUGIN
add_to_bi(b, "apps");
#endif
+#ifdef ENABLE_DEBUGFS_PLUGIN
+ add_to_bi(b, "debugfs");
+#endif
#ifdef HAVE_SETNS
add_to_bi(b, "cgroup Network Tracking");
#endif
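The buildinfo changes above all follow one compile-time pattern: each optional feature is normalized to a 0/1 FEAT_* macro, then rendered through a single formatting macro. Below is a minimal standalone sketch of that pattern; FEAT_YES_NO's exact definition is assumed here rather than copied from buildinfo.c.

/* Each feature collapses to 0 or 1 at compile time; printing is uniform. */
#include <stdio.h>

#ifdef ENABLE_EXAMPLE_FEATURE
#define FEAT_EXAMPLE 1
#else
#define FEAT_EXAMPLE 0
#endif

#define FEAT_YES_NO(x) ((x) ? "YES" : "NO")   /* assumed stand-in definition */

int main(void) {
    printf("    example feature: %s\n", FEAT_YES_NO(FEAT_EXAMPLE));
    printf("    zlib:            %s\n", FEAT_YES_NO(1)); /* now unconditional, as in the diff */
    return 0;
}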
diff --git a/daemon/common.h b/daemon/common.h
index 66ffd4a74..aeaf01637 100644
--- a/daemon/common.h
+++ b/daemon/common.h
@@ -41,6 +41,11 @@
// the netdata webserver(s)
#include "web/server/web_server.h"
+// the new h2o based netdata webserver
+#ifdef ENABLE_HTTPD
+#include "httpd/http_server.h"
+#endif
+
// streaming metrics between netdata servers
#include "streaming/rrdpush.h"
diff --git a/daemon/main.c b/daemon/main.c
index 606de128b..cff6530f3 100644
--- a/daemon/main.c
+++ b/daemon/main.c
@@ -170,6 +170,8 @@ static void service_to_buffer(BUFFER *wb, SERVICE_TYPE service) {
buffer_strcat(wb, "ANALYTICS ");
if(service & SERVICE_EXPORTERS)
buffer_strcat(wb, "EXPORTERS ");
+ if(service & SERVICE_HTTPD)
+ buffer_strcat(wb, "HTTPD ");
}
static bool service_wait_exit(SERVICE_TYPE service, usec_t timeout_ut) {
@@ -365,6 +367,7 @@ void netdata_cleanup_and_exit(int ret) {
| SERVICE_EXPORTERS
| SERVICE_HEALTH
| SERVICE_WEB_SERVER
+ | SERVICE_HTTPD
, 3 * USEC_PER_SEC);
delta_shutdown_time("stop collectors and streaming threads");
@@ -479,7 +482,7 @@ void netdata_cleanup_and_exit(int ret) {
#ifdef ENABLE_HTTPS
delta_shutdown_time("free openssl structures");
- security_clean_openssl();
+ netdata_ssl_cleanup();
#endif
delta_shutdown_time("remove incomplete shutdown file");
@@ -573,8 +576,6 @@ void web_server_config_options(void)
web_allow_mgmt_dns =
make_dns_decision(CONFIG_SECTION_WEB, "allow management by dns","heuristic",web_allow_mgmt_from);
-
-#ifdef NETDATA_WITH_ZLIB
web_enable_gzip = config_get_boolean(CONFIG_SECTION_WEB, "enable gzip compression", web_enable_gzip);
char *s = config_get(CONFIG_SECTION_WEB, "gzip compression strategy", "default");
@@ -602,7 +603,6 @@ void web_server_config_options(void)
error("Invalid compression level %d. Valid levels are 1 (fastest) to 9 (best ratio). Proceeding with level 9 (best compression).", web_gzip_level);
web_gzip_level = 9;
}
-#endif /* NETDATA_WITH_ZLIB */
}
@@ -834,7 +834,7 @@ static void security_init(){
tls_version = config_get(CONFIG_SECTION_WEB, "tls version", "1.3");
tls_ciphers = config_get(CONFIG_SECTION_WEB, "tls ciphers", "none");
- security_openssl_library();
+ netdata_ssl_initialize_openssl();
}
#endif
@@ -1979,6 +1979,14 @@ int main(int argc, char **argv) {
if(web_server_mode != WEB_SERVER_MODE_NONE)
api_listen_sockets_setup();
+
+#ifdef ENABLE_HTTPD
+ delta_startup_time("initialize httpd server");
+ for (int i = 0; static_threads[i].name; i++) {
+ if (static_threads[i].start_routine == httpd_main)
+ static_threads[i].enabled = httpd_is_enabled();
+ }
+#endif
}
delta_startup_time("set resource limits");
diff --git a/daemon/main.h b/daemon/main.h
index 3e32c5ad6..232b7a98a 100644
--- a/daemon/main.h
+++ b/daemon/main.h
@@ -41,7 +41,8 @@ typedef enum {
SERVICE_CONTEXT = (1 << 10),
SERVICE_ANALYTICS = (1 << 11),
SERVICE_EXPORTERS = (1 << 12),
- SERVICE_ACLKSYNC = (1 << 13)
+ SERVICE_ACLKSYNC = (1 << 13),
+ SERVICE_HTTPD = (1 << 14)
} SERVICE_TYPE;
typedef enum {
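SERVICE_TYPE is a bitmask enum, which is why netdata_cleanup_and_exit() above can hand several services to service_wait_exit() in a single call by OR-ing them together. A small sketch of the idiom follows; the SERVICE_HTTPD bit matches the diff, the other bit position is illustrative.

#include <stdio.h>

typedef enum {
    SERVICE_WEB_SERVER_X = (1 << 5),   /* illustrative bit position */
    SERVICE_HTTPD_X      = (1 << 14)   /* as added in the diff */
} service_type_example;

int main(void) {
    unsigned services = SERVICE_WEB_SERVER_X | SERVICE_HTTPD_X;  /* combine */
    if (services & SERVICE_HTTPD_X)                              /* test */
        printf("HTTPD is included in this shutdown batch\n");
    return 0;
}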
diff --git a/daemon/signals.c b/daemon/signals.c
index c857a9b57..3699010ce 100644
--- a/daemon/signals.c
+++ b/daemon/signals.c
@@ -2,8 +2,6 @@
#include "common.h"
-static int reaper_enabled = 0;
-
typedef enum signal_action {
NETDATA_SIGNAL_END_OF_LIST,
NETDATA_SIGNAL_IGNORE,
@@ -78,16 +76,6 @@ void signals_init(void) {
struct sigaction sa;
sa.sa_flags = 0;
- // Enable process tracking / reaper if running as init (pid == 1).
- // This prevents zombie processes when running in a container.
- if (getpid() == 1) {
- info("SIGNAL: Enabling reaper");
- netdata_popen_tracking_init();
- reaper_enabled = 1;
- } else {
- info("SIGNAL: Not enabling reaper");
- }
-
// ignore all signals while we run in a signal handler
sigfillset(&sa.sa_mask);
@@ -97,10 +85,6 @@ void signals_init(void) {
case NETDATA_SIGNAL_IGNORE:
sa.sa_handler = SIG_IGN;
break;
- case NETDATA_SIGNAL_CHILD:
- if (reaper_enabled == 0)
- continue;
- // FALLTHROUGH
default:
sa.sa_handler = signal_handler;
break;
@@ -115,9 +99,6 @@ void signals_restore_SIGCHLD(void)
{
struct sigaction sa;
- if (reaper_enabled == 0)
- return;
-
sa.sa_flags = 0;
sigfillset(&sa.sa_mask);
sa.sa_handler = signal_handler;
@@ -137,9 +118,6 @@ void signals_reset(void) {
if(sigaction(signals_waiting[i].signo, &sa, NULL) == -1)
error("SIGNAL: Failed to reset signal handler for: %s", signals_waiting[i].name);
}
-
- if (reaper_enabled == 1)
- netdata_popen_tracking_cleanup();
}
// reap_child reaps the child identified by pid.
@@ -147,39 +125,42 @@ static void reap_child(pid_t pid) {
siginfo_t i;
errno = 0;
- debug(D_CHILDS, "SIGNAL: Reaping pid: %d...", pid);
- if (waitid(P_PID, (id_t)pid, &i, WEXITED|WNOHANG) == -1) {
+ debug(D_CHILDS, "SIGNAL: reap_child(%d)...", pid);
+ if (netdata_waitid(P_PID, (id_t)pid, &i, WEXITED|WNOHANG) == -1) {
if (errno != ECHILD)
- error("SIGNAL: Failed to wait for: %d", pid);
+ error("SIGNAL: waitid(%d): failed to wait for child", pid);
else
- debug(D_CHILDS, "SIGNAL: Already reaped: %d", pid);
+ info("SIGNAL: waitid(%d): failed - it seems the child is already reaped", pid);
return;
- } else if (i.si_pid == 0) {
+ }
+ else if (i.si_pid == 0) {
// Process didn't exit, this shouldn't happen.
+ error("SIGNAL: waitid(%d): reports pid 0 - child has not exited", pid);
return;
}
switch (i.si_code) {
- case CLD_EXITED:
- debug(D_CHILDS, "SIGNAL: Child %d exited: %d", pid, i.si_status);
- break;
- case CLD_KILLED:
- debug(D_CHILDS, "SIGNAL: Child %d killed by signal: %d", pid, i.si_status);
- break;
- case CLD_DUMPED:
- debug(D_CHILDS, "SIGNAL: Child %d dumped core by signal: %d", pid, i.si_status);
- break;
- case CLD_STOPPED:
- debug(D_CHILDS, "SIGNAL: Child %d stopped by signal: %d", pid, i.si_status);
- break;
- case CLD_TRAPPED:
- debug(D_CHILDS, "SIGNAL: Child %d trapped by signal: %d", pid, i.si_status);
- break;
- case CLD_CONTINUED:
- debug(D_CHILDS, "SIGNAL: Child %d continued by signal: %d", pid, i.si_status);
- break;
- default:
- debug(D_CHILDS, "SIGNAL: Child %d gave us a SIGCHLD with code %d and status %d.", pid, i.si_code, i.si_status);
+ case CLD_EXITED:
+ info("SIGNAL: reap_child(%d) exited with code: %d", pid, i.si_status);
+ break;
+ case CLD_KILLED:
+ info("SIGNAL: reap_child(%d) killed by signal: %d", pid, i.si_status);
+ break;
+ case CLD_DUMPED:
+ info("SIGNAL: reap_child(%d) dumped core by signal: %d", pid, i.si_status);
+ break;
+ case CLD_STOPPED:
+ info("SIGNAL: reap_child(%d) stopped by signal: %d", pid, i.si_status);
+ break;
+ case CLD_TRAPPED:
+ info("SIGNAL: reap_child(%d) trapped by signal: %d", pid, i.si_status);
+ break;
+ case CLD_CONTINUED:
+ info("SIGNAL: reap_child(%d) continued by signal: %d", pid, i.si_status);
+ break;
+ default:
+ info("SIGNAL: reap_child(%d) gave us a SIGCHLD with code %d and status %d.", pid, i.si_code, i.si_status);
+ break;
}
}
@@ -187,25 +168,13 @@ static void reap_child(pid_t pid) {
static void reap_children() {
siginfo_t i;
- while (1 == 1) {
- // Identify which process caused the signal so we can determine
- // if we need to reap a re-parented process.
+ while(1) {
i.si_pid = 0;
- if (waitid(P_ALL, (id_t)0, &i, WEXITED|WNOHANG|WNOWAIT) == -1) {
- if (errno != ECHILD) // This shouldn't happen with WNOHANG but does.
- error("SIGNAL: Failed to wait");
- return;
- } else if (i.si_pid == 0) {
- // No child exited.
+ if (netdata_waitid(P_ALL, (id_t)0, &i, WEXITED|WNOHANG|WNOWAIT) == -1 || i.si_pid == 0)
+ // nothing to do
return;
- } else if (netdata_popen_tracking_pid_shoud_be_reaped(i.si_pid) == 0) {
- // myp managed, sleep for a short time to avoid busy wait while
- // this is handled by myp.
- usleep(10000);
- } else {
- // Unknown process, likely a re-parented child, reap it.
- reap_child(i.si_pid);
- }
+
+ reap_child(i.si_pid);
}
}
@@ -267,7 +236,6 @@ void signals_handle(void) {
break;
case NETDATA_SIGNAL_CHILD:
- debug(D_CHILDS, "SIGNAL: Received %s. Reaping...", name);
reap_children();
break;
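The rewritten reaper works in two stages: reap_children() polls for any exited child with WNOHANG|WNOWAIT, which leaves the child still reapable, and reap_child() then collects that specific pid with WEXITED|WNOHANG. Below is a self-contained sketch using plain waitid(2); netdata_waitid() is assumed to be a thin wrapper around it.

#define _XOPEN_SOURCE 700
#include <stdio.h>
#include <errno.h>
#include <sys/wait.h>
#include <unistd.h>

static void reap_all(void) {
    for (;;) {
        siginfo_t i;
        i.si_pid = 0;
        /* peek: find an exited child without consuming its status */
        if (waitid(P_ALL, 0, &i, WEXITED | WNOHANG | WNOWAIT) == -1 || i.si_pid == 0)
            return;  /* no exited child is pending */
        pid_t pid = i.si_pid;
        /* reap: now actually consume the status of that pid */
        if (waitid(P_PID, (id_t)pid, &i, WEXITED | WNOHANG) == -1 && errno != ECHILD)
            perror("waitid");
        else
            printf("reaped pid %d (si_code %d, status %d)\n",
                   (int)pid, i.si_code, i.si_status);
    }
}

int main(void) {
    if (fork() == 0) _exit(42);  /* child exits immediately */
    sleep(1);                    /* crude: give the child time to exit */
    reap_all();
    return 0;
}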
diff --git a/daemon/static_threads.c b/daemon/static_threads.c
index d93cfe9d0..fe83945cf 100644
--- a/daemon/static_threads.c
+++ b/daemon/static_threads.c
@@ -142,6 +142,18 @@ const struct netdata_static_thread static_threads_common[] = {
.start_routine = socket_listen_main_static_threaded
},
+#ifdef ENABLE_HTTPD
+ {
+ .name = "httpd",
+ .config_section = NULL,
+ .config_name = NULL,
+ .enabled = 0,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = httpd_main
+ },
+#endif
+
#ifdef ENABLE_ACLK
{
.name = "ACLK_MAIN",
diff --git a/database/README.md b/database/README.md
index eb7081620..c5750e114 100644
--- a/database/README.md
+++ b/database/README.md
@@ -39,8 +39,7 @@ So,
- On a single node setup, use `[db].mode = dbengine`.
- On a [Parent - Child](https://github.com/netdata/netdata/blob/master/docs/metrics-storage-management/enable-streaming.md) setup, use `[db].mode = dbengine` on the
- parent to increase retention, a more resource efficient mode like, `dbengine` with light retention settings, and
- `save`, `ram` or `none` modes for the children to minimize resource utilization.
+ parent to increase retention, and a more resource-efficient mode on the children, such as `dbengine` with light retention settings, or `save`, `ram`, or `none`, to minimize resource utilization.
## Choose your database mode
diff --git a/database/contexts/api_v2.c b/database/contexts/api_v2.c
index a08d1509c..d83a9e9e3 100644
--- a/database/contexts/api_v2.c
+++ b/database/contexts/api_v2.c
@@ -352,21 +352,12 @@ static ssize_t rrdcontext_to_json_v2_add_host(void *data, RRDHOST *host, bool qu
buffer_json_member_add_string_or_empty(wb, "osVersion", host->system_info->host_os_version);
}
+ time_t now = now_realtime_sec();
buffer_json_member_add_object(wb, "status");
-
- size_t receiver_hops = host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1;
- buffer_json_member_add_object(wb, "collection");
- buffer_json_member_add_uint64(wb, "hops", receiver_hops);
- buffer_json_member_add_boolean(wb, "online", host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN | RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED));
- buffer_json_member_add_boolean(wb, "replicating", rrdhost_receiver_replicating_charts(host));
- buffer_json_object_close(wb); // collection
-
- buffer_json_member_add_object(wb, "streaming");
- buffer_json_member_add_uint64(wb, "hops", host->sender ? host->sender->hops : receiver_hops + 1);
- buffer_json_member_add_boolean(wb, "online", rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED));
- buffer_json_member_add_boolean(wb, "replicating", rrdhost_sender_replicating_charts(host));
- buffer_json_object_close(wb); // streaming
-
+ {
+ rrdhost_receiver_to_json(wb, host, "collection", now);
+ rrdhost_sender_to_json(wb, host, "streaming", now);
+ }
buffer_json_object_close(wb); // status
}
diff --git a/database/contexts/query_target.c b/database/contexts/query_target.c
index 69386a3f8..7759f85e8 100644
--- a/database/contexts/query_target.c
+++ b/database/contexts/query_target.c
@@ -1049,7 +1049,7 @@ QUERY_TARGET *query_target_create(QUERY_TARGET_REQUEST *qtr) {
qt->window.before = qt->request.before;
qt->window.options = qt->request.options;
- if(query_target_has_percentage_of_instance(qt))
+ if(query_target_has_percentage_of_group(qt))
qt->window.options &= ~RRDR_OPTION_PERCENTAGE;
rrdr_relative_window_to_absolute(&qt->window.after, &qt->window.before, &qt->window.now);
diff --git a/database/contexts/rrdcontext.h b/database/contexts/rrdcontext.h
index 5328483d6..0f0f90d32 100644
--- a/database/contexts/rrdcontext.h
+++ b/database/contexts/rrdcontext.h
@@ -524,10 +524,41 @@ bool rrdcontext_retention_match(RRDCONTEXT_ACQUIRED *rca, time_t after, time_t b
#define query_target_aggregatable(qt) ((qt)->window.options & RRDR_OPTION_RETURN_RAW)
-static inline bool query_target_has_percentage_of_instance(QUERY_TARGET *qt) {
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++)
+static inline bool query_has_group_by_aggregation_percentage(QUERY_TARGET *qt) {
+
+ // backwards compatibility
+ // If the request was made with group_by = "percentage-of-instance"
+ // we need to send back "raw" output with "count"
+ // otherwise, we need to send back "raw" output with "hidden"
+
+ bool last_is_percentage = false;
+
+ for(int g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
+ break;
+
if(qt->request.group_by[g].group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ // backwards compatibility
+ return false;
+
+ if(qt->request.group_by[g].aggregation == RRDR_GROUP_BY_FUNCTION_PERCENTAGE)
+ last_is_percentage = true;
+
+ else
+ last_is_percentage = false;
+ }
+
+ return last_is_percentage;
+}
+
+static inline bool query_target_has_percentage_of_group(QUERY_TARGET *qt) {
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
+ if (qt->request.group_by[g].group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
+ return true;
+
+ if (qt->request.group_by[g].aggregation == RRDR_GROUP_BY_FUNCTION_PERCENTAGE)
return true;
+ }
return false;
}
@@ -536,7 +567,7 @@ static inline bool query_target_needs_all_dimensions(QUERY_TARGET *qt) {
if(qt->request.options & RRDR_OPTION_PERCENTAGE)
return true;
- return query_target_has_percentage_of_instance(qt);
+ return query_target_has_percentage_of_group(qt);
}
static inline bool query_target_has_percentage_units(QUERY_TARGET *qt) {
@@ -546,7 +577,7 @@ static inline bool query_target_has_percentage_units(QUERY_TARGET *qt) {
if((qt->request.options & RRDR_OPTION_PERCENTAGE) && !(qt->window.options & RRDR_OPTION_RETURN_RAW))
return true;
- return query_target_has_percentage_of_instance(qt);
+ return query_target_has_percentage_of_group(qt);
}
#endif // NETDATA_RRDCONTEXT_H
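query_has_group_by_aggregation_percentage() implements a "last active pass wins" rule, with RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE short-circuiting for backwards compatibility. Below is a standalone sketch of the same control flow, using simplified stand-in types and constants.

#include <stdbool.h>
#include <stdio.h>

#define MAX_PASSES 2
enum { GB_NONE, GB_PERCENTAGE_OF_INSTANCE, GB_OTHER };   /* stand-ins */
enum { AGG_SUM, AGG_PERCENTAGE };

struct pass { int group_by; int aggregation; };

static bool last_is_percentage(const struct pass *p) {
    bool last = false;
    for (int g = 0; g < MAX_PASSES; g++) {
        if (p[g].group_by == GB_NONE)
            break;                                       /* no more passes */
        if (p[g].group_by == GB_PERCENTAGE_OF_INSTANCE)
            return false;                                /* backwards compatibility */
        last = (p[g].aggregation == AGG_PERCENTAGE);     /* last pass wins */
    }
    return last;
}

int main(void) {
    struct pass q[MAX_PASSES] = {
        { GB_OTHER, AGG_PERCENTAGE },  /* pass 1 aggregates as percentage */
        { GB_OTHER, AGG_SUM },         /* pass 2 overrides pass 1 */
    };
    printf("%s\n", last_is_percentage(q) ? "percentage" : "not percentage");
    return 0;
}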
diff --git a/database/rrd.h b/database/rrd.h
index 0f67a3b77..3f125c5a7 100644
--- a/database/rrd.h
+++ b/database/rrd.h
@@ -977,6 +977,7 @@ struct alarm_entry {
uint32_t alarm_id;
uint32_t alarm_event_id;
uuid_t config_hash_id;
+ uuid_t transition_id;
time_t when;
time_t duration;
@@ -1149,9 +1150,11 @@ struct rrdhost {
struct rrdpush_destinations *destination; // the current destination from the above list
SIMPLE_PATTERN *rrdpush_send_charts_matching; // pattern to match the charts to be sent
+ const char *rrdpush_last_receiver_exit_reason;
time_t rrdpush_seconds_to_replicate; // max time we want to replicate from the child
time_t rrdpush_replication_step; // seconds per replication step
size_t rrdpush_receiver_replicating_charts; // the number of charts currently being replicated from a child
+ NETDATA_DOUBLE rrdpush_receiver_replication_percent; // the % of replication completion
// the following are state information for the threading
// streaming metrics from this netdata to an upstream netdata
diff --git a/database/rrdcalc.c b/database/rrdcalc.c
index 3ee8719c0..948ebe8a5 100644
--- a/database/rrdcalc.c
+++ b/database/rrdcalc.c
@@ -369,6 +369,10 @@ static inline bool rrdcalc_check_if_it_matches_rrdset(RRDCALC *rc, RRDSET *st) {
st->rrdhost->rrdlabels, rc->host_labels_pattern, '=', NULL))
return false;
+ if (st->rrdlabels && rc->chart_labels_pattern && !rrdlabels_match_simple_pattern_parsed(
+ st->rrdlabels, rc->chart_labels_pattern, '=', NULL))
+ return false;
+
return true;
}
@@ -605,11 +609,13 @@ static void rrdcalc_free_internals(RRDCALC *rc) {
string_freez(rc->host_labels);
string_freez(rc->module_match);
string_freez(rc->plugin_match);
+ string_freez(rc->chart_labels);
simple_pattern_free(rc->foreach_dimension_pattern);
simple_pattern_free(rc->host_labels_pattern);
simple_pattern_free(rc->module_pattern);
simple_pattern_free(rc->plugin_pattern);
+ simple_pattern_free(rc->chart_labels_pattern);
}
static void rrdcalc_rrdhost_delete_callback(const DICTIONARY_ITEM *item __maybe_unused, void *rrdcalc, void *rrdhost __maybe_unused) {
diff --git a/database/rrdcalc.h b/database/rrdcalc.h
index c6d6fd4e6..3b48d74ec 100644
--- a/database/rrdcalc.h
+++ b/database/rrdcalc.h
@@ -109,6 +109,9 @@ struct rrdcalc {
STRING *host_labels; // the label read from an alarm file
SIMPLE_PATTERN *host_labels_pattern; // the simple pattern of labels
+ STRING *chart_labels; // the chart label read from an alarm file
+ SIMPLE_PATTERN *chart_labels_pattern; // the simple pattern of chart labels
+
// ------------------------------------------------------------------------
// runtime information
@@ -168,6 +171,7 @@ struct rrdcalc {
#define rrdcalc_dimensions(rc) string2str((rc)->dimensions)
#define rrdcalc_foreachdim(rc) string2str((rc)->foreach_dimension)
#define rrdcalc_host_labels(rc) string2str((rc)->host_labels)
+#define rrdcalc_chart_labels(rc) string2str((rc)->chart_labels)
#define foreach_rrdcalc_in_rrdhost_read(host, rc) \
dfe_start_read((host)->rrdcalc_root_index, rc) \
@@ -206,6 +210,7 @@ struct alert_config {
STRING *options;
STRING *repeat;
STRING *host_labels;
+ STRING *chart_labels;
STRING *p_db_lookup_dimensions;
STRING *p_db_lookup_method;
diff --git a/database/rrdcalctemplate.c b/database/rrdcalctemplate.c
index 4dacb6c7b..53630f99c 100644
--- a/database/rrdcalctemplate.c
+++ b/database/rrdcalctemplate.c
@@ -51,6 +51,11 @@ bool rrdcalctemplate_check_rrdset_conditions(RRDCALCTEMPLATE *rt, RRDSET *st, RR
'=', NULL))
return false;
+ if(st->rrdlabels && rt->chart_labels_pattern && !rrdlabels_match_simple_pattern_parsed(st->rrdlabels,
+ rt->chart_labels_pattern,
+ '=', NULL))
+ return false;
+
return true;
}
@@ -120,8 +125,10 @@ static void rrdcalctemplate_free_internals(RRDCALCTEMPLATE *rt) {
string_freez(rt->dimensions);
string_freez(rt->foreach_dimension);
string_freez(rt->host_labels);
+ string_freez(rt->chart_labels);
simple_pattern_free(rt->foreach_dimension_pattern);
simple_pattern_free(rt->host_labels_pattern);
+ simple_pattern_free(rt->chart_labels_pattern);
}
void rrdcalctemplate_free_unused_rrdcalctemplate_loaded_from_config(RRDCALCTEMPLATE *rt) {
diff --git a/database/rrdcalctemplate.h b/database/rrdcalctemplate.h
index 22cfe06e8..965a818a1 100644
--- a/database/rrdcalctemplate.h
+++ b/database/rrdcalctemplate.h
@@ -74,6 +74,9 @@ struct rrdcalctemplate {
STRING *host_labels; // the label read from an alarm file
SIMPLE_PATTERN *host_labels_pattern; // the simple pattern of labels
+ STRING *chart_labels; // the chart label read from an alarm file
+ SIMPLE_PATTERN *chart_labels_pattern; // the simple pattern of chart labels
+
// ------------------------------------------------------------------------
// expressions related to the alarm
@@ -107,6 +110,7 @@ struct rrdcalctemplate {
#define rrdcalctemplate_dimensions(rt) string2str((rt)->dimensions)
#define rrdcalctemplate_foreachdim(rt) string2str((rt)->foreach_dimension)
#define rrdcalctemplate_host_labels(rt) string2str((rt)->host_labels)
+#define rrdcalctemplate_chart_labels(rt) string2str((rt)->chart_labels)
#define RRDCALCTEMPLATE_HAS_DB_LOOKUP(rt) ((rt)->after)
diff --git a/database/rrdhost.c b/database/rrdhost.c
index 88e411de8..69e4beabf 100644
--- a/database/rrdhost.c
+++ b/database/rrdhost.c
@@ -257,8 +257,7 @@ static void rrdhost_initialize_rrdpush_sender(RRDHOST *host,
rrdhost_streaming_sender_structures_init(host);
#ifdef ENABLE_HTTPS
- host->sender->ssl.conn = NULL;
- host->sender->ssl.flags = NETDATA_SSL_START;
+ host->sender->ssl = NETDATA_SSL_UNSET_CONNECTION;
#endif
host->rrdpush_send_destination = strdupz(rrdpush_destination);
@@ -341,6 +340,7 @@ int is_legacy = 1;
host->rrdpush_seconds_to_replicate = rrdpush_seconds_to_replicate;
host->rrdpush_replication_step = rrdpush_replication_step;
+ host->rrdpush_receiver_replication_percent = 100.0;
switch(memory_mode) {
default:
diff --git a/database/rrdlabels.c b/database/rrdlabels.c
index f6abd6023..051222109 100644
--- a/database/rrdlabels.c
+++ b/database/rrdlabels.c
@@ -153,9 +153,9 @@ static unsigned char label_values_char_map[256] = {
[88] = 'X', // X keep
[89] = 'Y', // Y keep
[90] = 'Z', // Z keep
- [91] = '_', // [
+ [91] = '[', // [ keep
[92] = '/', // backslash convert \ to /
- [93] = '_', // ]
+ [93] = ']', // ] keep
[94] = '_', // ^
[95] = '_', // _ keep
[96] = '_', // `
@@ -1223,6 +1223,7 @@ int rrdlabels_unittest_sanitization() {
errors += rrdlabels_unittest_sanitize_value("", "[none]");
errors += rrdlabels_unittest_sanitize_value("1", "1");
errors += rrdlabels_unittest_sanitize_value(" hello world ", "hello world");
+ errors += rrdlabels_unittest_sanitize_value("[none]", "[none]");
// 2-byte UTF-8
errors += rrdlabels_unittest_sanitize_value(" Ελλάδα ", "Ελλάδα");
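The label_values_char_map change above widens the set of characters kept verbatim: '[' and ']' no longer collapse to '_'. Below is a standalone sketch of the table-driven sanitization such a 256-entry map enables; only the entries touched by the diff are mirrored exactly, the rest of the map is illustrative, and the real sanitizer additionally handles multi-byte UTF-8.

#include <stdio.h>

static unsigned char value_map[256];

static void map_init(void) {
    for (int c = 0; c < 256; c++)  /* illustrative defaults */
        value_map[c] = (c >= 32 && c < 127) ? (unsigned char)c : '_';
    value_map['\\'] = '/';   /* convert backslash to slash */
    value_map['['] = '[';    /* keep, per the diff */
    value_map[']'] = ']';    /* keep, per the diff */
    value_map['^'] = '_';
    value_map['`'] = '_';
}

static void sanitize(char *s) {
    for (; *s; s++)
        *s = (char)value_map[(unsigned char)*s];
}

int main(void) {
    char v[] = "array[0]\\path^x";
    map_init();
    sanitize(v);
    printf("%s\n", v);  /* prints: array[0]/path_x */
    return 0;
}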
diff --git a/database/sqlite/sqlite3.c b/database/sqlite/sqlite3.c
index d5fb13d0f..005aab85a 100644
--- a/database/sqlite/sqlite3.c
+++ b/database/sqlite/sqlite3.c
@@ -1,6 +1,6 @@
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.40.1. By combining all the individual C code files into this
+** version 3.41.2. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -17,7 +17,6 @@
** language. The code for the "sqlite3" command-line shell is also in a
** separate file. This file contains only code for the core SQLite library.
*/
-#define __maybe_unused __attribute__((unused))
#define SQLITE_CORE 1
#define SQLITE_AMALGAMATION 1
#ifndef SQLITE_PRIVATE
@@ -457,9 +456,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.40.1"
-#define SQLITE_VERSION_NUMBER 3040001
-#define SQLITE_SOURCE_ID "2022-12-28 14:03:47 df5c253c0b3dd24916e4ec7cf77d3db5294cc9fd45ae7b9c5e82ad8197f38a24"
+#define SQLITE_VERSION "3.41.2"
+#define SQLITE_VERSION_NUMBER 3041002
+#define SQLITE_SOURCE_ID "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -874,6 +873,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8))
#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
+#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8))
#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8))
#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8))
#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))
@@ -1486,7 +1486,6 @@ struct sqlite3_io_methods {
** in wal mode after the client has finished copying pages from the wal
** file to the database file, but before the *-shm file is updated to
** record the fact that the pages have been checkpointed.
-** </ul>
**
** <li>[[SQLITE_FCNTL_EXTERNAL_READER]]
** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect
@@ -1499,16 +1498,16 @@ struct sqlite3_io_methods {
** the database is not a wal-mode db, or if there is no such connection in any
** other process. This opcode cannot be used to detect transactions opened
** by clients within the current process, only within other processes.
-** </ul>
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
-** Used by the cksmvfs VFS module only.
+** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the
+** [checksum VFS shim] only.
**
** <li>[[SQLITE_FCNTL_RESET_CACHE]]
** If there is currently no transaction open on the database, and the
-** database is not a temp db, then this file-control purges the contents
-** of the in-memory page cache. If there is an open transaction, or if
-** the db is a temp-db, it is a no-op, not an error.
+** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control
+** purges the contents of the in-memory page cache. If there is an open
+** transaction, or if the db is a temp-db, this opcode is a no-op, not an error.
** </ul>
*/
#define SQLITE_FCNTL_LOCKSTATE 1
@@ -2495,7 +2494,7 @@ struct sqlite3_mem_methods {
** configuration for a database connection can only be changed when that
** connection is not currently using lookaside memory, or in other words
** when the "current value" returned by
-** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero.
+** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
** Any attempt to change the lookaside memory configuration when lookaside
** memory is in use leaves the configuration unchanged and returns
** [SQLITE_BUSY].)^</dd>
@@ -2645,8 +2644,12 @@ struct sqlite3_mem_methods {
** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
** </ol>
** Because resetting a database is destructive and irreversible, the
-** process requires the use of this obscure API and multiple steps to help
-** ensure that it does not happen by accident.
+** process requires the use of this obscure API and multiple steps to
+** help ensure that it does not happen by accident. Because this
+** feature must be capable of resetting corrupt databases, and
+** shutting down virtual tables may require access to that corrupt
+** storage, the library must abandon any installed virtual tables
+** without calling their xDestroy() methods.
**
** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt>
** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the
@@ -2985,8 +2988,12 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*);
** ^A call to sqlite3_interrupt(D) that occurs when there are no running
** SQL statements is a no-op and has no effect on SQL statements
** that are started after the sqlite3_interrupt() call returns.
+**
+** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
+** or not an interrupt is currently in effect for [database connection] D.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
+SQLITE_API int sqlite3_is_interrupted(sqlite3*);
/*
** CAPI3REF: Determine If An SQL Statement Is Complete
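The newly exposed sqlite3_is_interrupted() lets code outside the bytecode engine check whether sqlite3_interrupt() has been called on a connection. A minimal usage sketch; build against SQLite 3.41 or later, e.g. cc demo.c -lsqlite3.

#include <stdio.h>
#include <sqlite3.h>

int main(void) {
    sqlite3 *db;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        return 1;

    /* no interrupt requested yet: returns 0 */
    printf("before: %d\n", sqlite3_is_interrupted(db));

    sqlite3_interrupt(db);

    /* returns non-zero while an interrupt is still in effect */
    printf("after:  %d\n", sqlite3_is_interrupted(db));

    sqlite3_close(db);
    return 0;
}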
@@ -3604,8 +3611,8 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
** <dd>^An SQLITE_TRACE_PROFILE callback provides approximately the same
** information as is provided by the [sqlite3_profile()] callback.
** ^The P argument is a pointer to the [prepared statement] and the
-** X argument points to a 64-bit integer which is the estimated of
-** the number of nanosecond that the prepared statement took to run.
+** X argument points to a 64-bit integer which is approximately
+** the number of nanoseconds that the prepared statement took to run.
** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes.
**
** [[SQLITE_TRACE_ROW]] <dt>SQLITE_TRACE_ROW</dt>
@@ -3668,7 +3675,7 @@ SQLITE_API int sqlite3_trace_v2(
**
** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback
** function X to be invoked periodically during long running calls to
-** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for
+** [sqlite3_step()] and [sqlite3_prepare()] and similar for
** database connection D. An example use for this
** interface is to keep a GUI updated during a large query.
**
@@ -3693,6 +3700,13 @@ SQLITE_API int sqlite3_trace_v2(
** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
** database connections for the meaning of "modify" in this paragraph.
**
+** The progress handler callback would originally only be invoked from the
+** bytecode engine. It still might be invoked during [sqlite3_prepare()]
+** and similar because those routines might force a reparse of the schema
+** which involves running the bytecode engine. However, beginning with
+** SQLite version 3.41.0, the progress handler callback might also be
+** invoked directly from [sqlite3_prepare()] while analyzing and generating
+** code for complex queries.
*/
SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
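For reference, a minimal sketch of installing a progress handler; per the revised wording above, the callback may now also fire from within sqlite3_prepare() on complex queries, not only from the bytecode engine. Link with -lsqlite3.

#include <stdio.h>
#include <sqlite3.h>

static int on_progress(void *ud) {
    (void)ud;
    fputs(".", stderr);  /* e.g. keep a GUI responsive */
    return 0;            /* returning non-zero aborts the statement */
}

int main(void) {
    sqlite3 *db;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        return 1;

    /* invoke on_progress roughly every 1000 VDBE opcodes */
    sqlite3_progress_handler(db, 1000, on_progress, NULL);

    sqlite3_exec(db,
        "CREATE TABLE t(x);"
        "WITH RECURSIVE c(i) AS (VALUES(1) UNION ALL SELECT i+1 FROM c WHERE i<100000) "
        "INSERT INTO t SELECT i FROM c;",
        NULL, NULL, NULL);

    sqlite3_close(db);
    return 0;
}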
@@ -3729,13 +3743,18 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
**
** <dl>
** ^(<dt>[SQLITE_OPEN_READONLY]</dt>
-** <dd>The database is opened in read-only mode. If the database does not
-** already exist, an error is returned.</dd>)^
+** <dd>The database is opened in read-only mode. If the database does
+** not already exist, an error is returned.</dd>)^
**
** ^(<dt>[SQLITE_OPEN_READWRITE]</dt>
-** <dd>The database is opened for reading and writing if possible, or reading
-** only if the file is write protected by the operating system. In either
-** case the database must already exist, otherwise an error is returned.</dd>)^
+** <dd>The database is opened for reading and writing if possible, or
+** reading only if the file is write protected by the operating
+** system. In either case the database must already exist, otherwise
+** an error is returned. For historical reasons, if opening in
+** read-write mode fails due to OS-level permissions, an attempt is
+** made to open it in read-only mode. [sqlite3_db_readonly()] can be
+** used to determine whether the database is actually
+** read-write.</dd>)^
**
** ^(<dt>[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]</dt>
** <dd>The database is opened for reading and writing, and is created if
@@ -5716,10 +5735,21 @@ SQLITE_API int sqlite3_create_window_function(
** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in
** schema structures such as [CHECK constraints], [DEFAULT clauses],
** [expression indexes], [partial indexes], or [generated columns].
-** The SQLITE_DIRECTONLY flags is a security feature which is recommended
-** for all [application-defined SQL functions], and especially for functions
-** that have side-effects or that could potentially leak sensitive
-** information.
+** <p>
+** The SQLITE_DIRECTONLY flag is recommended for any
+** [application-defined SQL function]
+** that has side-effects or that could potentially leak sensitive information.
+** This will prevent attacks in which an application is tricked
+** into using a database file that has had its schema surreptitiously
+** modified to invoke the application-defined function in ways that are
+** harmful.
+** <p>
+** Some people say it is good practice to set SQLITE_DIRECTONLY on all
+** [application-defined SQL functions], regardless of whether or not they
+** are security sensitive, as doing so prevents those functions from being used
+** inside of the database schema, and thus ensures that the database
+** can be inspected and modified using generic tools (such as the [CLI])
+** that do not have access to the application-defined functions.
** </dd>
**
** [[SQLITE_INNOCUOUS]] <dt>SQLITE_INNOCUOUS</dt><dd>
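A short sketch of the flag in use: the function below is callable from top-level SQL, while the same call embedded in a VIEW, TRIGGER, or schema expression is rejected. Link with -lsqlite3; the error text mentioned in the comment is paraphrased, not quoted.

#include <sqlite3.h>

static void secret_fn(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
    (void)argc; (void)argv;
    sqlite3_result_text(ctx, "sensitive", -1, SQLITE_STATIC);
}

int main(void) {
    sqlite3 *db;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        return 1;

    /* SQLITE_DIRECTONLY restricts secret() to top-level SQL */
    sqlite3_create_function_v2(db, "secret", 0,
                               SQLITE_UTF8 | SQLITE_DIRECTONLY,
                               NULL, secret_fn, NULL, NULL, NULL);

    /* works here; inside a VIEW or TRIGGER body it would fail
       with an "unsafe use" error */
    sqlite3_exec(db, "SELECT secret();", NULL, NULL, NULL);

    sqlite3_close(db);
    return 0;
}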
@@ -5860,16 +5890,6 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
** then the conversion is performed. Otherwise no conversion occurs.
** The [SQLITE_INTEGER | datatype] after conversion is returned.)^
**
-** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8],
-** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current encoding
-** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X)
-** returns something other than SQLITE_TEXT, then the return value from
-** sqlite3_value_encoding(X) is meaningless. ^Calls to
-** sqlite3_value_text(X), sqlite3_value_text16(X), sqlite3_value_text16be(X),
-** sqlite3_value_text16le(X), sqlite3_value_bytes(X), or
-** sqlite3_value_bytes16(X) might change the encoding of the value X and
-** thus change the return from subsequent calls to sqlite3_value_encoding(X).
-**
** ^Within the [xUpdate] method of a [virtual table], the
** sqlite3_value_nochange(X) interface returns true if and only if
** the column corresponding to X is unchanged by the UPDATE operation
@@ -5934,6 +5954,27 @@ SQLITE_API int sqlite3_value_type(sqlite3_value*);
SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
SQLITE_API int sqlite3_value_nochange(sqlite3_value*);
SQLITE_API int sqlite3_value_frombind(sqlite3_value*);
+
+/*
+** CAPI3REF: Report the internal text encoding state of an sqlite3_value object
+** METHOD: sqlite3_value
+**
+** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8],
+** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding
+** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X)
+** returns something other than SQLITE_TEXT, then the return value from
+** sqlite3_value_encoding(X) is meaningless. ^Calls to
+** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)],
+** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or
+** [sqlite3_value_bytes16(X)] might change the encoding of the value X and
+** thus change the return from subsequent calls to sqlite3_value_encoding(X).
+**
+** This routine is intended for use by applications that test and validate
+** the SQLite implementation. This routine is inquiring about the opaque
+** internal state of an [sqlite3_value] object. Ordinary applications should
+** not need to know what the internal state of an sqlite3_value object is and
+** hence should not need to use this interface.
+*/
SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
/*
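sqlite3_value_encoding() is primarily a test-and-validation interface, as the relocated comment block notes. Below is a small sketch of a legitimate call site, inside an application-defined function where sqlite3_value objects are live; link with -lsqlite3.

#include <stdio.h>
#include <sqlite3.h>

static void enc_fn(sqlite3_context *ctx, int argc, sqlite3_value **argv) {
    (void)argc;
    if (sqlite3_value_type(argv[0]) == SQLITE_TEXT) {
        (void)sqlite3_value_text(argv[0]);  /* may transcode the value to UTF-8 */
        printf("encoding: %d (1=UTF8, 2=UTF16LE, 3=UTF16BE)\n",
               sqlite3_value_encoding(argv[0]));
    }
    sqlite3_result_null(ctx);
}

int main(void) {
    sqlite3 *db;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        return 1;
    sqlite3_create_function(db, "enc", 1, SQLITE_UTF8, NULL, enc_fn, NULL, NULL);
    sqlite3_exec(db, "SELECT enc('hello');", NULL, NULL, NULL);
    sqlite3_close(db);
    return 0;
}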
@@ -7315,15 +7356,6 @@ SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
SQLITE_API void sqlite3_reset_auto_extension(void);
/*
-** The interface to the virtual-table mechanism is currently considered
-** to be experimental. The interface might change in incompatible ways.
-** If this is a problem for you, do not use the interface at this time.
-**
-** When the virtual-table mechanism stabilizes, we will declare the
-** interface fixed, support it indefinitely, and remove this comment.
-*/
-
-/*
** Structures used by the virtual table interface
*/
typedef struct sqlite3_vtab sqlite3_vtab;
@@ -7441,10 +7473,10 @@ struct sqlite3_module {
** when the omit flag is true there is no guarantee that the constraint will
** not be checked again using byte code.)^
**
-** ^The idxNum and idxPtr values are recorded and passed into the
+** ^The idxNum and idxStr values are recorded and passed into the
** [xFilter] method.
-** ^[sqlite3_free()] is used to free idxPtr if and only if
-** needToFreeIdxPtr is true.
+** ^[sqlite3_free()] is used to free idxStr if and only if
+** needToFreeIdxStr is true.
**
** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in
** the correct order to satisfy the ORDER BY clause so that no separate
@@ -7564,7 +7596,7 @@ struct sqlite3_index_info {
** the [sqlite3_vtab_collation()] interface. For most real-world virtual
** tables, the collating sequence of constraints does not matter (for example
** because the constraints are numeric) and so the sqlite3_vtab_collation()
-** interface is no commonly needed.
+** interface is not commonly needed.
*/
#define SQLITE_INDEX_CONSTRAINT_EQ 2
#define SQLITE_INDEX_CONSTRAINT_GT 4
@@ -7724,16 +7756,6 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
/*
-** The interface to the virtual-table mechanism defined above (back up
-** to a comment remarkably similar to this one) is currently considered
-** to be experimental. The interface might change in incompatible ways.
-** If this is a problem for you, do not use the interface at this time.
-**
-** When the virtual-table mechanism stabilizes, we will declare the
-** interface fixed, support it indefinitely, and remove this comment.
-*/
-
-/*
** CAPI3REF: A Handle To An Open BLOB
** KEYWORDS: {BLOB handle} {BLOB handles}
**
@@ -9936,7 +9958,7 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
** <li><p> Otherwise, "BINARY" is returned.
** </ol>
*/
-SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
+SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
/*
** CAPI3REF: Determine if a virtual table query is DISTINCT
@@ -10093,21 +10115,20 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle);
** is undefined and probably harmful.
**
** The X parameter in a call to sqlite3_vtab_in_first(X,P) or
-** sqlite3_vtab_in_next(X,P) must be one of the parameters to the
+** sqlite3_vtab_in_next(X,P) should be one of the parameters to the
** xFilter method which invokes these routines, and specifically
** a parameter that was previously selected for all-at-once IN constraint
** processing use the [sqlite3_vtab_in()] interface in the
** [xBestIndex|xBestIndex method]. ^(If the X parameter is not
** an xFilter argument that was selected for all-at-once IN constraint
-** processing, then these routines return [SQLITE_MISUSE])^ or perhaps
-** exhibit some other undefined or harmful behavior.
+** processing, then these routines return [SQLITE_ERROR].)^
**
** ^(Use these routines to access all values on the right-hand side
** of the IN constraint using code like the following:
**
** <blockquote><pre>
** &nbsp; for(rc=sqlite3_vtab_in_first(pList, &pVal);
-** &nbsp; rc==SQLITE_OK && pVal
+** &nbsp; rc==SQLITE_OK && pVal;
** &nbsp; rc=sqlite3_vtab_in_next(pList, &pVal)
** &nbsp; ){
** &nbsp; // do something with pVal
@@ -10205,6 +10226,10 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
** managed by the prepared statement S and will be automatically freed when
** S is finalized.
**
+** Not all values are available for all query elements. When a value is
+** not available, the output variable is set to -1 if the value is numeric,
+** or to NULL if it is a string (SQLITE_SCANSTAT_NAME).
+**
** <dl>
** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt>
** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be
@@ -10232,12 +10257,24 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
** description for the X-th loop.
**
-** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECT</dt>
+** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt>
** <dd>^The "int" variable pointed to by the V parameter will be set to the
-** "select-id" for the X-th loop. The select-id identifies which query or
-** subquery the loop is part of. The main query has a select-id of zero.
-** The select-id is the same value as is output in the first column
-** of an [EXPLAIN QUERY PLAN] query.
+** id for the X-th query plan element. The id value is unique within the
+** statement. The select-id is the same value as is output in the first
+** column of an [EXPLAIN QUERY PLAN] query.
+**
+** [[SQLITE_SCANSTAT_PARENTID]] <dt>SQLITE_SCANSTAT_PARENTID</dt>
+** <dd>The "int" variable pointed to by the V parameter will be set to the
+** id of the parent of the current query element, if applicable, or
+** to zero if the query element has no parent. This is the same value as
+** returned in the second column of an [EXPLAIN QUERY PLAN] query.
+**
+** [[SQLITE_SCANSTAT_NCYCLE]] <dt>SQLITE_SCANSTAT_NCYCLE</dt>
+** <dd>The sqlite3_int64 output value is set to the number of cycles,
+** according to the processor time-stamp counter, that elapsed while the
+** query element was being processed. This value is not available for
+** all query elements - if it is unavailable the output variable is
+** set to -1.
** </dl>
*/
#define SQLITE_SCANSTAT_NLOOP 0
@@ -10246,12 +10283,14 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
#define SQLITE_SCANSTAT_NAME 3
#define SQLITE_SCANSTAT_EXPLAIN 4
#define SQLITE_SCANSTAT_SELECTID 5
+#define SQLITE_SCANSTAT_PARENTID 6
+#define SQLITE_SCANSTAT_NCYCLE 7
/*
** CAPI3REF: Prepared Statement Scan Status
** METHOD: sqlite3_stmt
**
-** This interface returns information about the predicted and measured
+** These interfaces return information about the predicted and measured
** performance for pStmt. Advanced applications can use this
** interface to compare the predicted and the measured performance and
** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
@@ -10262,19 +10301,25 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
**
** The "iScanStatusOp" parameter determines which status information to return.
** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
-** of this interface is undefined.
-** ^The requested measurement is written into a variable pointed to by
-** the "pOut" parameter.
-** Parameter "idx" identifies the specific loop to retrieve statistics for.
-** Loops are numbered starting from zero. ^If idx is out of range - less than
-** zero or greater than or equal to the total number of loops used to implement
-** the statement - a non-zero value is returned and the variable that pOut
-** points to is unchanged.
-**
-** ^Statistics might not be available for all loops in all statements. ^In cases
-** where there exist loops with no available statistics, this function behaves
-** as if the loop did not exist - it returns non-zero and leave the variable
-** that pOut points to unchanged.
+** of this interface is undefined. ^The requested measurement is written into
+** a variable pointed to by the "pOut" parameter.
+**
+** The "flags" parameter must be passed a mask of flags. At present only
+** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX
+** is specified, then status information is available for all elements
+** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If
+** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements
+** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of
+** the EXPLAIN QUERY PLAN output) are available. Invoking API
+** sqlite3_stmt_scanstatus() is equivalent to calling
+** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter.
+**
+** Parameter "idx" identifies the specific query element to retrieve statistics
+** for. Query elements are numbered starting from zero. A value of -1 may be
+** used to query for statistics regarding the entire query. ^If idx is out of range
+** - less than -1 or greater than or equal to the total number of query
+** elements used to implement the statement - a non-zero value is returned and
+** the variable that pOut points to is unchanged.
**
** See also: [sqlite3_stmt_scanstatus_reset()]
*/
@@ -10284,6 +10329,19 @@ SQLITE_API int sqlite3_stmt_scanstatus(
int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
void *pOut /* Result written here */
);
+SQLITE_API int sqlite3_stmt_scanstatus_v2(
+ sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
+ int idx, /* Index of loop to report on */
+ int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
+ int flags, /* Mask of flags defined below */
+ void *pOut /* Result written here */
+);
+
+/*
+** CAPI3REF: Prepared Statement Scan Status
+** KEYWORDS: {scan status flags}
+*/
+#define SQLITE_SCANSTAT_COMPLEX 0x0001
/*
** CAPI3REF: Zero Scan-Status Counters
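A sketch of the new v2 interface: with the SQLITE_SCANSTAT_COMPLEX flag every element of the EXPLAIN QUERY PLAN tree becomes visible, and a non-zero return marks the end of the element list. This assumes an SQLite 3.41+ build compiled with SQLITE_ENABLE_STMT_SCANSTATUS, without which the scanstatus APIs are unavailable.

#include <stdio.h>
#include <sqlite3.h>

static void dump_plan(sqlite3_stmt *stmt) {
    for (int idx = 0; ; idx++) {
        const char *txt = NULL;
        /* non-zero return: idx is past the last query element */
        if (sqlite3_stmt_scanstatus_v2(stmt, idx, SQLITE_SCANSTAT_EXPLAIN,
                                       SQLITE_SCANSTAT_COMPLEX, &txt))
            break;
        printf("element %d: %s\n", idx, txt ? txt : "(n/a)");
    }
}

int main(void) {
    sqlite3 *db;
    sqlite3_stmt *stmt;
    if (sqlite3_open(":memory:", &db) != SQLITE_OK)
        return 1;
    sqlite3_exec(db, "CREATE TABLE t(a,b); INSERT INTO t VALUES(1,2);",
                 NULL, NULL, NULL);
    sqlite3_prepare_v2(db, "SELECT a FROM t WHERE b=2", -1, &stmt, NULL);
    while (sqlite3_step(stmt) == SQLITE_ROW) { }  /* run so counters populate */
    dump_plan(stmt);
    sqlite3_finalize(stmt);
    sqlite3_close(db);
    return 0;
}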
@@ -10374,6 +10432,10 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** function is not defined for operations on WITHOUT ROWID tables, or for
** DELETE operations on rowid tables.
**
+** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from
+** the previous call on the same [database connection] D, or NULL for
+** the first call on D.
+**
** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
** provide additional information about a preupdate event. These routines
@@ -10779,6 +10841,19 @@ SQLITE_API int sqlite3_deserialize(
# undef double
#endif
+#if defined(__wasi__)
+# undef SQLITE_WASI
+# define SQLITE_WASI 1
+# undef SQLITE_OMIT_WAL
+# define SQLITE_OMIT_WAL 1/* because it requires shared memory APIs */
+# ifndef SQLITE_OMIT_LOAD_EXTENSION
+# define SQLITE_OMIT_LOAD_EXTENSION
+# endif
+# ifndef SQLITE_THREADSAFE
+# define SQLITE_THREADSAFE 0
+# endif
+#endif
+
#if 0
} /* End of the 'extern "C"' block */
#endif
@@ -14330,15 +14405,9 @@ typedef INT8_TYPE i8; /* 1-byte signed integer */
/*
** The datatype used to store estimates of the number of rows in a
-** table or index. This is an unsigned integer type. For 99.9% of
-** the world, a 32-bit integer is sufficient. But a 64-bit integer
-** can be used at compile-time if desired.
+** table or index.
*/
-#ifdef SQLITE_64BIT_STATS
- typedef u64 tRowcnt; /* 64-bit only if requested at compile-time */
-#else
- typedef u32 tRowcnt; /* 32-bit is the default */
-#endif
+typedef u64 tRowcnt;
/*
** Estimated quantities used for query planning are stored as 16-bit
@@ -14484,9 +14553,9 @@ typedef INT16_TYPE LogEst;
** pointers. In that case, only verify 4-byte alignment.
*/
#ifdef SQLITE_4_BYTE_ALIGNED_MALLOC
-# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&3)==0)
+# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&3)==0)
#else
-# define EIGHT_BYTE_ALIGNMENT(X) ((((char*)(X) - (char*)0)&7)==0)
+# define EIGHT_BYTE_ALIGNMENT(X) ((((uptr)(X) - (uptr)0)&7)==0)
#endif
/*
@@ -14540,15 +14609,38 @@ SQLITE_PRIVATE u32 sqlite3TreeTrace;
&& (defined(SQLITE_TEST) || defined(SQLITE_ENABLE_SELECTTRACE) \
|| defined(SQLITE_ENABLE_TREETRACE))
# define TREETRACE_ENABLED 1
-# define SELECTTRACE(K,P,S,X) \
+# define TREETRACE(K,P,S,X) \
if(sqlite3TreeTrace&(K)) \
sqlite3DebugPrintf("%u/%d/%p: ",(S)->selId,(P)->addrExplain,(S)),\
sqlite3DebugPrintf X
#else
-# define SELECTTRACE(K,P,S,X)
+# define TREETRACE(K,P,S,X)
# define TREETRACE_ENABLED 0
#endif
+/* TREETRACE flag meanings:
+**
+** 0x00000001 Beginning and end of SELECT processing
+** 0x00000002 WHERE clause processing
+** 0x00000004 Query flattener
+** 0x00000008 Result-set wildcard expansion
+** 0x00000010 Query name resolution
+** 0x00000020 Aggregate analysis
+** 0x00000040 Window functions
+** 0x00000080 Generated column names
+** 0x00000100 Move HAVING terms into WHERE
+** 0x00000200 Count-of-view optimization
+** 0x00000400 Compound SELECT processing
+** 0x00000800 Drop superfluous ORDER BY
+** 0x00001000 LEFT JOIN simplifies to JOIN
+** 0x00002000 Constant propagation
+** 0x00004000 Push-down optimization
+** 0x00008000 After all FROM-clause analysis
+** 0x00010000 Beginning of DELETE/INSERT/UPDATE processing
+** 0x00020000 Transform DISTINCT into GROUP BY
+** 0x00040000 SELECT tree dump after all code has been generated
+*/
+
/*
** Macros for "wheretrace"
*/
@@ -14561,6 +14653,36 @@ SQLITE_PRIVATE u32 sqlite3WhereTrace;
# define WHERETRACE(K,X)
#endif
+/*
+** Bits for the sqlite3WhereTrace mask:
+**
+** (---any--) Top-level block structure
+** 0x-------F High-level debug messages
+** 0x----FFF- More detail
+** 0xFFFF---- Low-level debug messages
+**
+** 0x00000001 Code generation
+** 0x00000002 Solver
+** 0x00000004 Solver costs
+** 0x00000008 WhereLoop inserts
+**
+** 0x00000010 Display sqlite3_index_info xBestIndex calls
+** 0x00000020 Range and equality scan metrics
+** 0x00000040 IN operator decisions
+** 0x00000080 WhereLoop cost adjustments
+** 0x00000100
+** 0x00000200 Covering index decisions
+** 0x00000400 OR optimization
+** 0x00000800 Index scanner
+** 0x00001000 More details associated with code generation
+** 0x00002000
+** 0x00004000 Show all WHERE terms at key points
+** 0x00008000 Show the full SELECT statement at key places
+**
+** 0x00010000 Show more detail when printing WHERE terms
+** 0x00020000 Show WHERE terms returned from whereScanNext()
+*/
+
/*
** An instance of the following structure is used to store the busy-handler
@@ -15541,7 +15663,7 @@ SQLITE_PRIVATE int sqlite3BtreeNewDb(Btree *p);
** reduce network bandwidth.
**
** Note that BTREE_HINT_FLAGS with BTREE_BULKLOAD is the only hint used by
-** standard SQLite. The other hints are provided for extentions that use
+** standard SQLite. The other hints are provided for extensions that use
** the SQLite parser and code generator but substitute their own storage
** engine.
*/
@@ -15687,7 +15809,15 @@ SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*);
SQLITE_PRIVATE sqlite3_int64 sqlite3BtreeMaxRecordSize(BtCursor*);
-SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(sqlite3*,Btree*,Pgno*aRoot,int nRoot,int,int*);
+SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
+ sqlite3 *db, /* Database connection that is running the check */
+ Btree *p, /* The btree to be checked */
+ Pgno *aRoot, /* An array of root pages numbers for individual trees */
+ int nRoot, /* Number of entries in aRoot[] */
+ int mxErr, /* Stop reporting errors after this many */
+ int *pnErr, /* OUT: Write number of errors seen to this variable */
+ char **pzOut /* OUT: Write the error message string here */
+);
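The integrity check now reports through a result code plus two out-parameters instead of returning the message string directly. A sketch of the new calling pattern (local names are illustrative; how the returned string must be freed is an assumption here):

    int nErr = 0;
    char *zErr = 0;
    int rc = sqlite3BtreeIntegrityCheck(db, p, aRoot, nRoot,
                                        100 /* mxErr */, &nErr, &zErr);
    if( rc==SQLITE_OK && nErr>0 ){
      /* zErr holds the accumulated report; release it when done
      ** (assumed: sqlite3_free(zErr)). */
    }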
SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*);
SQLITE_PRIVATE i64 sqlite3BtreeRowCountEst(BtCursor*);
@@ -15844,14 +15974,14 @@ struct VdbeOp {
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
char *zComment; /* Comment to improve readability */
#endif
-#ifdef VDBE_PROFILE
- u32 cnt; /* Number of times this instruction was executed */
- u64 cycles; /* Total time spent executing this instruction */
-#endif
#ifdef SQLITE_VDBE_COVERAGE
u32 iSrcLine; /* Source-code line that generated this opcode
** with flags in the upper 8 bits */
#endif
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
+ u64 nExec;
+ u64 nCycle;
+#endif
};
typedef struct VdbeOp VdbeOp;
@@ -16142,29 +16272,30 @@ typedef struct VdbeOpList VdbeOpList;
#define OPFLG_IN3 0x08 /* in3: P3 is an input */
#define OPFLG_OUT2 0x10 /* out2: P2 is an output */
#define OPFLG_OUT3 0x20 /* out3: P3 is an output */
+#define OPFLG_NCYCLE 0x40 /* ncycle: Cycles count against P1 */
#define OPFLG_INITIALIZER {\
-/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x01, 0x00,\
+/* 0 */ 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x41, 0x00,\
/* 8 */ 0x01, 0x01, 0x01, 0x01, 0x03, 0x03, 0x01, 0x01,\
-/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x09, 0x09, 0x09,\
-/* 24 */ 0x09, 0x01, 0x09, 0x09, 0x09, 0x09, 0x09, 0x09,\
-/* 32 */ 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,\
-/* 40 */ 0x01, 0x01, 0x01, 0x26, 0x26, 0x01, 0x23, 0x0b,\
+/* 16 */ 0x03, 0x03, 0x01, 0x12, 0x01, 0x49, 0x49, 0x49,\
+/* 24 */ 0x49, 0x01, 0x49, 0x49, 0x49, 0x49, 0x49, 0x49,\
+/* 32 */ 0x41, 0x01, 0x01, 0x01, 0x41, 0x01, 0x41, 0x41,\
+/* 40 */ 0x41, 0x41, 0x41, 0x26, 0x26, 0x41, 0x23, 0x0b,\
/* 48 */ 0x01, 0x01, 0x03, 0x03, 0x0b, 0x0b, 0x0b, 0x0b,\
-/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x01,\
+/* 56 */ 0x0b, 0x0b, 0x01, 0x03, 0x03, 0x03, 0x01, 0x41,\
/* 64 */ 0x01, 0x00, 0x00, 0x02, 0x02, 0x08, 0x00, 0x10,\
/* 72 */ 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10, 0x00,\
/* 80 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x02, 0x02,\
-/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x00, 0x00,\
-/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x00, 0x26, 0x26,\
+/* 88 */ 0x02, 0x00, 0x00, 0x12, 0x1e, 0x20, 0x40, 0x00,\
+/* 96 */ 0x00, 0x00, 0x10, 0x10, 0x00, 0x40, 0x26, 0x26,\
/* 104 */ 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26, 0x26,\
-/* 112 */ 0x00, 0x00, 0x12, 0x00, 0x00, 0x10, 0x00, 0x00,\
-/* 120 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\
-/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10,\
-/* 136 */ 0x00, 0x00, 0x04, 0x04, 0x00, 0x00, 0x10, 0x00,\
+/* 112 */ 0x40, 0x00, 0x12, 0x40, 0x40, 0x10, 0x40, 0x00,\
+/* 120 */ 0x00, 0x00, 0x40, 0x00, 0x40, 0x40, 0x10, 0x10,\
+/* 128 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x50,\
+/* 136 */ 0x00, 0x40, 0x04, 0x04, 0x00, 0x40, 0x50, 0x40,\
/* 144 */ 0x10, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00,\
/* 152 */ 0x00, 0x10, 0x00, 0x00, 0x06, 0x10, 0x00, 0x04,\
/* 160 */ 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00,\
+/* 168 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x50, 0x40,\
/* 176 */ 0x00, 0x10, 0x10, 0x02, 0x00, 0x00, 0x00, 0x00,\
/* 184 */ 0x00, 0x00, 0x00,}
@@ -16219,14 +16350,20 @@ SQLITE_PRIVATE void sqlite3VdbeNoJumpsOutsideSubrtn(Vdbe*,int,int,int);
#endif
SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp,int iLineno);
#ifndef SQLITE_OMIT_EXPLAIN
-SQLITE_PRIVATE void sqlite3VdbeExplain(Parse*,u8,const char*,...);
+SQLITE_PRIVATE int sqlite3VdbeExplain(Parse*,u8,const char*,...);
SQLITE_PRIVATE void sqlite3VdbeExplainPop(Parse*);
SQLITE_PRIVATE int sqlite3VdbeExplainParent(Parse*);
# define ExplainQueryPlan(P) sqlite3VdbeExplain P
+# ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+# define ExplainQueryPlan2(V,P) (V = sqlite3VdbeExplain P)
+# else
+# define ExplainQueryPlan2(V,P) ExplainQueryPlan(P)
+# endif
# define ExplainQueryPlanPop(P) sqlite3VdbeExplainPop(P)
# define ExplainQueryPlanParent(P) sqlite3VdbeExplainParent(P)
#else
# define ExplainQueryPlan(P)
+# define ExplainQueryPlan2(V,P)
# define ExplainQueryPlanPop(P)
# define ExplainQueryPlanParent(P) 0
# define sqlite3ExplainBreakpoint(A,B) /*no-op*/
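ExplainQueryPlan2() exists so a call site can capture the value that sqlite3VdbeExplain() now returns (the address of the generated OP_Explain) when SQLITE_ENABLE_STMT_SCANSTATUS is defined, while compiling down to the old form otherwise. A sketch (names illustrative):

    int addrExp = 0;                 /* OP_Explain address, or 0 */
    ExplainQueryPlan2(addrExp, (pParse, 0, "SCAN %s", zTabName));
    /* Under SCANSTATUS builds addrExp now identifies this loop's EQP
    ** note; in other builds the macro leaves addrExp untouched. */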
@@ -16399,8 +16536,12 @@ SQLITE_PRIVATE void sqlite3VdbeSetLineNumber(Vdbe*,int);
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
SQLITE_PRIVATE void sqlite3VdbeScanStatus(Vdbe*, int, int, int, LogEst, const char*);
+SQLITE_PRIVATE void sqlite3VdbeScanStatusRange(Vdbe*, int, int, int);
+SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(Vdbe*, int, int, int);
#else
-# define sqlite3VdbeScanStatus(a,b,c,d,e)
+# define sqlite3VdbeScanStatus(a,b,c,d,e,f)
+# define sqlite3VdbeScanStatusRange(a,b,c,d)
+# define sqlite3VdbeScanStatusCounters(a,b,c,d)
#endif
#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
@@ -16455,7 +16596,7 @@ struct PgHdr {
** private to pcache.c and should not be accessed by other modules.
** pCache is grouped with the public elements for efficiency.
*/
- i16 nRef; /* Number of users of this page */
+ i64 nRef; /* Number of users of this page */
PgHdr *pDirtyNext; /* Next element in list of dirty pages */
PgHdr *pDirtyPrev; /* Previous element in list of dirty pages */
/* NB: pDirtyNext and pDirtyPrev are undefined if the
@@ -16536,12 +16677,12 @@ SQLITE_PRIVATE void sqlite3PcacheClearSyncFlags(PCache *);
SQLITE_PRIVATE void sqlite3PcacheClear(PCache*);
/* Return the total number of outstanding page references */
-SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache*);
+SQLITE_PRIVATE i64 sqlite3PcacheRefCount(PCache*);
/* Increment the reference count of an existing page */
SQLITE_PRIVATE void sqlite3PcacheRef(PgHdr*);
-SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr*);
+SQLITE_PRIVATE i64 sqlite3PcachePageRefcount(PgHdr*);
/* Return the total number of pages stored in the cache */
SQLITE_PRIVATE int sqlite3PcachePagecount(PCache*);
@@ -17197,6 +17338,7 @@ struct sqlite3 {
#define SQLITE_FlttnUnionAll 0x00800000 /* Disable the UNION ALL flattener */
/* TH3 expects this value ^^^^^^^^^^ See flatten04.test */
#define SQLITE_IndexedExpr 0x01000000 /* Pull exprs from index when able */
+#define SQLITE_Coroutines 0x02000000 /* Co-routines for subqueries */
#define SQLITE_AllOpts 0xffffffff /* All optimizations */
/*
@@ -17281,8 +17423,14 @@ struct FuncDestructor {
** SQLITE_FUNC_TYPEOF == OPFLAG_TYPEOFARG
** SQLITE_FUNC_CONSTANT == SQLITE_DETERMINISTIC from the API
** SQLITE_FUNC_DIRECT == SQLITE_DIRECTONLY from the API
-** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS
+** SQLITE_FUNC_UNSAFE == SQLITE_INNOCUOUS -- opposite meanings!!!
** SQLITE_FUNC_ENCMASK depends on SQLITE_UTF* macros in the API
+**
+** Note that even though SQLITE_FUNC_UNSAFE and SQLITE_INNOCUOUS have the
+** same bit value, their meanings are inverted. SQLITE_FUNC_UNSAFE is
+** used internally and if set means that the function has side effects.
+** SQLITE_INNOCUOUS is used by application code and means "not unsafe".
+** See multiple instances of tag-20230109-1.
*/
#define SQLITE_FUNC_ENCMASK 0x0003 /* SQLITE_UTF8, SQLITE_UTF16BE or UTF16LE */
#define SQLITE_FUNC_LIKE 0x0004 /* Candidate for the LIKE optimization */
@@ -17399,7 +17547,7 @@ struct FuncDestructor {
{nArg, SQLITE_FUNC_BUILTIN|SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \
xPtr, 0, xFunc, 0, 0, 0, #zName, {0} }
#define JFUNCTION(zName, nArg, iArg, xFunc) \
- {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS|\
+ {nArg, SQLITE_FUNC_BUILTIN|SQLITE_DETERMINISTIC|\
SQLITE_FUNC_CONSTANT|SQLITE_UTF8, \
SQLITE_INT_TO_PTR(iArg), 0, xFunc, 0, 0, 0, #zName, {0} }
#define INLINE_FUNC(zName, nArg, iArg, mFlags) \
@@ -17591,6 +17739,7 @@ struct CollSeq {
#define SQLITE_AFF_NUMERIC 0x43 /* 'C' */
#define SQLITE_AFF_INTEGER 0x44 /* 'D' */
#define SQLITE_AFF_REAL 0x45 /* 'E' */
+#define SQLITE_AFF_FLEXNUM 0x46 /* 'F' */
#define sqlite3IsNumericAffinity(X) ((X)>=SQLITE_AFF_NUMERIC)
@@ -18122,16 +18271,15 @@ struct AggInfo {
** from source tables rather than from accumulators */
u8 useSortingIdx; /* In direct mode, reference the sorting index rather
** than the source table */
+ u16 nSortingColumn; /* Number of columns in the sorting index */
int sortingIdx; /* Cursor number of the sorting index */
int sortingIdxPTab; /* Cursor number of pseudo-table */
- int nSortingColumn; /* Number of columns in the sorting index */
- int mnReg, mxReg; /* Range of registers allocated for aCol and aFunc */
+ int iFirstReg; /* First register in range for aCol[] and aFunc[] */
ExprList *pGroupBy; /* The group by clause */
struct AggInfo_col { /* For each column used in source tables */
Table *pTab; /* Source table */
Expr *pCExpr; /* The original expression */
int iTable; /* Cursor number of the source table */
- int iMem; /* Memory location that acts as accumulator */
i16 iColumn; /* Column number within the source table */
i16 iSorterColumn; /* Column number in the sorting index */
} *aCol;
@@ -18142,15 +18290,28 @@ struct AggInfo {
struct AggInfo_func { /* For each aggregate function */
Expr *pFExpr; /* Expression encoding the function */
FuncDef *pFunc; /* The aggregate function implementation */
- int iMem; /* Memory location that acts as accumulator */
int iDistinct; /* Ephemeral table used to enforce DISTINCT */
int iDistAddr; /* Address of OP_OpenEphemeral */
} *aFunc;
int nFunc; /* Number of entries in aFunc[] */
u32 selId; /* Select to which this AggInfo belongs */
+#ifdef SQLITE_DEBUG
+ Select *pSelect; /* SELECT statement that this AggInfo supports */
+#endif
};
/*
+** Macros to compute aCol[] and aFunc[] register numbers.
+**
+** These macros should not be used prior to the call to
+** assignAggregateRegisters() that computes the value of pAggInfo->iFirstReg.
+** The assert()s that are part of this macro verify that constraint.
+*/
+#define AggInfoColumnReg(A,I) (assert((A)->iFirstReg),(A)->iFirstReg+(I))
+#define AggInfoFuncReg(A,I) \
+ (assert((A)->iFirstReg),(A)->iFirstReg+(A)->nColumn+(I))
+
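With the per-entry iMem accumulators gone, both register arrays hang off the single iFirstReg base; the mapping the macros implement is simply:

    /* After assignAggregateRegisters() has set pAggInfo->iFirstReg: */
    int regCol  = AggInfoColumnReg(pAggInfo, i); /* == iFirstReg + i           */
    int regFunc = AggInfoFuncReg(pAggInfo, j);   /* == iFirstReg + nColumn + j */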
+/*
** The datatype ynVar is a signed integer, either 16-bit or 32-bit.
** Usually it is 16-bits. But if SQLITE_MAX_VARIABLE_NUMBER is greater
** than 32767 we have to make it 32-bit. 16-bit is preferred because
@@ -18680,7 +18841,7 @@ struct NameContext {
#define NC_HasAgg 0x000010 /* One or more aggregate functions seen */
#define NC_IdxExpr 0x000020 /* True if resolving columns of CREATE INDEX */
#define NC_SelfRef 0x00002e /* Combo: PartIdx, isCheck, GenCol, and IdxExpr */
-#define NC_VarSelect 0x000040 /* A correlated subquery has been seen */
+#define NC_Subquery 0x000040 /* A subquery has been seen */
#define NC_UEList 0x000080 /* True if uNC.pEList is used */
#define NC_UAggInfo 0x000100 /* True if uNC.pAggInfo is used */
#define NC_UUpsert 0x000200 /* True if uNC.pUpsert is used */
@@ -18809,6 +18970,7 @@ struct Select {
#define SF_MultiPart 0x2000000 /* Has multiple incompatible PARTITIONs */
#define SF_CopyCte 0x4000000 /* SELECT statement is a copy of a CTE */
#define SF_OrderByReqd 0x8000000 /* The ORDER BY clause may not be omitted */
+#define SF_UpdateFrom 0x10000000 /* Query originates with UPDATE FROM */
/* True if S exists and has SF_NestedFrom */
#define IsNestedFrom(S) ((S)!=0 && ((S)->selFlags&SF_NestedFrom)!=0)
@@ -18917,7 +19079,7 @@ struct SelectDest {
int iSDParm2; /* A second parameter for the eDest disposal method */
int iSdst; /* Base register where results are written */
int nSdst; /* Number of registers allocated */
- char *zAffSdst; /* Affinity used for SRT_Set, SRT_Table, and similar */
+ char *zAffSdst; /* Affinity used for SRT_Set */
ExprList *pOrderBy; /* Key columns for SRT_Queue and SRT_DistQueue */
};
@@ -18976,10 +19138,10 @@ struct TriggerPrg {
#else
typedef unsigned int yDbMask;
# define DbMaskTest(M,I) (((M)&(((yDbMask)1)<<(I)))!=0)
-# define DbMaskZero(M) (M)=0
-# define DbMaskSet(M,I) (M)|=(((yDbMask)1)<<(I))
-# define DbMaskAllZero(M) (M)==0
-# define DbMaskNonZero(M) (M)!=0
+# define DbMaskZero(M) ((M)=0)
+# define DbMaskSet(M,I) ((M)|=(((yDbMask)1)<<(I)))
+# define DbMaskAllZero(M) ((M)==0)
+# define DbMaskNonZero(M) ((M)!=0)
#endif
/*
@@ -18998,6 +19160,7 @@ struct IndexedExpr {
int iIdxCur; /* The index cursor */
int iIdxCol; /* The index column that contains value of pExpr */
u8 bMaybeNullRow; /* True if we need an OP_IfNullRow check */
+ u8 aff; /* Affinity of the pExpr expression */
IndexedExpr *pIENext; /* Next in a list of all indexed expressions */
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
const char *zIdxName; /* Name of index, used only for bytecode comments */
@@ -19050,6 +19213,9 @@ struct Parse {
#if defined(SQLITE_DEBUG) || defined(SQLITE_COVERAGE_TEST)
u8 earlyCleanup; /* OOM inside sqlite3ParserAddCleanup() */
#endif
+#ifdef SQLITE_DEBUG
+ u8 ifNotExists; /* Might be true if IF NOT EXISTS. Assert()s only */
+#endif
int nRangeReg; /* Size of the temporary register block */
int iRangeReg; /* First register in temporary register block */
int nErr; /* Number of errors seen */
@@ -19062,7 +19228,7 @@ struct Parse {
int nLabelAlloc; /* Number of slots in aLabel */
int *aLabel; /* Space to hold the labels */
ExprList *pConstExpr;/* Constant expressions */
- IndexedExpr *pIdxExpr;/* List of expressions used by active indexes */
+ IndexedExpr *pIdxEpr;/* List of expressions used by active indexes */
Token constraintName;/* Name of the constraint currently being parsed */
yDbMask writeMask; /* Start a write transaction on these databases */
yDbMask cookieMask; /* Bitmask of schema verified databases */
@@ -19086,6 +19252,9 @@ struct Parse {
u32 nQueryLoop; /* Est number of iterations of a query (10*log2(N)) */
u32 oldmask; /* Mask of old.* columns referenced */
u32 newmask; /* Mask of new.* columns referenced */
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ u32 nProgressSteps; /* xProgress steps taken during sqlite3_prepare() */
+#endif
u8 eTriggerOp; /* TK_UPDATE, TK_INSERT or TK_DELETE */
u8 bReturning; /* Coding a RETURNING trigger */
u8 eOrconf; /* Default ON CONFLICT policy for trigger steps */
@@ -19834,13 +20003,11 @@ SQLITE_PRIVATE int sqlite3HeapNearlyFull(void);
#ifdef SQLITE_USE_ALLOCA
# define sqlite3StackAllocRaw(D,N) alloca(N)
# define sqlite3StackAllocRawNN(D,N) alloca(N)
-# define sqlite3StackAllocZero(D,N) memset(alloca(N), 0, N)
# define sqlite3StackFree(D,P)
# define sqlite3StackFreeNN(D,P)
#else
# define sqlite3StackAllocRaw(D,N) sqlite3DbMallocRaw(D,N)
# define sqlite3StackAllocRawNN(D,N) sqlite3DbMallocRawNN(D,N)
-# define sqlite3StackAllocZero(D,N) sqlite3DbMallocZero(D,N)
# define sqlite3StackFree(D,P) sqlite3DbFree(D,P)
# define sqlite3StackFreeNN(D,P) sqlite3DbFreeNN(D,P)
#endif
@@ -19965,6 +20132,7 @@ SQLITE_PRIVATE void sqlite3ShowWinFunc(const Window*);
#endif
SQLITE_PRIVATE void sqlite3SetString(char **, sqlite3*, const char*);
+SQLITE_PRIVATE void sqlite3ProgressCheck(Parse*);
SQLITE_PRIVATE void sqlite3ErrorMsg(Parse*, const char*, ...);
SQLITE_PRIVATE int sqlite3ErrorToParser(sqlite3*,int);
SQLITE_PRIVATE void sqlite3Dequote(char*);
@@ -20022,7 +20190,7 @@ SQLITE_PRIVATE const char *sqlite3ColumnColl(Column*);
SQLITE_PRIVATE void sqlite3DeleteColumnNames(sqlite3*,Table*);
SQLITE_PRIVATE void sqlite3GenerateColumnNames(Parse *pParse, Select *pSelect);
SQLITE_PRIVATE int sqlite3ColumnsFromExprList(Parse*,ExprList*,i16*,Column**);
-SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(Parse*,Table*,Select*,char);
+SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(Parse*,Table*,Select*,char);
SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse*,Select*,char);
SQLITE_PRIVATE void sqlite3OpenSchemaTable(Parse *, int);
SQLITE_PRIVATE Index *sqlite3PrimaryKeyIndex(Table*);
@@ -20342,7 +20510,7 @@ SQLITE_PRIVATE int sqlite3FixExpr(DbFixer*, Expr*);
SQLITE_PRIVATE int sqlite3FixTriggerStep(DbFixer*, TriggerStep*);
SQLITE_PRIVATE int sqlite3RealSameAsInt(double,sqlite3_int64);
SQLITE_PRIVATE i64 sqlite3RealToI64(double);
-SQLITE_PRIVATE void sqlite3Int64ToText(i64,char*);
+SQLITE_PRIVATE int sqlite3Int64ToText(i64,char*);
SQLITE_PRIVATE int sqlite3AtoF(const char *z, double*, int, u8);
SQLITE_PRIVATE int sqlite3GetInt32(const char *, int*);
SQLITE_PRIVATE int sqlite3GetUInt32(const char*, u32*);
@@ -20393,6 +20561,7 @@ SQLITE_PRIVATE char sqlite3CompareAffinity(const Expr *pExpr, char aff2);
SQLITE_PRIVATE int sqlite3IndexAffinityOk(const Expr *pExpr, char idx_affinity);
SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table*,int);
SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr);
+SQLITE_PRIVATE int sqlite3ExprDataType(const Expr *pExpr);
SQLITE_PRIVATE int sqlite3Atoi64(const char*, i64*, int, u8);
SQLITE_PRIVATE int sqlite3DecOrHexToI64(const char*, i64*);
SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3*, int, const char*,...);
@@ -20409,6 +20578,9 @@ SQLITE_PRIVATE const char *sqlite3ErrName(int);
#ifndef SQLITE_OMIT_DESERIALIZE
SQLITE_PRIVATE int sqlite3MemdbInit(void);
+SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs*);
+#else
+# define sqlite3IsMemdb(X) 0
#endif
SQLITE_PRIVATE const char *sqlite3ErrStr(int);
@@ -20548,7 +20720,7 @@ SQLITE_PRIVATE int sqlite3ApiExit(sqlite3 *db, int);
SQLITE_PRIVATE int sqlite3OpenTempDatabase(Parse *);
SQLITE_PRIVATE void sqlite3StrAccumInit(StrAccum*, sqlite3*, char*, int, int);
-SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, int);
+SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum*, i64);
SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum*);
SQLITE_PRIVATE void sqlite3StrAccumSetError(StrAccum*, u8);
SQLITE_PRIVATE void sqlite3ResultStrAccum(sqlite3_context*,StrAccum*);
@@ -20906,6 +21078,12 @@ SQLITE_PRIVATE const char **sqlite3CompileOptions(int *pnOpt);
SQLITE_PRIVATE int sqlite3KvvfsInit(void);
#endif
+#if defined(VDBE_PROFILE) \
+ || defined(SQLITE_PERFORMANCE_TRACE) \
+ || defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+SQLITE_PRIVATE sqlite3_uint64 sqlite3Hwtime(void);
+#endif
+
#endif /* SQLITEINT_H */
/************** End of sqliteInt.h *******************************************/
@@ -20947,101 +21125,6 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void);
*/
#ifdef SQLITE_PERFORMANCE_TRACE
-/*
-** hwtime.h contains inline assembler code for implementing
-** high-performance timing routines.
-*/
-/************** Include hwtime.h in the middle of os_common.h ****************/
-/************** Begin file hwtime.h ******************************************/
-/*
-** 2008 May 27
-**
-** The author disclaims copyright to this source code. In place of
-** a legal notice, here is a blessing:
-**
-** May you do good and not evil.
-** May you find forgiveness for yourself and forgive others.
-** May you share freely, never taking more than you give.
-**
-******************************************************************************
-**
-** This file contains inline asm code for retrieving "high-performance"
-** counters for x86 and x86_64 class CPUs.
-*/
-#ifndef SQLITE_HWTIME_H
-#define SQLITE_HWTIME_H
-
-/*
-** The following routine only works on pentium-class (or newer) processors.
-** It uses the RDTSC opcode to read the cycle count value out of the
-** processor and returns that value. This can be used for high-res
-** profiling.
-*/
-#if !defined(__STRICT_ANSI__) && \
- (defined(__GNUC__) || defined(_MSC_VER)) && \
- (defined(i386) || defined(__i386__) || defined(_M_IX86))
-
- #if defined(__GNUC__)
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned int lo, hi;
- __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
- return (sqlite_uint64)hi << 32 | lo;
- }
-
- #elif defined(_MSC_VER)
-
- __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
- __asm {
- rdtsc
- ret ; return value at EDX:EAX
- }
- }
-
- #endif
-
-#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__))
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned long val;
- __asm__ __volatile__ ("rdtsc" : "=A" (val));
- return val;
- }
-
-#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__))
-
- __inline__ sqlite_uint64 sqlite3Hwtime(void){
- unsigned long long retval;
- unsigned long junk;
- __asm__ __volatile__ ("\n\
- 1: mftbu %1\n\
- mftb %L0\n\
- mftbu %0\n\
- cmpw %0,%1\n\
- bne 1b"
- : "=r" (retval), "=r" (junk));
- return retval;
- }
-
-#else
-
- /*
- ** asm() is needed for hardware timing support. Without asm(),
- ** disable the sqlite3Hwtime() routine.
- **
- ** sqlite3Hwtime() is only used for some obscure debugging
- ** and analysis configurations, not in any deliverable, so this
- ** should not be a great loss.
- */
-SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
-
-#endif
-
-#endif /* !defined(SQLITE_HWTIME_H) */
-
-/************** End of hwtime.h **********************************************/
-/************** Continuing where we left off in os_common.h ******************/
-
static sqlite_uint64 g_start;
static sqlite_uint64 g_elapsed;
#define TIMER_START g_start=sqlite3Hwtime()
@@ -22486,7 +22569,6 @@ struct VdbeFrame {
Vdbe *v; /* VM this frame belongs to */
VdbeFrame *pParent; /* Parent of this frame, or NULL if parent is main */
Op *aOp; /* Program instructions for parent frame */
- i64 *anExec; /* Event counters from parent frame */
Mem *aMem; /* Array of memory cells for parent frame */
VdbeCursor **apCsr; /* Array of Vdbe cursors for parent frame */
u8 *aOnce; /* Bitmask used by OP_Once */
@@ -22702,10 +22784,19 @@ typedef unsigned bft; /* Bit Field Type */
/* The ScanStatus object holds a single value for the
** sqlite3_stmt_scanstatus() interface.
+**
+** aAddrRange[]:
+** This array is used by ScanStatus elements associated with EQP
+** notes that make an SQLITE_SCANSTAT_NCYCLE value available. It is
+** an array of up to 3 ranges of VM addresses for which the Vdbe.anCycle[]
+** values should be summed to calculate the NCYCLE value. Each pair of
+** integer addresses is a start and end address (both inclusive) for a range of
+** instructions. A start value of 0 indicates an empty range.
*/
typedef struct ScanStatus ScanStatus;
struct ScanStatus {
int addrExplain; /* OP_Explain for loop */
+ int aAddrRange[6];
int addrLoop; /* Address of "loops" counter */
int addrVisit; /* Address of "rows visited" counter */
int iSelectID; /* The "Select-ID" for this loop */
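These ranges ultimately back the SQLITE_SCANSTAT_NCYCLE statistic. A hedged reader-side sketch, assuming the sqlite3_stmt_scanstatus_v2() interface that accompanies this change is available in the build:

    #include <stdio.h>
    /* Print per-loop cycle counts for a prepared statement pStmt. */
    sqlite3_int64 nCycle;
    int i;
    for(i=0; ; i++){
      nCycle = 0;
      if( sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NCYCLE,
                                     SQLITE_SCANSTAT_COMPLEX,
                                     (void*)&nCycle) ) break;   /* idx out of range */
      printf("loop %d: %lld cycles\n", i, (long long)nCycle);
    }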
@@ -22761,7 +22852,7 @@ struct Vdbe {
int nOp; /* Number of instructions in the program */
int nOpAlloc; /* Slots allocated for aOp[] */
Mem *aColName; /* Column names to return */
- Mem *pResultSet; /* Pointer to an array of results */
+ Mem *pResultRow; /* Current output row */
char *zErrMsg; /* Error message written here */
VList *pVList; /* Name of variables */
#ifndef SQLITE_OMIT_TRACE
@@ -22798,7 +22889,6 @@ struct Vdbe {
SubProgram *pProgram; /* Linked list of all sub-programs used by VM */
AuxData *pAuxData; /* Linked list of auxdata allocations */
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- i64 *anExec; /* Number of times each op has been executed */
int nScan; /* Entries in aScan[] */
ScanStatus *aScan; /* Scan definitions for sqlite3_stmt_scanstatus() */
#endif
@@ -22965,6 +23055,8 @@ SQLITE_PRIVATE int sqlite3VdbeSorterRewind(const VdbeCursor *, int *);
SQLITE_PRIVATE int sqlite3VdbeSorterWrite(const VdbeCursor *, Mem *);
SQLITE_PRIVATE int sqlite3VdbeSorterCompare(const VdbeCursor *, Mem *, int, int *);
+SQLITE_PRIVATE void sqlite3VdbeValueListFree(void*);
+
#ifdef SQLITE_DEBUG
SQLITE_PRIVATE void sqlite3VdbeIncrWriteCounter(Vdbe*, VdbeCursor*);
SQLITE_PRIVATE void sqlite3VdbeAssertAbortable(Vdbe*);
@@ -24152,7 +24244,7 @@ static int parseModifier(
i64 iOrigJD; /* Original localtime */
i64 iGuess; /* Guess at the corresponding utc time */
int cnt = 0; /* Safety to prevent infinite loop */
- int iErr; /* Guess is off by this much */
+ i64 iErr; /* Guess is off by this much */
computeJD(p);
iGuess = iOrigJD = p->iJD;
@@ -29220,7 +29312,7 @@ static void mallocWithAlarm(int n, void **pp){
** The upper bound is slightly less than 2GiB: 0x7ffffeff == 2,147,483,391
** This provides a 256-byte safety margin for defense against 32-bit
** signed integer overflow bugs when computing memory allocation sizes.
-** Parnoid applications might want to reduce the maximum allocation size
+** Paranoid applications might want to reduce the maximum allocation size
** further for an even larger safety margin. 0x3fffffff or 0x0fffffff
** or even smaller would be reasonable upper bounds on the size of a memory
** allocations for most applications.
@@ -29734,9 +29826,14 @@ SQLITE_PRIVATE char *sqlite3DbStrNDup(sqlite3 *db, const char *z, u64 n){
*/
SQLITE_PRIVATE char *sqlite3DbSpanDup(sqlite3 *db, const char *zStart, const char *zEnd){
int n;
+#ifdef SQLITE_DEBUG
+ /* Because of the way the parser works, the span is guaranteed to contain
+ ** at least one non-space character */
+ for(n=0; sqlite3Isspace(zStart[n]); n++){ assert( &zStart[n]<zEnd ); }
+#endif
while( sqlite3Isspace(zStart[0]) ) zStart++;
n = (int)(zEnd - zStart);
- while( ALWAYS(n>0) && sqlite3Isspace(zStart[n-1]) ) n--;
+ while( sqlite3Isspace(zStart[n-1]) ) n--;
return sqlite3DbStrNDup(db, zStart, n);
}
@@ -30575,13 +30672,26 @@ SQLITE_API void sqlite3_str_vappendf(
}
}
if( precision>1 ){
+ i64 nPrior = 1;
width -= precision-1;
if( width>1 && !flag_leftjustify ){
sqlite3_str_appendchar(pAccum, width-1, ' ');
width = 0;
}
- while( precision-- > 1 ){
- sqlite3_str_append(pAccum, buf, length);
+ sqlite3_str_append(pAccum, buf, length);
+ precision--;
+ while( precision > 1 ){
+ i64 nCopyBytes;
+ if( nPrior > precision-1 ) nPrior = precision - 1;
+ nCopyBytes = length*nPrior;
+ if( nCopyBytes + pAccum->nChar >= pAccum->nAlloc ){
+ sqlite3StrAccumEnlarge(pAccum, nCopyBytes);
+ }
+ if( pAccum->accError ) break;
+ sqlite3_str_append(pAccum,
+ &pAccum->zText[pAccum->nChar-nCopyBytes], nCopyBytes);
+ precision -= nPrior;
+ nPrior *= 2;
}
}
bufpt = buf;
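The rewritten loop appends the first copy, then repeatedly copies the accumulator's own tail back onto itself with nPrior doubling each pass, so rendering N repetitions costs O(log N) append calls rather than N. The doubling trick in isolation (a standalone sketch, not SQLite code):

    #include <string.h>
    /* Fill buf with n copies of src (len bytes each), n>=1.
    ** buf must have room for n*len bytes. */
    static void repeatAppend(char *buf, const char *src, size_t len, size_t n){
      size_t done = 1;
      memcpy(buf, src, len);
      while( done<n ){
        size_t chunk = (done < n-done) ? done : n-done;
        memcpy(&buf[done*len], buf, chunk*len);  /* reuse what is already there */
        done += chunk;
      }
    }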
@@ -30809,9 +30919,9 @@ SQLITE_PRIVATE void sqlite3RecordErrorOffsetOfExpr(sqlite3 *db, const Expr *pExp
** Return the number of bytes of text that StrAccum is able to accept
** after the attempted enlargement. The value returned might be zero.
*/
-SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){
+SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, i64 N){
char *zNew;
- assert( p->nChar+(i64)N >= p->nAlloc ); /* Only called if really needed */
+ assert( p->nChar+N >= p->nAlloc ); /* Only called if really needed */
if( p->accError ){
testcase(p->accError==SQLITE_TOOBIG);
testcase(p->accError==SQLITE_NOMEM);
@@ -30822,8 +30932,7 @@ SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){
return p->nAlloc - p->nChar - 1;
}else{
char *zOld = isMalloced(p) ? p->zText : 0;
- i64 szNew = p->nChar;
- szNew += (sqlite3_int64)N + 1;
+ i64 szNew = p->nChar + N + 1;
if( szNew+p->nChar<=p->mxAlloc ){
/* Force exponential buffer size growth as long as it does not overflow,
** to avoid having to call this routine too often */
@@ -30853,7 +30962,8 @@ SQLITE_PRIVATE int sqlite3StrAccumEnlarge(StrAccum *p, int N){
return 0;
}
}
- return N;
+ assert( N>=0 && N<=0x7fffffff );
+ return (int)N;
}
/*
@@ -31449,6 +31559,13 @@ SQLITE_PRIVATE void sqlite3TreeViewSrcList(TreeView *pView, const SrcList *pSrc)
if( pItem->fg.isOn || (pItem->fg.isUsing==0 && pItem->u3.pOn!=0) ){
sqlite3_str_appendf(&x, " ON");
}
+ if( pItem->fg.isTabFunc ) sqlite3_str_appendf(&x, " isTabFunc");
+ if( pItem->fg.isCorrelated ) sqlite3_str_appendf(&x, " isCorrelated");
+ if( pItem->fg.isMaterialized ) sqlite3_str_appendf(&x, " isMaterialized");
+ if( pItem->fg.viaCoroutine ) sqlite3_str_appendf(&x, " viaCoroutine");
+ if( pItem->fg.notCte ) sqlite3_str_appendf(&x, " notCte");
+ if( pItem->fg.isNestedFrom ) sqlite3_str_appendf(&x, " isNestedFrom");
+
sqlite3StrAccumFinish(&x);
sqlite3TreeViewItem(pView, zLine, i<pSrc->nSrc-1);
n = 0;
@@ -31718,7 +31835,7 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
sqlite3TreeViewPop(&pView);
return;
}
- if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags ){
+ if( pExpr->flags || pExpr->affExpr || pExpr->vvaFlags || pExpr->pAggInfo ){
StrAccum x;
sqlite3StrAccumInit(&x, 0, zFlgs, sizeof(zFlgs), 0);
sqlite3_str_appendf(&x, " fg.af=%x.%c",
@@ -31735,6 +31852,9 @@ SQLITE_PRIVATE void sqlite3TreeViewExpr(TreeView *pView, const Expr *pExpr, u8 m
if( ExprHasVVAProperty(pExpr, EP_Immutable) ){
sqlite3_str_appendf(&x, " IMMUTABLE");
}
+ if( pExpr->pAggInfo!=0 ){
+ sqlite3_str_appendf(&x, " agg-column[%d]", pExpr->iAgg);
+ }
sqlite3StrAccumFinish(&x);
}else{
zFlgs[0] = 0;
@@ -33675,6 +33795,26 @@ SQLITE_PRIVATE void sqlite3ErrorWithMsg(sqlite3 *db, int err_code, const char *z
}
/*
+** Check for interrupts and invoke progress callback.
+*/
+SQLITE_PRIVATE void sqlite3ProgressCheck(Parse *p){
+ sqlite3 *db = p->db;
+ if( AtomicLoad(&db->u1.isInterrupted) ){
+ p->nErr++;
+ p->rc = SQLITE_INTERRUPT;
+ }
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ if( db->xProgress && (++p->nProgressSteps)>=db->nProgressOps ){
+ if( db->xProgress(db->pProgressArg) ){
+ p->nErr++;
+ p->rc = SQLITE_INTERRUPT;
+ }
+ p->nProgressSteps = 0;
+ }
+#endif
+}
+
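Because sqlite3ProgressCheck() is called from the parser, a progress handler registered through the public API can now also interrupt an excessively long sqlite3_prepare(), not just a long-running step. Registration is unchanged (shouldCancel() is a hypothetical application predicate):

    static int progressCb(void *pArg){
      (void)pArg;
      return shouldCancel();   /* non-zero => abort with SQLITE_INTERRUPT */
    }
    /* Invoke progressCb roughly every 1000 VM/parser steps: */
    sqlite3_progress_handler(db, 1000, progressCb, 0);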
+/*
** Add an error message to pParse->zErrMsg and increment pParse->nErr.
**
** This function should be used to report any error that occurs while
@@ -34131,11 +34271,14 @@ do_atof_calc:
#endif
/*
-** Render an signed 64-bit integer as text. Store the result in zOut[].
+** Render a signed 64-bit integer as text. Store the result in zOut[] and
+** return the length of the string that was stored, in bytes. The value
+** returned does not include the zero terminator at the end of the output
+** string.
**
** The caller must ensure that zOut[] is at least 21 bytes in size.
*/
-SQLITE_PRIVATE void sqlite3Int64ToText(i64 v, char *zOut){
+SQLITE_PRIVATE int sqlite3Int64ToText(i64 v, char *zOut){
int i;
u64 x;
char zTemp[22];
@@ -34152,6 +34295,7 @@ SQLITE_PRIVATE void sqlite3Int64ToText(i64 v, char *zOut){
}while( x );
if( v<0 ) zTemp[i--] = '-';
memcpy(zOut, &zTemp[i+1], sizeof(zTemp)-1-i);
+ return sizeof(zTemp)-2-i;
}
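Returning the length lets callers skip a follow-up strlen(). A sketch (iVal is illustrative):

    char zNum[22];                        /* >= 21 bytes, per the contract */
    int n = sqlite3Int64ToText(iVal, zNum);
    /* zNum now holds the decimal text: n bytes plus a NUL terminator. */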
/*
@@ -35213,6 +35357,104 @@ SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nNam
return 0;
}
+/*
+** High-resolution hardware timer used for debugging and testing only.
+*/
+#if defined(VDBE_PROFILE) \
+ || defined(SQLITE_PERFORMANCE_TRACE) \
+ || defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+/************** Include hwtime.h in the middle of util.c *********************/
+/************** Begin file hwtime.h ******************************************/
+/*
+** 2008 May 27
+**
+** The author disclaims copyright to this source code. In place of
+** a legal notice, here is a blessing:
+**
+** May you do good and not evil.
+** May you find forgiveness for yourself and forgive others.
+** May you share freely, never taking more than you give.
+**
+******************************************************************************
+**
+** This file contains inline asm code for retrieving "high-performance"
+** counters for x86 and x86_64 class CPUs.
+*/
+#ifndef SQLITE_HWTIME_H
+#define SQLITE_HWTIME_H
+
+/*
+** The following routine only works on pentium-class (or newer) processors.
+** It uses the RDTSC opcode to read the cycle count value out of the
+** processor and returns that value. This can be used for high-res
+** profiling.
+*/
+#if !defined(__STRICT_ANSI__) && \
+ (defined(__GNUC__) || defined(_MSC_VER)) && \
+ (defined(i386) || defined(__i386__) || defined(_M_IX86))
+
+ #if defined(__GNUC__)
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned int lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return (sqlite_uint64)hi << 32 | lo;
+ }
+
+ #elif defined(_MSC_VER)
+
+ __declspec(naked) __inline sqlite_uint64 __cdecl sqlite3Hwtime(void){
+ __asm {
+ rdtsc
+ ret ; return value at EDX:EAX
+ }
+ }
+
+ #endif
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__x86_64__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned int lo, hi;
+ __asm__ __volatile__ ("rdtsc" : "=a" (lo), "=d" (hi));
+ return (sqlite_uint64)hi << 32 | lo;
+ }
+
+#elif !defined(__STRICT_ANSI__) && (defined(__GNUC__) && defined(__ppc__))
+
+ __inline__ sqlite_uint64 sqlite3Hwtime(void){
+ unsigned long long retval;
+ unsigned long junk;
+ __asm__ __volatile__ ("\n\
+ 1: mftbu %1\n\
+ mftb %L0\n\
+ mftbu %0\n\
+ cmpw %0,%1\n\
+ bne 1b"
+ : "=r" (retval), "=r" (junk));
+ return retval;
+ }
+
+#else
+
+ /*
+ ** asm() is needed for hardware timing support. Without asm(),
+ ** disable the sqlite3Hwtime() routine.
+ **
+ ** sqlite3Hwtime() is only used for some obscure debugging
+ ** and analysis configurations, not in any deliverable, so this
+ ** should not be a great loss.
+ */
+SQLITE_PRIVATE sqlite_uint64 sqlite3Hwtime(void){ return ((sqlite_uint64)0); }
+
+#endif
+
+#endif /* !defined(SQLITE_HWTIME_H) */
+
+/************** End of hwtime.h **********************************************/
+/************** Continuing where we left off in util.c ***********************/
+#endif
+
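Consumers use sqlite3Hwtime() exactly as the os_common.h timer macros shown earlier do: sample before and after, then subtract. Note the result is raw TSC-style ticks, not wall-clock time:

    sqlite3_uint64 t0 = sqlite3Hwtime();
    /* ... code being profiled ... */
    sqlite3_uint64 nTicks = sqlite3Hwtime() - t0;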
/************** End of util.c ************************************************/
/************** Begin file hash.c ********************************************/
/*
@@ -35383,12 +35625,13 @@ static HashElem *findElementWithHash(
count = pH->count;
}
if( pHash ) *pHash = h;
- while( count-- ){
+ while( count ){
assert( elem!=0 );
if( sqlite3StrICmp(elem->pKey,pKey)==0 ){
return elem;
}
elem = elem->next;
+ count--;
}
return &nullElement;
}
@@ -35747,7 +35990,9 @@ struct KVVfsFile {
char *aJrnl; /* Journal content */
int szPage; /* Last known page size */
sqlite3_int64 szDb; /* Database file size. -1 means unknown */
+ char *aData; /* Buffer to hold page data */
};
+#define SQLITE_KVOS_SZ 133073
/*
** Methods for KVVfsFile
@@ -36110,8 +36355,7 @@ static int kvvfsDecode(const char *a, char *aOut, int nOut){
if( j+n>nOut ) return -1;
memset(&aOut[j], 0, n);
j += n;
- c = aIn[i];
- if( c==0 ) break;
+ if( c==0 || mult==1 ) break; /* progress stalled if mult==1 */
}else{
aOut[j] = c<<4;
c = kvvfsHexValue[aIn[++i]];
@@ -36188,6 +36432,7 @@ static int kvvfsClose(sqlite3_file *pProtoFile){
SQLITE_KV_LOG(("xClose %s %s\n", pFile->zClass,
pFile->isJournal ? "journal" : "db"));
sqlite3_free(pFile->aJrnl);
+ sqlite3_free(pFile->aData);
return SQLITE_OK;
}
@@ -36236,7 +36481,7 @@ static int kvvfsReadDb(
unsigned int pgno;
int got, n;
char zKey[30];
- char aData[133073];
+ char *aData = pFile->aData;
assert( iOfst>=0 );
assert( iAmt>=0 );
SQLITE_KV_LOG(("xRead('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
@@ -36253,7 +36498,8 @@ static int kvvfsReadDb(
pgno = 1;
}
sqlite3_snprintf(sizeof(zKey), zKey, "%u", pgno);
- got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey, aData, sizeof(aData)-1);
+ got = sqlite3KvvfsMethods.xRead(pFile->zClass, zKey,
+ aData, SQLITE_KVOS_SZ-1);
if( got<0 ){
n = 0;
}else{
@@ -36261,7 +36507,7 @@ static int kvvfsReadDb(
if( iOfst+iAmt<512 ){
int k = iOfst+iAmt;
aData[k*2] = 0;
- n = kvvfsDecode(aData, &aData[2000], sizeof(aData)-2000);
+ n = kvvfsDecode(aData, &aData[2000], SQLITE_KVOS_SZ-2000);
if( n>=iOfst+iAmt ){
memcpy(zBuf, &aData[2000+iOfst], iAmt);
n = iAmt;
@@ -36320,7 +36566,7 @@ static int kvvfsWriteDb(
KVVfsFile *pFile = (KVVfsFile*)pProtoFile;
unsigned int pgno;
char zKey[30];
- char aData[131073];
+ char *aData = pFile->aData;
SQLITE_KV_LOG(("xWrite('%s-db',%d,%lld)\n", pFile->zClass, iAmt, iOfst));
assert( iAmt>=512 && iAmt<=65536 );
assert( (iAmt & (iAmt-1))==0 );
@@ -36529,6 +36775,10 @@ static int kvvfsOpen(
}else{
pFile->zClass = "local";
}
+ pFile->aData = sqlite3_malloc64(SQLITE_KVOS_SZ);
+ if( pFile->aData==0 ){
+ return SQLITE_NOMEM;
+ }
pFile->aJrnl = 0;
pFile->nJrnl = 0;
pFile->szPage = -1;
@@ -36765,7 +37015,8 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void){
/* #include <time.h> */
#include <sys/time.h> /* amalgamator: keep */
#include <errno.h>
-#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \
+ && !defined(SQLITE_WASI)
# include <sys/mman.h>
#endif
@@ -36853,9 +37104,46 @@ SQLITE_PRIVATE int sqlite3KvvfsInit(void){
*/
#define SQLITE_MAX_SYMLINKS 100
+/*
+** Remove and stub certain info for WASI (WebAssembly System
+** Interface) builds.
+*/
+#ifdef SQLITE_WASI
+# undef HAVE_FCHMOD
+# undef HAVE_FCHOWN
+# undef HAVE_MREMAP
+# define HAVE_MREMAP 0
+# ifndef SQLITE_DEFAULT_UNIX_VFS
+# define SQLITE_DEFAULT_UNIX_VFS "unix-dotfile"
+ /* ^^^ should SQLITE_DEFAULT_UNIX_VFS be "unix-none"? */
+# endif
+# ifndef F_RDLCK
+# define F_RDLCK 0
+# define F_WRLCK 1
+# define F_UNLCK 2
+# if __LONG_MAX == 0x7fffffffL
+# define F_GETLK 12
+# define F_SETLK 13
+# define F_SETLKW 14
+# else
+# define F_GETLK 5
+# define F_SETLK 6
+# define F_SETLKW 7
+# endif
+# endif
+#else /* !SQLITE_WASI */
+# ifndef HAVE_FCHMOD
+# define HAVE_FCHMOD
+# endif
+#endif /* SQLITE_WASI */
+
+#ifdef SQLITE_WASI
+# define osGetpid(X) (pid_t)1
+#else
/* Always cast the getpid() return type for compatibility with
** kernel modules in VxWorks. */
-#define osGetpid(X) (pid_t)getpid()
+# define osGetpid(X) (pid_t)getpid()
+#endif
/*
** Only set the lastErrno if the error code is a real error and not
@@ -37127,7 +37415,11 @@ static struct unix_syscall {
#define osPwrite64 ((ssize_t(*)(int,const void*,size_t,off64_t))\
aSyscall[13].pCurrent)
+#if defined(HAVE_FCHMOD)
{ "fchmod", (sqlite3_syscall_ptr)fchmod, 0 },
+#else
+ { "fchmod", (sqlite3_syscall_ptr)0, 0 },
+#endif
#define osFchmod ((int(*)(int,mode_t))aSyscall[14].pCurrent)
#if defined(HAVE_POSIX_FALLOCATE) && HAVE_POSIX_FALLOCATE
@@ -37163,14 +37455,16 @@ static struct unix_syscall {
#endif
#define osGeteuid ((uid_t(*)(void))aSyscall[21].pCurrent)
-#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \
+ && !defined(SQLITE_WASI)
{ "mmap", (sqlite3_syscall_ptr)mmap, 0 },
#else
{ "mmap", (sqlite3_syscall_ptr)0, 0 },
#endif
#define osMmap ((void*(*)(void*,size_t,int,int,int,off_t))aSyscall[22].pCurrent)
-#if !defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0
+#if (!defined(SQLITE_OMIT_WAL) || SQLITE_MAX_MMAP_SIZE>0) \
+ && !defined(SQLITE_WASI)
{ "munmap", (sqlite3_syscall_ptr)munmap, 0 },
#else
{ "munmap", (sqlite3_syscall_ptr)0, 0 },
@@ -38321,7 +38615,7 @@ static int unixFileLock(unixFile *pFile, struct flock *pLock){
**
** UNLOCKED -> SHARED
** SHARED -> RESERVED
-** SHARED -> (PENDING) -> EXCLUSIVE
+** SHARED -> EXCLUSIVE
** RESERVED -> (PENDING) -> EXCLUSIVE
** PENDING -> EXCLUSIVE
**
@@ -38354,19 +38648,20 @@ static int unixLock(sqlite3_file *id, int eFileLock){
** A RESERVED lock is implemented by grabbing a write-lock on the
** 'reserved byte'.
**
- ** A process may only obtain a PENDING lock after it has obtained a
- ** SHARED lock. A PENDING lock is implemented by obtaining a write-lock
- ** on the 'pending byte'. This ensures that no new SHARED locks can be
- ** obtained, but existing SHARED locks are allowed to persist. A process
- ** does not have to obtain a RESERVED lock on the way to a PENDING lock.
- ** This property is used by the algorithm for rolling back a journal file
- ** after a crash.
+ ** An EXCLUSIVE lock may only be requested after either a SHARED or
+ ** RESERVED lock is held. An EXCLUSIVE lock is implemented by obtaining
+ ** a write-lock on the entire 'shared byte range'. Since all other locks
+ ** require a read-lock on one of the bytes within this range, this ensures
+ ** that no other locks are held on the database.
**
- ** An EXCLUSIVE lock, obtained after a PENDING lock is held, is
- ** implemented by obtaining a write-lock on the entire 'shared byte
- ** range'. Since all other locks require a read-lock on one of the bytes
- ** within this range, this ensures that no other locks are held on the
- ** database.
+ ** If a process that holds a RESERVED lock requests an EXCLUSIVE, then
+ ** a PENDING lock is obtained first. A PENDING lock is implemented by
+ ** obtaining a write-lock on the 'pending byte'. This ensures that no new
+ ** SHARED locks can be obtained, but existing SHARED locks are allowed to
+ ** persist. If the call to this function fails to obtain the EXCLUSIVE
+ ** lock in this case, it holds the PENDING lock instead. The client may
+ ** then re-attempt the EXCLUSIVE lock later on, after existing SHARED
+ ** locks have cleared.
*/
int rc = SQLITE_OK;
unixFile *pFile = (unixFile*)id;
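One observable consequence of the revised transition table: a RESERVED holder whose EXCLUSIVE request cannot complete may legitimately be left holding PENDING rather than being rolled back. In terms of the generic VFS interface (pId is an illustrative open sqlite3_file):

    int rc = pId->pMethods->xLock(pId, SQLITE_LOCK_EXCLUSIVE);
    if( rc==SQLITE_BUSY ){
      /* Per the comment above, the handle may now hold PENDING; once the
      ** remaining SHARED readers clear, xLock(EXCLUSIVE) can be retried. */
    }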
@@ -38437,7 +38732,7 @@ static int unixLock(sqlite3_file *id, int eFileLock){
lock.l_len = 1L;
lock.l_whence = SEEK_SET;
if( eFileLock==SHARED_LOCK
- || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock<PENDING_LOCK)
+ || (eFileLock==EXCLUSIVE_LOCK && pFile->eFileLock==RESERVED_LOCK)
){
lock.l_type = (eFileLock==SHARED_LOCK?F_RDLCK:F_WRLCK);
lock.l_start = PENDING_BYTE;
@@ -38448,6 +38743,9 @@ static int unixLock(sqlite3_file *id, int eFileLock){
storeLastErrno(pFile, tErrno);
}
goto end_lock;
+ }else if( eFileLock==EXCLUSIVE_LOCK ){
+ pFile->eFileLock = PENDING_LOCK;
+ pInode->eFileLock = PENDING_LOCK;
}
}
@@ -38535,13 +38833,9 @@ static int unixLock(sqlite3_file *id, int eFileLock){
}
#endif
-
if( rc==SQLITE_OK ){
pFile->eFileLock = eFileLock;
pInode->eFileLock = eFileLock;
- }else if( eFileLock==EXCLUSIVE_LOCK ){
- pFile->eFileLock = PENDING_LOCK;
- pInode->eFileLock = PENDING_LOCK;
}
end_lock:
@@ -43132,12 +43426,10 @@ static void appendOnePathElement(
if( zName[0]=='.' ){
if( nName==1 ) return;
if( zName[1]=='.' && nName==2 ){
- if( pPath->nUsed<=1 ){
- pPath->rc = SQLITE_ERROR;
- return;
+ if( pPath->nUsed>1 ){
+ assert( pPath->zOut[0]=='/' );
+ while( pPath->zOut[--pPath->nUsed]!='/' ){}
}
- assert( pPath->zOut[0]=='/' );
- while( pPath->zOut[--pPath->nUsed]!='/' ){}
return;
}
}
@@ -43349,7 +43641,7 @@ static int unixRandomness(sqlite3_vfs *NotUsed, int nBuf, char *zBuf){
** than the argument.
*/
static int unixSleep(sqlite3_vfs *NotUsed, int microseconds){
-#if OS_VXWORKS
+#if OS_VXWORKS || _POSIX_C_SOURCE >= 199309L
struct timespec sp;
sp.tv_sec = microseconds / 1000000;
@@ -51886,9 +52178,9 @@ end_deserialize:
/*
** Return true if the VFS is the memvfs.
*/
-//SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs *pVfs){
-// return pVfs==&memdb_vfs;
-//}
+SQLITE_PRIVATE int sqlite3IsMemdb(const sqlite3_vfs *pVfs){
+ return pVfs==&memdb_vfs;
+}
/*
** This routine is called when the extension is loaded.
@@ -52369,7 +52661,7 @@ bitvec_end:
struct PCache {
PgHdr *pDirty, *pDirtyTail; /* List of dirty pages in LRU order */
PgHdr *pSynced; /* Last synced page in dirty page list */
- int nRefSum; /* Sum of ref counts over all pages */
+ i64 nRefSum; /* Sum of ref counts over all pages */
int szCache; /* Configured cache size */
int szSpill; /* Size before spilling occurs */
int szPage; /* Size of every page in this cache */
@@ -52399,7 +52691,7 @@ struct PCache {
unsigned char *a;
int j;
pPg = (PgHdr*)pLower->pExtra;
- printf("%3d: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
+ printf("%3lld: nRef %2d flgs %02x data ", i, pPg->nRef, pPg->flags);
a = (unsigned char *)pLower->pBuf;
for(j=0; j<12; j++) printf("%02x", a[j]);
printf(" ptr %p\n", pPg);
@@ -53143,14 +53435,14 @@ SQLITE_PRIVATE PgHdr *sqlite3PcacheDirtyList(PCache *pCache){
** This is not the total number of pages referenced, but the sum of the
** reference count for all pages.
*/
-SQLITE_PRIVATE int sqlite3PcacheRefCount(PCache *pCache){
+SQLITE_PRIVATE i64 sqlite3PcacheRefCount(PCache *pCache){
return pCache->nRefSum;
}
/*
** Return the number of references to the page supplied as an argument.
*/
-SQLITE_PRIVATE int sqlite3PcachePageRefcount(PgHdr *p){
+SQLITE_PRIVATE i64 sqlite3PcachePageRefcount(PgHdr *p){
return p->nRef;
}
@@ -58135,7 +58427,7 @@ end_playback:
** see if it is possible to delete the super-journal.
*/
assert( zSuper==&pPager->pTmpSpace[4] );
- memset(&zSuper[-4], 0, 4);
+ memset(pPager->pTmpSpace, 0, 4);
rc = pager_delsuper(pPager, zSuper);
testcase( rc!=SQLITE_OK );
}
@@ -58756,7 +59048,6 @@ SQLITE_PRIVATE void sqlite3PagerShrink(Pager *pPager){
** Numeric values associated with these states are OFF==1, NORMAL=2,
** and FULL=3.
*/
-#ifndef SQLITE_OMIT_PAGER_PRAGMAS
SQLITE_PRIVATE void sqlite3PagerSetFlags(
Pager *pPager, /* The pager to set safety level for */
unsigned pgFlags /* Various flags */
@@ -58791,7 +59082,6 @@ SQLITE_PRIVATE void sqlite3PagerSetFlags(
pPager->doNotSpill |= SPILLFLAG_OFF;
}
}
-#endif
/*
** The following global variable is incremented whenever the library
@@ -59893,7 +60183,6 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
u32 szPageDflt = SQLITE_DEFAULT_PAGE_SIZE; /* Default page size */
const char *zUri = 0; /* URI args to copy */
int nUriByte = 1; /* Number of bytes of URI args at *zUri */
- int nUri = 0; /* Number of URI parameters */
/* Figure out how much space is required for each journal file-handle
** (there are two of them, the main journal and the sub-journal). */
@@ -59941,7 +60230,6 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
while( *z ){
z += strlen(z)+1;
z += strlen(z)+1;
- nUri++;
}
nUriByte = (int)(&z[1] - zUri);
assert( nUriByte>=1 );
@@ -60197,18 +60485,7 @@ act_like_temp_file:
pPager->memDb = (u8)memDb;
pPager->readOnly = (u8)readOnly;
assert( useJournal || pPager->tempFile );
- pPager->noSync = pPager->tempFile;
- if( pPager->noSync ){
- assert( pPager->fullSync==0 );
- assert( pPager->extraSync==0 );
- assert( pPager->syncFlags==0 );
- assert( pPager->walSyncFlags==0 );
- }else{
- pPager->fullSync = 1;
- pPager->extraSync = 0;
- pPager->syncFlags = SQLITE_SYNC_NORMAL;
- pPager->walSyncFlags = SQLITE_SYNC_NORMAL | (SQLITE_SYNC_NORMAL<<2);
- }
+ sqlite3PagerSetFlags(pPager, (SQLITE_DEFAULT_SYNCHRONOUS+1)|PAGER_CACHESPILL);
/* pPager->pFirst = 0; */
/* pPager->pFirstSynced = 0; */
/* pPager->pLast = 0; */
@@ -61469,7 +61746,7 @@ static int pager_incr_changecounter(Pager *pPager, int isDirectMode){
# define DIRECT_MODE isDirectMode
#endif
- if( !pPager->changeCountDone && ALWAYS(pPager->dbSize>0) ){
+ if( !pPager->changeCountDone && pPager->dbSize>0 ){
PgHdr *pPgHdr; /* Reference to page 1 */
assert( !pPager->tempFile && isOpen(pPager->fd) );
@@ -62209,7 +62486,11 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){
*/
SQLITE_PRIVATE const char *sqlite3PagerFilename(const Pager *pPager, int nullIfMemDb){
static const char zFake[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
- return (nullIfMemDb && pPager->memDb) ? &zFake[4] : pPager->zFilename;
+ if( nullIfMemDb && (pPager->memDb || sqlite3IsMemdb(pPager->pVfs)) ){
+ return &zFake[4];
+ }else{
+ return pPager->zFilename;
+ }
}
/*
@@ -67792,15 +68073,15 @@ struct BtCursor {
** So, this macro is defined instead.
*/
#ifndef SQLITE_OMIT_AUTOVACUUM
-#define ISAUTOVACUUM (pBt->autoVacuum)
+#define ISAUTOVACUUM(pBt) (pBt->autoVacuum)
#else
-#define ISAUTOVACUUM 0
+#define ISAUTOVACUUM(pBt) 0
#endif
/*
-** This structure is passed around through all the sanity checking routines
-** in order to keep track of some global state information.
+** This structure is passed around through all the PRAGMA integrity_check
+** checking routines in order to keep track of some global state information.
**
** The aRef[] array is allocated so that there is 1 bit for each page in
** the database. As the integrity-check proceeds, for each page used in
@@ -67816,7 +68097,8 @@ struct IntegrityCk {
Pgno nPage; /* Number of pages in the database */
int mxErr; /* Stop accumulating errors when this reaches zero */
int nErr; /* Number of messages written to zErrMsg so far */
- int bOomFault; /* A memory allocation error has occurred */
+ int rc; /* SQLITE_OK, SQLITE_NOMEM, or SQLITE_INTERRUPT */
+ u32 nStep; /* Number of steps into the integrity_check process */
const char *zPfx; /* Error message prefix */
Pgno v1; /* Value for first %u substitution in zPfx */
int v2; /* Value for second %d substitution in zPfx */
@@ -70046,62 +70328,67 @@ static int freeSpace(MemPage *pPage, u16 iStart, u16 iSize){
** Only the following combinations are supported. Anything different
** indicates a corrupt database file:
**
-** PTF_ZERODATA
-** PTF_ZERODATA | PTF_LEAF
-** PTF_LEAFDATA | PTF_INTKEY
-** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF
+** PTF_ZERODATA (0x02, 2)
+** PTF_LEAFDATA | PTF_INTKEY (0x05, 5)
+** PTF_ZERODATA | PTF_LEAF (0x0a, 10)
+** PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF (0x0d, 13)
*/
static int decodeFlags(MemPage *pPage, int flagByte){
BtShared *pBt; /* A copy of pPage->pBt */
assert( pPage->hdrOffset==(pPage->pgno==1 ? 100 : 0) );
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- pPage->leaf = (u8)(flagByte>>3); assert( PTF_LEAF == 1<<3 );
- flagByte &= ~PTF_LEAF;
- pPage->childPtrSize = 4-4*pPage->leaf;
pBt = pPage->pBt;
- if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
- /* EVIDENCE-OF: R-07291-35328 A value of 5 (0x05) means the page is an
- ** interior table b-tree page. */
- assert( (PTF_LEAFDATA|PTF_INTKEY)==5 );
- /* EVIDENCE-OF: R-26900-09176 A value of 13 (0x0d) means the page is a
- ** leaf table b-tree page. */
- assert( (PTF_LEAFDATA|PTF_INTKEY|PTF_LEAF)==13 );
- pPage->intKey = 1;
- if( pPage->leaf ){
+ pPage->max1bytePayload = pBt->max1bytePayload;
+ if( flagByte>=(PTF_ZERODATA | PTF_LEAF) ){
+ pPage->childPtrSize = 0;
+ pPage->leaf = 1;
+ if( flagByte==(PTF_LEAFDATA | PTF_INTKEY | PTF_LEAF) ){
pPage->intKeyLeaf = 1;
pPage->xCellSize = cellSizePtrTableLeaf;
pPage->xParseCell = btreeParseCellPtr;
+ pPage->intKey = 1;
+ pPage->maxLocal = pBt->maxLeaf;
+ pPage->minLocal = pBt->minLeaf;
+ }else if( flagByte==(PTF_ZERODATA | PTF_LEAF) ){
+ pPage->intKey = 0;
+ pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtr;
+ pPage->xParseCell = btreeParseCellPtrIndex;
+ pPage->maxLocal = pBt->maxLocal;
+ pPage->minLocal = pBt->minLocal;
}else{
+ pPage->intKey = 0;
+ pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtr;
+ pPage->xParseCell = btreeParseCellPtrIndex;
+ return SQLITE_CORRUPT_PAGE(pPage);
+ }
+ }else{
+ pPage->childPtrSize = 4;
+ pPage->leaf = 0;
+ if( flagByte==(PTF_ZERODATA) ){
+ pPage->intKey = 0;
+ pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtr;
+ pPage->xParseCell = btreeParseCellPtrIndex;
+ pPage->maxLocal = pBt->maxLocal;
+ pPage->minLocal = pBt->minLocal;
+ }else if( flagByte==(PTF_LEAFDATA | PTF_INTKEY) ){
pPage->intKeyLeaf = 0;
pPage->xCellSize = cellSizePtrNoPayload;
pPage->xParseCell = btreeParseCellPtrNoPayload;
+ pPage->intKey = 1;
+ pPage->maxLocal = pBt->maxLeaf;
+ pPage->minLocal = pBt->minLeaf;
+ }else{
+ pPage->intKey = 0;
+ pPage->intKeyLeaf = 0;
+ pPage->xCellSize = cellSizePtr;
+ pPage->xParseCell = btreeParseCellPtrIndex;
+ return SQLITE_CORRUPT_PAGE(pPage);
}
- pPage->maxLocal = pBt->maxLeaf;
- pPage->minLocal = pBt->minLeaf;
- }else if( flagByte==PTF_ZERODATA ){
- /* EVIDENCE-OF: R-43316-37308 A value of 2 (0x02) means the page is an
- ** interior index b-tree page. */
- assert( (PTF_ZERODATA)==2 );
- /* EVIDENCE-OF: R-59615-42828 A value of 10 (0x0a) means the page is a
- ** leaf index b-tree page. */
- assert( (PTF_ZERODATA|PTF_LEAF)==10 );
- pPage->intKey = 0;
- pPage->intKeyLeaf = 0;
- pPage->xCellSize = cellSizePtr;
- pPage->xParseCell = btreeParseCellPtrIndex;
- pPage->maxLocal = pBt->maxLocal;
- pPage->minLocal = pBt->minLocal;
- }else{
- /* EVIDENCE-OF: R-47608-56469 Any other value for the b-tree page type is
- ** an error. */
- pPage->intKey = 0;
- pPage->intKeyLeaf = 0;
- pPage->xCellSize = cellSizePtr;
- pPage->xParseCell = btreeParseCellPtrIndex;
- return SQLITE_CORRUPT_PAGE(pPage);
}
- pPage->max1bytePayload = pBt->max1bytePayload;
return SQLITE_OK;
}
@@ -73641,9 +73928,25 @@ SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor *pCur, int *pRes){
** on success. Set *pRes to 0 if the cursor actually points to something
** or set *pRes to 1 if the table is empty.
*/
+static SQLITE_NOINLINE int btreeLast(BtCursor *pCur, int *pRes){
+ int rc = moveToRoot(pCur);
+ if( rc==SQLITE_OK ){
+ assert( pCur->eState==CURSOR_VALID );
+ *pRes = 0;
+ rc = moveToRightmost(pCur);
+ if( rc==SQLITE_OK ){
+ pCur->curFlags |= BTCF_AtLast;
+ }else{
+ pCur->curFlags &= ~BTCF_AtLast;
+ }
+ }else if( rc==SQLITE_EMPTY ){
+ assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 );
+ *pRes = 1;
+ rc = SQLITE_OK;
+ }
+ return rc;
+}
SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
- int rc;
-
assert( cursorOwnsBtShared(pCur) );
assert( sqlite3_mutex_held(pCur->pBtree->db->mutex) );
@@ -73664,23 +73967,7 @@ SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor *pCur, int *pRes){
*pRes = 0;
return SQLITE_OK;
}
-
- rc = moveToRoot(pCur);
- if( rc==SQLITE_OK ){
- assert( pCur->eState==CURSOR_VALID );
- *pRes = 0;
- rc = moveToRightmost(pCur);
- if( rc==SQLITE_OK ){
- pCur->curFlags |= BTCF_AtLast;
- }else{
- pCur->curFlags &= ~BTCF_AtLast;
- }
- }else if( rc==SQLITE_EMPTY ){
- assert( pCur->pgnoRoot==0 || pCur->pPage->nCell==0 );
- *pRes = 1;
- rc = SQLITE_OK;
- }
- return rc;
+ return btreeLast(pCur, pRes);
}
/* Move the cursor so that it points to an entry in a table (a.k.a INTKEY)
@@ -74225,7 +74512,7 @@ static SQLITE_NOINLINE int btreeNext(BtCursor *pCur){
pPage = pCur->pPage;
idx = ++pCur->ix;
- if( NEVER(!pPage->isInit) || sqlite3FaultSim(412) ){
+ if( !pPage->isInit || sqlite3FaultSim(412) ){
return SQLITE_CORRUPT_BKPT;
}
@@ -74747,7 +75034,7 @@ static int freePage2(BtShared *pBt, MemPage *pMemPage, Pgno iPage){
/* If the database supports auto-vacuum, write an entry in the pointer-map
** to indicate that the page is free.
*/
- if( ISAUTOVACUUM ){
+ if( ISAUTOVACUUM(pBt) ){
ptrmapPut(pBt, iPage, PTRMAP_FREEPAGE, 0, &rc);
if( rc ) goto freepage_out;
}
@@ -75187,24 +75474,20 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
** in pTemp or the original pCell) and also record its index.
** Allocating a new entry in pPage->aCell[] implies that
** pPage->nOverflow is incremented.
-**
-** *pRC must be SQLITE_OK when this routine is called.
*/
-static void insertCell(
+static int insertCell(
MemPage *pPage, /* Page into which we are copying */
int i, /* New cell becomes the i-th cell of the page */
u8 *pCell, /* Content of the new cell */
int sz, /* Bytes of content in pCell */
u8 *pTemp, /* Temp storage space for pCell, if needed */
- Pgno iChild, /* If non-zero, replace first 4 bytes with this value */
- int *pRC /* Read and write return code from here */
+ Pgno iChild /* If non-zero, replace first 4 bytes with this value */
){
int idx = 0; /* Where to write new cell content in data[] */
int j; /* Loop counter */
u8 *data; /* The content of the whole page */
u8 *pIns; /* The point in pPage->aCellIdx[] where no cell inserted */
- assert( *pRC==SQLITE_OK );
assert( i>=0 && i<=pPage->nCell+pPage->nOverflow );
assert( MX_CELL(pPage->pBt)<=10921 );
assert( pPage->nCell<=MX_CELL(pPage->pBt) || CORRUPT_DB );
@@ -75239,14 +75522,13 @@ static void insertCell(
}else{
int rc = sqlite3PagerWrite(pPage->pDbPage);
if( rc!=SQLITE_OK ){
- *pRC = rc;
- return;
+ return rc;
}
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
data = pPage->aData;
assert( &data[pPage->cellOffset]==pPage->aCellIdx );
rc = allocateSpace(pPage, sz, &idx);
- if( rc ){ *pRC = rc; return; }
+ if( rc ){ return rc; }
/* The allocateSpace() routine guarantees the following properties
** if it returns successfully */
assert( idx >= 0 );
@@ -75273,13 +75555,16 @@ static void insertCell(
assert( get2byte(&data[pPage->hdrOffset+3])==pPage->nCell || CORRUPT_DB );
#ifndef SQLITE_OMIT_AUTOVACUUM
if( pPage->pBt->autoVacuum ){
+ int rc2 = SQLITE_OK;
/* The cell may contain a pointer to an overflow page. If so, write
** the entry for the overflow page into the pointer map.
*/
- ptrmapPutOvflPtr(pPage, pPage, pCell, pRC);
+ ptrmapPutOvflPtr(pPage, pPage, pCell, &rc2);
+ if( rc2 ) return rc2;
}
#endif
}
+ return SQLITE_OK;
}
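insertCell() now reports failure through its return value instead of an in/out rc parameter; the balance_quick() and balance_nonroot() hunks below show the updated call sites. Schematically:

    /* before: insertCell(pPage, i, pCell, sz, pTemp, iChild, &rc);  */
    rc = insertCell(pPage, i, pCell, sz, pTemp, iChild);
    if( rc ) goto balance_cleanup;   /* label as used at the call sites */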
/*
@@ -75380,14 +75665,16 @@ struct CellArray {
** computed.
*/
static void populateCellCache(CellArray *p, int idx, int N){
+ MemPage *pRef = p->pRef;
+ u16 *szCell = p->szCell;
assert( idx>=0 && idx+N<=p->nCell );
while( N>0 ){
assert( p->apCell[idx]!=0 );
- if( p->szCell[idx]==0 ){
- p->szCell[idx] = p->pRef->xCellSize(p->pRef, p->apCell[idx]);
+ if( szCell[idx]==0 ){
+ szCell[idx] = pRef->xCellSize(pRef, p->apCell[idx]);
}else{
assert( CORRUPT_DB ||
- p->szCell[idx]==p->pRef->xCellSize(p->pRef, p->apCell[idx]) );
+ szCell[idx]==pRef->xCellSize(pRef, p->apCell[idx]) );
}
idx++;
N--;
@@ -75589,8 +75876,8 @@ static int pageFreeArray(
int nRet = 0;
int i;
int iEnd = iFirst + nCell;
- u8 *pFree = 0;
- int szFree = 0;
+ u8 *pFree = 0; /* \__ Parameters for pending call to */
+ int szFree = 0; /* / freeSpace() */
for(i=iFirst; i<iEnd; i++){
u8 *pCell = pCArray->apCell[i];
@@ -75611,6 +75898,9 @@ static int pageFreeArray(
return 0;
}
}else{
+ /* The current cell is adjacent to and before the pFree cell.
+ ** Combine the two regions into one to reduce the number of calls
+ ** to freeSpace(). */
pFree = pCell;
szFree += sz;
}
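/* Editor's note: an illustration of the coalescing above.  When cells sit
** back-to-back on the page,
**
**     | cell A | cell B | cell C |
**     ^pA      ^pB      ^pC
**
** and each newly visited cell ends exactly where the pending pFree region
** begins, pFree slides back to the new cell while szFree accumulates, so
** all three cells are released with a single
** freeSpace(pPage, pA, szA+szB+szC) call instead of three. */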
@@ -75818,7 +76108,7 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
** be marked as dirty. Returning an error code will cause a
** rollback, undoing any changes made to the parent page.
*/
- if( ISAUTOVACUUM ){
+ if( ISAUTOVACUUM(pBt) ){
ptrmapPut(pBt, pgnoNew, PTRMAP_BTREE, pParent->pgno, &rc);
if( szCell>pNew->minLocal ){
ptrmapPutOvflPtr(pNew, pNew, pCell, &rc);
@@ -75846,8 +76136,8 @@ static int balance_quick(MemPage *pParent, MemPage *pPage, u8 *pSpace){
/* Insert the new divider cell into pParent. */
if( rc==SQLITE_OK ){
- insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace),
- 0, pPage->pgno, &rc);
+ rc = insertCell(pParent, pParent->nCell, pSpace, (int)(pOut-pSpace),
+ 0, pPage->pgno);
}
/* Set the right-child pointer of pParent to point to the new page. */
@@ -75956,7 +76246,7 @@ static void copyNodeContent(MemPage *pFrom, MemPage *pTo, int *pRC){
/* If this is an auto-vacuum database, update the pointer-map entries
** for any b-tree or overflow pages that pTo now contains the pointers to.
*/
- if( ISAUTOVACUUM ){
+ if( ISAUTOVACUUM(pBt) ){
*pRC = setChildPtrmaps(pTo);
}
}
@@ -76380,15 +76670,17 @@ static int balance_nonroot(
d = r + 1 - leafData;
(void)cachedCellSize(&b, d);
do{
+ int szR, szD;
assert( d<nMaxCells );
assert( r<nMaxCells );
- (void)cachedCellSize(&b, r);
+ szR = cachedCellSize(&b, r);
+ szD = b.szCell[d];
if( szRight!=0
- && (bBulk || szRight+b.szCell[d]+2 > szLeft-(b.szCell[r]+(i==k-1?0:2)))){
+ && (bBulk || szRight+szD+2 > szLeft-(szR+(i==k-1?0:2)))){
break;
}
- szRight += b.szCell[d] + 2;
- szLeft -= b.szCell[r] + 2;
+ szRight += szD + 2;
+ szLeft -= szR + 2;
cntNew[i-1] = r;
r--;
d--;
@@ -76442,7 +76734,7 @@ static int balance_nonroot(
cntOld[i] = b.nCell;
/* Set the pointer-map entry for the new sibling page. */
- if( ISAUTOVACUUM ){
+ if( ISAUTOVACUUM(pBt) ){
ptrmapPut(pBt, pNew->pgno, PTRMAP_BTREE, pParent->pgno, &rc);
if( rc!=SQLITE_OK ){
goto balance_cleanup;
@@ -76535,7 +76827,7 @@ static int balance_nonroot(
** updated. This happens below, after the sibling pages have been
** populated, not here.
*/
- if( ISAUTOVACUUM ){
+ if( ISAUTOVACUUM(pBt) ){
MemPage *pOld;
MemPage *pNew = pOld = apNew[0];
int cntOldNext = pNew->nCell + pNew->nOverflow;
@@ -76632,7 +76924,7 @@ static int balance_nonroot(
rc = SQLITE_CORRUPT_BKPT;
goto balance_cleanup;
}
- insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno, &rc);
+ rc = insertCell(pParent, nxDiv+i, pCell, sz, pTemp, pNew->pgno);
if( rc!=SQLITE_OK ) goto balance_cleanup;
assert( sqlite3PagerIswriteable(pParent->pDbPage) );
}
@@ -76728,7 +77020,7 @@ static int balance_nonroot(
);
copyNodeContent(apNew[0], pParent, &rc);
freePage(apNew[0], &rc);
- }else if( ISAUTOVACUUM && !leafCorrection ){
+ }else if( ISAUTOVACUUM(pBt) && !leafCorrection ){
/* Fix the pointer map entries associated with the right-child of each
** sibling page. All other pointer map entries have already been taken
** care of. */
@@ -76749,7 +77041,7 @@ static int balance_nonroot(
}
#if 0
- if( ISAUTOVACUUM && rc==SQLITE_OK && apNew[0]->isInit ){
+ if( ISAUTOVACUUM(pBt) && rc==SQLITE_OK && apNew[0]->isInit ){
/* The ptrmapCheckPages() contains assert() statements that verify that
** all pointer map pages are set correctly. This is helpful while
** debugging. This is usually disabled because a corrupt database may
@@ -76811,7 +77103,7 @@ static int balance_deeper(MemPage *pRoot, MemPage **ppChild){
if( rc==SQLITE_OK ){
rc = allocateBtreePage(pBt,&pChild,&pgnoChild,pRoot->pgno,0);
copyNodeContent(pRoot, pChild, &rc);
- if( ISAUTOVACUUM ){
+ if( ISAUTOVACUUM(pBt) ){
ptrmapPut(pBt, pgnoChild, PTRMAP_BTREE, pRoot->pgno, &rc);
}
}
@@ -77050,9 +77342,13 @@ static int btreeOverwriteContent(
/*
** Overwrite the cell that cursor pCur is pointing to with fresh content
-** contained in pX.
+** contained in pX. In this variant, pCur is pointing to an overflow
+** cell.
*/
-static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
+static SQLITE_NOINLINE int btreeOverwriteOverflowCell(
+ BtCursor *pCur, /* Cursor pointing to cell to overwrite */
+ const BtreePayload *pX /* Content to write into the cell */
+){
int iOffset; /* Next byte of pX->pData to write */
int nTotal = pX->nData + pX->nZero; /* Total bytes of data to write */
int rc; /* Return code */
@@ -77061,16 +77357,12 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
Pgno ovflPgno; /* Next overflow page to write */
u32 ovflPageSize; /* Size to write on overflow page */
- if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd
- || pCur->info.pPayload < pPage->aData + pPage->cellOffset
- ){
- return SQLITE_CORRUPT_BKPT;
- }
+ assert( pCur->info.nLocal<nTotal ); /* pCur is an overflow cell */
+
/* Overwrite the local portion first */
rc = btreeOverwriteContent(pPage, pCur->info.pPayload, pX,
0, pCur->info.nLocal);
if( rc ) return rc;
- if( pCur->info.nLocal==nTotal ) return SQLITE_OK;
/* Now overwrite the overflow pages */
iOffset = pCur->info.nLocal;
@@ -77100,6 +77392,29 @@ static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
return SQLITE_OK;
}
+/*
+** Overwrite the cell that cursor pCur is pointing to with fresh content
+** contained in pX.
+*/
+static int btreeOverwriteCell(BtCursor *pCur, const BtreePayload *pX){
+ int nTotal = pX->nData + pX->nZero; /* Total bytes of data to write */
+ MemPage *pPage = pCur->pPage; /* Page being written */
+
+ if( pCur->info.pPayload + pCur->info.nLocal > pPage->aDataEnd
+ || pCur->info.pPayload < pPage->aData + pPage->cellOffset
+ ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( pCur->info.nLocal==nTotal ){
+ /* The entire cell is local */
+ return btreeOverwriteContent(pPage, pCur->info.pPayload, pX,
+ 0, pCur->info.nLocal);
+ }else{
+ /* The cell contains overflow content */
+ return btreeOverwriteOverflowCell(pCur, pX);
+ }
+}
+
/*
** Insert a new record into the BTree. The content of the new record
@@ -77143,7 +77458,6 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
int idx;
MemPage *pPage;
Btree *p = pCur->pBtree;
- BtShared *pBt = p->pBt;
unsigned char *oldCell;
unsigned char *newCell = 0;
@@ -77162,7 +77476,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
** not to clear the cursor here.
*/
if( pCur->curFlags & BTCF_Multiple ){
- rc = saveAllCursors(pBt, pCur->pgnoRoot, pCur);
+ rc = saveAllCursors(p->pBt, pCur->pgnoRoot, pCur);
if( rc ) return rc;
if( loc && pCur->iPage<0 ){
/* This can only happen if the schema is corrupt such that there is more
@@ -77186,8 +77500,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
assert( cursorOwnsBtShared(pCur) );
assert( (pCur->curFlags & BTCF_WriteFlag)!=0
- && pBt->inTransaction==TRANS_WRITE
- && (pBt->btsFlags & BTS_READ_ONLY)==0 );
+ && p->pBt->inTransaction==TRANS_WRITE
+ && (p->pBt->btsFlags & BTS_READ_ONLY)==0 );
assert( hasSharedCacheTableLock(p, pCur->pgnoRoot, pCur->pKeyInfo!=0, 2) );
/* Assert that the caller has been consistent. If this cursor was opened
@@ -77304,27 +77618,30 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
pCur->pgnoRoot, pX->nKey, pX->nData, pPage->pgno,
loc==0 ? "overwrite" : "new entry"));
assert( pPage->isInit || CORRUPT_DB );
- newCell = pBt->pTmpSpace;
+ newCell = p->pBt->pTmpSpace;
assert( newCell!=0 );
+ assert( BTREE_PREFORMAT==OPFLAG_PREFORMAT );
if( flags & BTREE_PREFORMAT ){
rc = SQLITE_OK;
- szNew = pBt->nPreformatSize;
+ szNew = p->pBt->nPreformatSize;
if( szNew<4 ) szNew = 4;
- if( ISAUTOVACUUM && szNew>pPage->maxLocal ){
+ if( ISAUTOVACUUM(p->pBt) && szNew>pPage->maxLocal ){
CellInfo info;
pPage->xParseCell(pPage, newCell, &info);
if( info.nPayload!=info.nLocal ){
Pgno ovfl = get4byte(&newCell[szNew-4]);
- ptrmapPut(pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc);
+ ptrmapPut(p->pBt, ovfl, PTRMAP_OVERFLOW1, pPage->pgno, &rc);
+ if( NEVER(rc) ) goto end_insert;
}
}
}else{
rc = fillInCell(pPage, newCell, pX, &szNew);
+ if( rc ) goto end_insert;
}
- if( rc ) goto end_insert;
assert( szNew==pPage->xCellSize(pPage, newCell) );
- assert( szNew <= MX_CELL_SIZE(pBt) );
+ assert( szNew <= MX_CELL_SIZE(p->pBt) );
idx = pCur->ix;
+ pCur->info.nSize = 0;
if( loc==0 ){
CellInfo info;
assert( idx>=0 );
@@ -77343,7 +77660,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
testcase( pCur->curFlags & BTCF_ValidOvfl );
invalidateOverflowCache(pCur);
if( info.nSize==szNew && info.nLocal==info.nPayload
- && (!ISAUTOVACUUM || szNew<pPage->minLocal)
+ && (!ISAUTOVACUUM(p->pBt) || szNew<pPage->minLocal)
){
/* Overwrite the old cell with the new if they are the same size.
** We could also try to do this if the old cell is smaller, then add
@@ -77373,7 +77690,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
}else{
assert( pPage->leaf );
}
- insertCell(pPage, idx, newCell, szNew, 0, 0, &rc);
+ rc = insertCell(pPage, idx, newCell, szNew, 0, 0);
assert( pPage->nOverflow==0 || rc==SQLITE_OK );
assert( rc!=SQLITE_OK || pPage->nCell>0 || pPage->nOverflow>0 );
@@ -77397,7 +77714,6 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
** larger than the largest existing key, it is possible to insert the
** row without seeking the cursor. This can be a big performance boost.
*/
- pCur->info.nSize = 0;
if( pPage->nOverflow ){
assert( rc==SQLITE_OK );
pCur->curFlags &= ~(BTCF_ValidNKey);
@@ -77446,7 +77762,6 @@ end_insert:
** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
*/
SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64 iKey){
- int rc = SQLITE_OK;
BtShared *pBt = pDest->pBt;
u8 *aOut = pBt->pTmpSpace; /* Pointer to next output buffer */
const u8 *aIn; /* Pointer to next input buffer */
@@ -77469,7 +77784,9 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
if( nIn==nRem && nIn<pDest->pPage->maxLocal ){
memcpy(aOut, aIn, nIn);
pBt->nPreformatSize = nIn + (aOut - pBt->pTmpSpace);
+ return SQLITE_OK;
}else{
+ int rc = SQLITE_OK;
Pager *pSrcPager = pSrc->pBt->pPager;
u8 *pPgnoOut = 0;
Pgno ovflIn = 0;
@@ -77521,7 +77838,7 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
MemPage *pNew = 0;
rc = allocateBtreePage(pBt, &pNew, &pgnoNew, 0, 0);
put4byte(pPgnoOut, pgnoNew);
- if( ISAUTOVACUUM && pPageOut ){
+ if( ISAUTOVACUUM(pBt) && pPageOut ){
ptrmapPut(pBt, pgnoNew, PTRMAP_OVERFLOW2, pPageOut->pgno, &rc);
}
releasePage(pPageOut);
@@ -77537,9 +77854,8 @@ SQLITE_PRIVATE int sqlite3BtreeTransferRow(BtCursor *pDest, BtCursor *pSrc, i64
releasePage(pPageOut);
sqlite3PagerUnref(pPageIn);
+ return rc;
}
-
- return rc;
}
/*
@@ -77694,7 +78010,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
assert( pTmp!=0 );
rc = sqlite3PagerWrite(pLeaf->pDbPage);
if( rc==SQLITE_OK ){
- insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n, &rc);
+ rc = insertCell(pPage, iCellIdx, pCell-4, nCell+4, pTmp, n);
}
dropCell(pLeaf, pLeaf->nCell-1, nCell, &rc);
if( rc ) return rc;
@@ -78294,6 +78610,41 @@ SQLITE_PRIVATE Pager *sqlite3BtreePager(Btree *p){
#ifndef SQLITE_OMIT_INTEGRITY_CHECK
/*
+** Record an OOM error during integrity_check
+*/
+static void checkOom(IntegrityCk *pCheck){
+ pCheck->rc = SQLITE_NOMEM;
+ pCheck->mxErr = 0; /* Causes integrity_check processing to stop */
+ if( pCheck->nErr==0 ) pCheck->nErr++;
+}
+
+/*
+** Invoke the progress handler, if appropriate. Also check for an
+** interrupt.
+*/
+static void checkProgress(IntegrityCk *pCheck){
+ sqlite3 *db = pCheck->db;
+ if( AtomicLoad(&db->u1.isInterrupted) ){
+ pCheck->rc = SQLITE_INTERRUPT;
+ pCheck->nErr++;
+ pCheck->mxErr = 0;
+ }
+#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
+ if( db->xProgress ){
+ assert( db->nProgressOps>0 );
+ pCheck->nStep++;
+ if( (pCheck->nStep % db->nProgressOps)==0
+ && db->xProgress(db->pProgressArg)
+ ){
+ pCheck->rc = SQLITE_INTERRUPT;
+ pCheck->nErr++;
+ pCheck->mxErr = 0;
+ }
+ }
+#endif
+}
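/* Editor's note: checkProgress() is what lets a long-running
** "PRAGMA integrity_check" honor sqlite3_interrupt() and the progress
** callback.  A minimal application-side sketch (assumed usage, not part
** of this diff):
**
**   static int xStopAfterBudget(void *pArg){
**     int *pnBudget = (int*)pArg;
**     return (--*pnBudget)<=0;   // non-zero return => SQLITE_INTERRUPT
**   }
**   ...
**   int budget = 1000;
**   sqlite3_progress_handler(db, 1000, xStopAfterBudget, &budget);
**   sqlite3_exec(db, "PRAGMA integrity_check", 0, 0, 0);
*/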
+
+/*
** Append a message to the error message string.
*/
static void checkAppendMsg(
@@ -78302,6 +78653,7 @@ static void checkAppendMsg(
...
){
va_list ap;
+ checkProgress(pCheck);
if( !pCheck->mxErr ) return;
pCheck->mxErr--;
pCheck->nErr++;
@@ -78315,7 +78667,7 @@ static void checkAppendMsg(
sqlite3_str_vappendf(&pCheck->errMsg, zFormat, ap);
va_end(ap);
if( pCheck->errMsg.accError==SQLITE_NOMEM ){
- pCheck->bOomFault = 1;
+ checkOom(pCheck);
}
}
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
@@ -78357,7 +78709,6 @@ static int checkRef(IntegrityCk *pCheck, Pgno iPage){
checkAppendMsg(pCheck, "2nd reference to page %d", iPage);
return 1;
}
- if( AtomicLoad(&pCheck->db->u1.isInterrupted) ) return 1;
setPageReferenced(pCheck, iPage);
return 0;
}
@@ -78380,7 +78731,7 @@ static void checkPtrmap(
rc = ptrmapGet(pCheck->pBt, iChild, &ePtrmapType, &iPtrmapParent);
if( rc!=SQLITE_OK ){
- if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) pCheck->bOomFault = 1;
+ if( rc==SQLITE_NOMEM || rc==SQLITE_IOERR_NOMEM ) checkOom(pCheck);
checkAppendMsg(pCheck, "Failed to read ptrmap key=%d", iChild);
return;
}
@@ -78487,7 +78838,9 @@ static void checkList(
** lower 16 bits are the index of the last byte of that range.
*/
static void btreeHeapInsert(u32 *aHeap, u32 x){
- u32 j, i = ++aHeap[0];
+ u32 j, i;
+ assert( aHeap!=0 );
+ i = ++aHeap[0];
aHeap[i] = x;
while( (j = i/2)>0 && aHeap[j]>aHeap[i] ){
x = aHeap[j];
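/* Editor's note: each heap entry packs one byte range into a u32 -- the
** upper 16 bits hold the first byte of the range, the lower 16 bits the
** last byte -- so the min-heap orders ranges by start offset.  For example:
**
**   u32 x = (100<<16) | 131;    // range [100..131] => 0x00640083
**   u32 iStart = x>>16;         // 100
**   u32 iEnd   = x&0xffff;      // 131
*/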
@@ -78564,6 +78917,8 @@ static int checkTreePage(
/* Check that the page exists
*/
+ checkProgress(pCheck);
+ if( pCheck->mxErr==0 ) goto end_of_check;
pBt = pCheck->pBt;
usableSize = pBt->usableSize;
if( iPage==0 ) return 0;
@@ -78809,13 +79164,14 @@ end_of_check:
** the unverified btrees. Except, if aRoot[1] is 1, then the freelist
** checks are still performed.
*/
-SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
+SQLITE_PRIVATE int sqlite3BtreeIntegrityCheck(
sqlite3 *db, /* Database connection that is running the check */
Btree *p, /* The btree to be checked */
Pgno *aRoot, /* An array of root pages numbers for individual trees */
int nRoot, /* Number of entries in aRoot[] */
int mxErr, /* Stop reporting errors after this many */
- int *pnErr /* Write number of errors seen to this variable */
+ int *pnErr, /* OUT: Write number of errors seen to this variable */
+ char **pzOut /* OUT: Write the error message string here */
){
Pgno i;
IntegrityCk sCheck;
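/* Editor's note: callers of sqlite3BtreeIntegrityCheck() now receive the
** error text through *pzOut and a status code (SQLITE_OK, SQLITE_NOMEM or
** SQLITE_INTERRUPT) as the return value, instead of inferring OOM from a
** NULL string.  The updated calling pattern, mirroring the OP_IntegrityCk
** hunk later in this diff:
**
**   char *zErr = 0;
**   int nErr = 0;
**   rc = sqlite3BtreeIntegrityCheck(db, pBt, aRoot, nRoot, mxErr,
**                                   &nErr, &zErr);
**   if( rc ){ sqlite3_free(zErr); goto abort; }      // abort is a placeholder
**   if( nErr ) report(zErr);                         // report() likewise
**   sqlite3_free(zErr);
*/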
@@ -78838,18 +79194,12 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
assert( p->inTrans>TRANS_NONE && pBt->inTransaction>TRANS_NONE );
VVA_ONLY( nRef = sqlite3PagerRefcount(pBt->pPager) );
assert( nRef>=0 );
+ memset(&sCheck, 0, sizeof(sCheck));
sCheck.db = db;
sCheck.pBt = pBt;
sCheck.pPager = pBt->pPager;
sCheck.nPage = btreePagecount(sCheck.pBt);
sCheck.mxErr = mxErr;
- sCheck.nErr = 0;
- sCheck.bOomFault = 0;
- sCheck.zPfx = 0;
- sCheck.v1 = 0;
- sCheck.v2 = 0;
- sCheck.aPgRef = 0;
- sCheck.heap = 0;
sqlite3StrAccumInit(&sCheck.errMsg, 0, zErr, sizeof(zErr), SQLITE_MAX_LENGTH);
sCheck.errMsg.printfFlags = SQLITE_PRINTF_INTERNAL;
if( sCheck.nPage==0 ){
@@ -78858,12 +79208,12 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
sCheck.aPgRef = sqlite3MallocZero((sCheck.nPage / 8)+ 1);
if( !sCheck.aPgRef ){
- sCheck.bOomFault = 1;
+ checkOom(&sCheck);
goto integrity_ck_cleanup;
}
sCheck.heap = (u32*)sqlite3PageMalloc( pBt->pageSize );
if( sCheck.heap==0 ){
- sCheck.bOomFault = 1;
+ checkOom(&sCheck);
goto integrity_ck_cleanup;
}
@@ -78944,16 +79294,17 @@ SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(
integrity_ck_cleanup:
sqlite3PageFree(sCheck.heap);
sqlite3_free(sCheck.aPgRef);
- if( sCheck.bOomFault ){
+ *pnErr = sCheck.nErr;
+ if( sCheck.nErr==0 ){
sqlite3_str_reset(&sCheck.errMsg);
- sCheck.nErr++;
+ *pzOut = 0;
+ }else{
+ *pzOut = sqlite3StrAccumFinish(&sCheck.errMsg);
}
- *pnErr = sCheck.nErr;
- if( sCheck.nErr==0 ) sqlite3_str_reset(&sCheck.errMsg);
/* Make sure this analysis did not leave any unref() pages. */
assert( nRef==sqlite3PagerRefcount(pBt->pPager) );
sqlite3BtreeLeave(p);
- return sqlite3StrAccumFinish(&sCheck.errMsg);
+ return sCheck.rc;
}
#endif /* SQLITE_OMIT_INTEGRITY_CHECK */
@@ -80139,9 +80490,9 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){
i64 x;
assert( (p->flags&MEM_Int)*2==sizeof(x) );
memcpy(&x, (char*)&p->u, (p->flags&MEM_Int)*2);
- sqlite3Int64ToText(x, zBuf);
+ p->n = sqlite3Int64ToText(x, zBuf);
#else
- sqlite3Int64ToText(p->u.i, zBuf);
+ p->n = sqlite3Int64ToText(p->u.i, zBuf);
#endif
}else{
sqlite3StrAccumInit(&acc, 0, zBuf, sz, 0);
@@ -80149,6 +80500,7 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){
(p->flags & MEM_IntReal)!=0 ? (double)p->u.i : p->u.r);
assert( acc.zText==zBuf && acc.mxAlloc<=0 );
zBuf[acc.nChar] = 0; /* Fast version of sqlite3StrAccumFinish(&acc) */
+ p->n = acc.nChar;
}
}
@@ -80176,6 +80528,7 @@ static void vdbeMemRenderNum(int sz, char *zBuf, Mem *p){
** This routine is for use inside of assert() statements only.
*/
SQLITE_PRIVATE int sqlite3VdbeMemValidStrRep(Mem *p){
+ Mem tmp;
char zBuf[100];
char *z;
int i, j, incr;
@@ -80192,7 +80545,8 @@ SQLITE_PRIVATE int sqlite3VdbeMemValidStrRep(Mem *p){
assert( p->enc==SQLITE_UTF8 || p->z[((p->n+1)&~1)+1]==0 );
}
if( (p->flags & (MEM_Int|MEM_Real|MEM_IntReal))==0 ) return 1;
- vdbeMemRenderNum(sizeof(zBuf), zBuf, p);
+ memcpy(&tmp, p, sizeof(tmp));
+ vdbeMemRenderNum(sizeof(zBuf), zBuf, &tmp);
z = p->z;
i = j = 0;
incr = 1;
@@ -80461,7 +80815,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemStringify(Mem *pMem, u8 enc, u8 bForce){
vdbeMemRenderNum(nByte, pMem->z, pMem);
assert( pMem->z!=0 );
- pMem->n = sqlite3Strlen30NN(pMem->z);
+ assert( pMem->n==sqlite3Strlen30NN(pMem->z) );
pMem->enc = SQLITE_UTF8;
pMem->flags |= MEM_Str|MEM_Term;
if( bForce ) pMem->flags &= ~(MEM_Int|MEM_Real|MEM_IntReal);
@@ -80701,32 +81055,35 @@ SQLITE_PRIVATE int sqlite3VdbeBooleanValue(Mem *pMem, int ifNull){
}
/*
-** The MEM structure is already a MEM_Real. Try to also make it a
-** MEM_Int if we can.
+** The MEM structure is already a MEM_Real or MEM_IntReal. Try to
+** make it a MEM_Int if we can.
*/
SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem *pMem){
- i64 ix;
assert( pMem!=0 );
- assert( pMem->flags & MEM_Real );
+ assert( pMem->flags & (MEM_Real|MEM_IntReal) );
assert( !sqlite3VdbeMemIsRowSet(pMem) );
assert( pMem->db==0 || sqlite3_mutex_held(pMem->db->mutex) );
assert( EIGHT_BYTE_ALIGNMENT(pMem) );
- ix = doubleToInt64(pMem->u.r);
-
- /* Only mark the value as an integer if
- **
- ** (1) the round-trip conversion real->int->real is a no-op, and
- ** (2) The integer is neither the largest nor the smallest
- ** possible integer (ticket #3922)
- **
- ** The second and third terms in the following conditional enforces
- ** the second condition under the assumption that addition overflow causes
- ** values to wrap around.
- */
- if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){
- pMem->u.i = ix;
+ if( pMem->flags & MEM_IntReal ){
MemSetTypeFlag(pMem, MEM_Int);
+ }else{
+ i64 ix = doubleToInt64(pMem->u.r);
+
+ /* Only mark the value as an integer if
+ **
+ ** (1) the round-trip conversion real->int->real is a no-op, and
+ ** (2) The integer is neither the largest nor the smallest
+ ** possible integer (ticket #3922)
+ **
+ ** The second and third terms in the following conditional enforce
+ ** the second condition under the assumption that addition overflow causes
+ ** values to wrap around.
+ */
+ if( pMem->u.r==ix && ix>SMALLEST_INT64 && ix<LARGEST_INT64 ){
+ pMem->u.i = ix;
+ MemSetTypeFlag(pMem, MEM_Int);
+ }
}
}
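/* Editor's note: a worked example of the round-trip rule above.  3.0
** satisfies pMem->u.r==ix (3.0 -> 3 -> 3.0) and becomes MEM_Int; 3.5 fails
** the equality (3.5 -> 3 -> 3.0) and stays MEM_Real; 9.3e18 exceeds
** LARGEST_INT64, so the range guard rejects it.  MEM_IntReal values skip
** the test entirely because they already hold an exact integer. */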
@@ -81527,8 +81884,6 @@ static int valueFromFunction(
goto value_from_function_out;
}
- testcase( pCtx->pParse->rc==SQLITE_ERROR );
- testcase( pCtx->pParse->rc==SQLITE_OK );
memset(&ctx, 0, sizeof(ctx));
ctx.pOut = pVal;
ctx.pFunc = pFunc;
@@ -81540,17 +81895,22 @@ static int valueFromFunction(
}else{
sqlite3ValueApplyAffinity(pVal, aff, SQLITE_UTF8);
assert( rc==SQLITE_OK );
+ assert( enc==pVal->enc
+ || (pVal->flags & MEM_Str)==0
+ || db->mallocFailed );
+#if 0 /* Not reachable except after a prior failure */
rc = sqlite3VdbeChangeEncoding(pVal, enc);
if( rc==SQLITE_OK && sqlite3VdbeMemTooBig(pVal) ){
rc = SQLITE_TOOBIG;
pCtx->pParse->nErr++;
}
+#endif
}
- pCtx->pParse->rc = rc;
value_from_function_out:
if( rc!=SQLITE_OK ){
pVal = 0;
+ pCtx->pParse->rc = rc;
}
if( apVal ){
for(i=0; i<nVal; i++){
@@ -82223,6 +82583,8 @@ static int growOpArray(Vdbe *v, int nOp){
*/
static void test_addop_breakpoint(int pc, Op *pOp){
static int n = 0;
+ (void)pc;
+ (void)pOp;
n++;
}
#endif
@@ -82273,16 +82635,16 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp3(Vdbe *p, int op, int p1, int p2, int p3){
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
pOp->zComment = 0;
#endif
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
+ pOp->nExec = 0;
+ pOp->nCycle = 0;
+#endif
#ifdef SQLITE_DEBUG
if( p->db->flags & SQLITE_VdbeAddopTrace ){
sqlite3VdbePrintOp(0, i, &p->aOp[i]);
test_addop_breakpoint(i, &p->aOp[i]);
}
#endif
-#ifdef VDBE_PROFILE
- pOp->cycles = 0;
- pOp->cnt = 0;
-#endif
#ifdef SQLITE_VDBE_COVERAGE
pOp->iSrcLine = 0;
#endif
@@ -82450,8 +82812,9 @@ SQLITE_PRIVATE void sqlite3ExplainBreakpoint(const char *z1, const char *z2){
** If the bPush flag is true, then make this opcode the parent for
** subsequent Explains until sqlite3VdbeExplainPop() is called.
*/
-SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){
-#ifndef SQLITE_DEBUG
+SQLITE_PRIVATE int sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt, ...){
+ int addr = 0;
+#if !defined(SQLITE_DEBUG) && !defined(SQLITE_ENABLE_STMT_SCANSTATUS)
/* Always include the OP_Explain opcodes if SQLITE_DEBUG is defined.
** But omit them (for performance) during production builds */
if( pParse->explain==2 )
@@ -82466,13 +82829,15 @@ SQLITE_PRIVATE void sqlite3VdbeExplain(Parse *pParse, u8 bPush, const char *zFmt
va_end(ap);
v = pParse->pVdbe;
iThis = v->nOp;
- sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0,
+ addr = sqlite3VdbeAddOp4(v, OP_Explain, iThis, pParse->addrExplain, 0,
zMsg, P4_DYNAMIC);
sqlite3ExplainBreakpoint(bPush?"PUSH":"", sqlite3VdbeGetLastOp(v)->p4.z);
if( bPush){
pParse->addrExplain = iThis;
}
+ sqlite3VdbeScanStatus(v, iThis, 0, 0, 0, 0);
}
+ return addr;
}
/*
@@ -82580,6 +82945,9 @@ static SQLITE_NOINLINE void resizeResolveLabel(Parse *p, Vdbe *v, int j){
int i;
for(i=p->nLabelAlloc; i<nNewSize; i++) p->aLabel[i] = -1;
#endif
+ if( nNewSize>=100 && (nNewSize/100)>(p->nLabelAlloc/100) ){
+ sqlite3ProgressCheck(p);
+ }
p->nLabelAlloc = nNewSize;
p->aLabel[j] = v->nOp;
}
@@ -83130,6 +83498,7 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus(
aNew = (ScanStatus*)sqlite3DbRealloc(p->db, p->aScan, nByte);
if( aNew ){
ScanStatus *pNew = &aNew[p->nScan++];
+ memset(pNew, 0, sizeof(ScanStatus));
pNew->addrExplain = addrExplain;
pNew->addrLoop = addrLoop;
pNew->addrVisit = addrVisit;
@@ -83138,6 +83507,62 @@ SQLITE_PRIVATE void sqlite3VdbeScanStatus(
p->aScan = aNew;
}
}
+
+/*
+** Add the range of instructions from addrStart to addrEnd (inclusive) to
+** the set of those corresponding to the sqlite3_stmt_scanstatus() counters
+** associated with the OP_Explain instruction at addrExplain. The
+** sum of the sqlite3Hwtime() values for each of these instructions
+** will be returned for SQLITE_SCANSTAT_NCYCLE requests.
+*/
+SQLITE_PRIVATE void sqlite3VdbeScanStatusRange(
+ Vdbe *p,
+ int addrExplain,
+ int addrStart,
+ int addrEnd
+){
+ ScanStatus *pScan = 0;
+ int ii;
+ for(ii=p->nScan-1; ii>=0; ii--){
+ pScan = &p->aScan[ii];
+ if( pScan->addrExplain==addrExplain ) break;
+ pScan = 0;
+ }
+ if( pScan ){
+ if( addrEnd<0 ) addrEnd = sqlite3VdbeCurrentAddr(p)-1;
+ for(ii=0; ii<ArraySize(pScan->aAddrRange); ii+=2){
+ if( pScan->aAddrRange[ii]==0 ){
+ pScan->aAddrRange[ii] = addrStart;
+ pScan->aAddrRange[ii+1] = addrEnd;
+ break;
+ }
+ }
+ }
+}
+
+/*
+** Set the addresses for the SQLITE_SCANSTAT_NLOOP and SQLITE_SCANSTAT_NROW
+** counters for the query element associated with the OP_Explain at
+** addrExplain.
+*/
+SQLITE_PRIVATE void sqlite3VdbeScanStatusCounters(
+ Vdbe *p,
+ int addrExplain,
+ int addrLoop,
+ int addrVisit
+){
+ ScanStatus *pScan = 0;
+ int ii;
+ for(ii=p->nScan-1; ii>=0; ii--){
+ pScan = &p->aScan[ii];
+ if( pScan->addrExplain==addrExplain ) break;
+ pScan = 0;
+ }
+ if( pScan ){
+ pScan->addrLoop = addrLoop;
+ pScan->addrVisit = addrVisit;
+ }
+}
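/* Editor's note: a hypothetical code-generator sequence showing how the two
** helpers above cooperate (addresses invented for illustration):
**
**   int addrExplain = sqlite3VdbeExplain(pParse, 1, "SCAN t1");
**   int addrTop = sqlite3VdbeCurrentAddr(v);
**   ...emit the loop body...
**   sqlite3VdbeScanStatusRange(v, addrExplain, addrTop, -1);  // -1 => up to here
**   sqlite3VdbeScanStatusCounters(v, addrExplain, addrLoop, addrVisit);
*/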
#endif
@@ -84267,7 +84692,6 @@ SQLITE_PRIVATE int sqlite3VdbeList(
** sqlite3_column_text16(), causing a translation to UTF-16 encoding.
*/
releaseMemArray(pMem, 8);
- p->pResultSet = 0;
if( p->rc==SQLITE_NOMEM ){
/* This happens if a malloc() inside a call to sqlite3_column_text() or
@@ -84324,7 +84748,7 @@ SQLITE_PRIVATE int sqlite3VdbeList(
sqlite3VdbeMemSetStr(pMem+5, zP4, -1, SQLITE_UTF8, sqlite3_free);
p->nResColumn = 8;
}
- p->pResultSet = pMem;
+ p->pResultRow = pMem;
if( db->mallocFailed ){
p->rc = SQLITE_NOMEM;
rc = SQLITE_ERROR;
@@ -84435,7 +84859,7 @@ static void *allocSpace(
** running it.
*/
SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+#if defined(SQLITE_DEBUG)
int i;
#endif
assert( p!=0 );
@@ -84464,8 +84888,8 @@ SQLITE_PRIVATE void sqlite3VdbeRewind(Vdbe *p){
p->nFkConstraint = 0;
#ifdef VDBE_PROFILE
for(i=0; i<p->nOp; i++){
- p->aOp[i].cnt = 0;
- p->aOp[i].cycles = 0;
+ p->aOp[i].nExec = 0;
+ p->aOp[i].nCycle = 0;
}
#endif
}
@@ -84574,9 +84998,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
p->aVar = allocSpace(&x, 0, nVar*sizeof(Mem));
p->apArg = allocSpace(&x, 0, nArg*sizeof(Mem*));
p->apCsr = allocSpace(&x, 0, nCursor*sizeof(VdbeCursor*));
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- p->anExec = allocSpace(&x, 0, p->nOp*sizeof(i64));
-#endif
if( x.nNeeded ){
x.pSpace = p->pFree = sqlite3DbMallocRawNN(db, x.nNeeded);
x.nFree = x.nNeeded;
@@ -84585,9 +85006,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
p->aVar = allocSpace(&x, p->aVar, nVar*sizeof(Mem));
p->apArg = allocSpace(&x, p->apArg, nArg*sizeof(Mem*));
p->apCsr = allocSpace(&x, p->apCsr, nCursor*sizeof(VdbeCursor*));
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- p->anExec = allocSpace(&x, p->anExec, p->nOp*sizeof(i64));
-#endif
}
}
@@ -84602,9 +85020,6 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
p->nMem = nMem;
initMemArray(p->aMem, nMem, db, MEM_Undefined);
memset(p->apCsr, 0, nCursor*sizeof(VdbeCursor*));
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- memset(p->anExec, 0, p->nOp*sizeof(i64));
-#endif
}
sqlite3VdbeRewind(p);
}
@@ -84662,9 +85077,6 @@ static void closeCursorsInFrame(Vdbe *p){
SQLITE_PRIVATE int sqlite3VdbeFrameRestore(VdbeFrame *pFrame){
Vdbe *v = pFrame->v;
closeCursorsInFrame(v);
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- v->anExec = pFrame->anExec;
-#endif
v->aOp = pFrame->aOp;
v->nOp = pFrame->nOp;
v->aMem = pFrame->aMem;
@@ -85468,7 +85880,7 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
sqlite3DbFree(db, p->zErrMsg);
p->zErrMsg = 0;
}
- p->pResultSet = 0;
+ p->pResultRow = 0;
#ifdef SQLITE_DEBUG
p->nWrite = 0;
#endif
@@ -85496,10 +85908,12 @@ SQLITE_PRIVATE int sqlite3VdbeReset(Vdbe *p){
}
for(i=0; i<p->nOp; i++){
char zHdr[100];
+ i64 cnt = p->aOp[i].nExec;
+ i64 cycles = p->aOp[i].nCycle;
sqlite3_snprintf(sizeof(zHdr), zHdr, "%6u %12llu %8llu ",
- p->aOp[i].cnt,
- p->aOp[i].cycles,
- p->aOp[i].cnt>0 ? p->aOp[i].cycles/p->aOp[i].cnt : 0
+ cnt,
+ cycles,
+ cnt>0 ? cycles/cnt : 0
);
fprintf(out, "%s", zHdr);
sqlite3VdbePrintOp(out, i, &p->aOp[i]);
@@ -87354,6 +87768,7 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
*/
/* #include "sqliteInt.h" */
/* #include "vdbeInt.h" */
+/* #include "opcodes.h" */
#ifndef SQLITE_OMIT_DEPRECATED
/*
@@ -87844,7 +88259,10 @@ SQLITE_API void sqlite3_result_text64(
){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
assert( xDel!=SQLITE_DYNAMIC );
- if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ if( enc!=SQLITE_UTF8 ){
+ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ n &= ~(u64)1;
+ }
if( n>0x7fffffff ){
(void)invokeValueDestructor(z, xDel, pCtx);
}else{
@@ -87859,7 +88277,7 @@ SQLITE_API void sqlite3_result_text16(
void (*xDel)(void *)
){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- setResultStrOrError(pCtx, z, n, SQLITE_UTF16NATIVE, xDel);
+ setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16NATIVE, xDel);
}
SQLITE_API void sqlite3_result_text16be(
sqlite3_context *pCtx,
@@ -87868,7 +88286,7 @@ SQLITE_API void sqlite3_result_text16be(
void (*xDel)(void *)
){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- setResultStrOrError(pCtx, z, n, SQLITE_UTF16BE, xDel);
+ setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16BE, xDel);
}
SQLITE_API void sqlite3_result_text16le(
sqlite3_context *pCtx,
@@ -87877,7 +88295,7 @@ SQLITE_API void sqlite3_result_text16le(
void (*xDel)(void *)
){
assert( sqlite3_mutex_held(pCtx->pOut->db->mutex) );
- setResultStrOrError(pCtx, z, n, SQLITE_UTF16LE, xDel);
+ setResultStrOrError(pCtx, z, n & ~(u64)1, SQLITE_UTF16LE, xDel);
}
#endif /* SQLITE_OMIT_UTF16 */
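/* Editor's note: the n & ~(u64)1 masking above silently drops a trailing
** odd byte from UTF-16 results, since valid UTF-16 text is always an even
** number of bytes.  E.g. (hypothetical call):
**
**   sqlite3_result_text16(ctx, zUtf16, 7, SQLITE_TRANSIENT);  // stores 6 bytes
*/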
SQLITE_API void sqlite3_result_value(sqlite3_context *pCtx, sqlite3_value *pValue){
@@ -88088,7 +88506,7 @@ static int sqlite3Step(Vdbe *p){
/* If the statement completed successfully, invoke the profile callback */
checkProfileCallback(db, p);
#endif
-
+ p->pResultRow = 0;
if( rc==SQLITE_DONE && db->autoCommit ){
assert( p->rc==SQLITE_OK );
p->rc = doWalCallbacks(db);
@@ -88218,6 +88636,17 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context *p){
}
/*
+** The destructor function for a ValueList object. This needs to be
+** a separate function, unknowable to the application, to ensure that
+** calls to sqlite3_vtab_in_first()/sqlite3_vtab_in_next() that are not
+** preceded by activation of IN processing via sqlite3_vtab_in() do not
+** try to access a fake ValueList object inserted by a hostile extension.
+*/
+SQLITE_PRIVATE void sqlite3VdbeValueListFree(void *pToDelete){
+ sqlite3_free(pToDelete);
+}
+
+/*
** Implementation of sqlite3_vtab_in_first() (if bNext==0) and
** sqlite3_vtab_in_next() (if bNext!=0).
*/
@@ -88231,8 +88660,15 @@ static int valueFromValueList(
*ppOut = 0;
if( pVal==0 ) return SQLITE_MISUSE;
- pRhs = (ValueList*)sqlite3_value_pointer(pVal, "ValueList");
- if( pRhs==0 ) return SQLITE_MISUSE;
+ if( (pVal->flags & MEM_Dyn)==0 || pVal->xDel!=sqlite3VdbeValueListFree ){
+ return SQLITE_ERROR;
+ }else{
+ assert( (pVal->flags&(MEM_TypeMask|MEM_Term|MEM_Subtype)) ==
+ (MEM_Null|MEM_Term|MEM_Subtype) );
+ assert( pVal->eSubtype=='p' );
+ assert( pVal->u.zPType!=0 && strcmp(pVal->u.zPType,"ValueList")==0 );
+ pRhs = (ValueList*)pVal->z;
+ }
if( bNext ){
rc = sqlite3BtreeNext(pRhs->pCsr, 0);
}else{
@@ -88452,7 +88888,7 @@ SQLITE_API int sqlite3_column_count(sqlite3_stmt *pStmt){
*/
SQLITE_API int sqlite3_data_count(sqlite3_stmt *pStmt){
Vdbe *pVm = (Vdbe *)pStmt;
- if( pVm==0 || pVm->pResultSet==0 ) return 0;
+ if( pVm==0 || pVm->pResultRow==0 ) return 0;
return pVm->nResColumn;
}
@@ -88507,8 +88943,8 @@ static Mem *columnMem(sqlite3_stmt *pStmt, int i){
if( pVm==0 ) return (Mem*)columnNullValue();
assert( pVm->db );
sqlite3_mutex_enter(pVm->db->mutex);
- if( pVm->pResultSet!=0 && i<pVm->nResColumn && i>=0 ){
- pOut = &pVm->pResultSet[i];
+ if( pVm->pResultRow!=0 && i<pVm->nResColumn && i>=0 ){
+ pOut = &pVm->pResultRow[i];
}else{
sqlite3Error(pVm->db, SQLITE_RANGE);
pOut = (Mem*)columnNullValue();
@@ -88942,7 +89378,10 @@ SQLITE_API int sqlite3_bind_text64(
unsigned char enc
){
assert( xDel!=SQLITE_DYNAMIC );
- if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ if( enc!=SQLITE_UTF8 ){
+ if( enc==SQLITE_UTF16 ) enc = SQLITE_UTF16NATIVE;
+ nData &= ~(u16)1;
+ }
return bindText(pStmt, i, zData, nData, xDel, enc);
}
#ifndef SQLITE_OMIT_UTF16
@@ -88950,10 +89389,10 @@ SQLITE_API int sqlite3_bind_text16(
sqlite3_stmt *pStmt,
int i,
const void *zData,
- int nData,
+ int n,
void (*xDel)(void*)
){
- return bindText(pStmt, i, zData, nData, xDel, SQLITE_UTF16NATIVE);
+ return bindText(pStmt, i, zData, n & ~(u64)1, xDel, SQLITE_UTF16NATIVE);
}
#endif /* SQLITE_OMIT_UTF16 */
SQLITE_API int sqlite3_bind_value(sqlite3_stmt *pStmt, int i, const sqlite3_value *pValue){
@@ -89444,23 +89883,60 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa
/*
** Return status data for a single loop within query pStmt.
*/
-SQLITE_API int sqlite3_stmt_scanstatus(
+SQLITE_API int sqlite3_stmt_scanstatus_v2(
sqlite3_stmt *pStmt, /* Prepared statement being queried */
- int idx, /* Index of loop to report on */
+ int iScan, /* Index of loop to report on */
int iScanStatusOp, /* Which metric to return */
+ int flags, /* Mask of SQLITE_SCANSTAT_COMPLEX or zero */
void *pOut /* OUT: Write the answer here */
){
Vdbe *p = (Vdbe*)pStmt;
ScanStatus *pScan;
- if( idx<0 || idx>=p->nScan ) return 1;
- pScan = &p->aScan[idx];
+ int idx;
+
+ if( iScan<0 ){
+ int ii;
+ if( iScanStatusOp==SQLITE_SCANSTAT_NCYCLE ){
+ i64 res = 0;
+ for(ii=0; ii<p->nOp; ii++){
+ res += p->aOp[ii].nCycle;
+ }
+ *(i64*)pOut = res;
+ return 0;
+ }
+ return 1;
+ }
+ if( flags & SQLITE_SCANSTAT_COMPLEX ){
+ idx = iScan;
+ pScan = &p->aScan[idx];
+ }else{
+ /* If the COMPLEX flag is clear, then this function must skip any
+ ** ScanStatus structures that have no name (ScanStatus.zName==0). */
+ for(idx=0; idx<p->nScan; idx++){
+ pScan = &p->aScan[idx];
+ if( pScan->zName ){
+ iScan--;
+ if( iScan<0 ) break;
+ }
+ }
+ }
+ if( idx>=p->nScan ) return 1;
+
switch( iScanStatusOp ){
case SQLITE_SCANSTAT_NLOOP: {
- *(sqlite3_int64*)pOut = p->anExec[pScan->addrLoop];
+ if( pScan->addrLoop>0 ){
+ *(sqlite3_int64*)pOut = p->aOp[pScan->addrLoop].nExec;
+ }else{
+ *(sqlite3_int64*)pOut = -1;
+ }
break;
}
case SQLITE_SCANSTAT_NVISIT: {
- *(sqlite3_int64*)pOut = p->anExec[pScan->addrVisit];
+ if( pScan->addrVisit>0 ){
+ *(sqlite3_int64*)pOut = p->aOp[pScan->addrVisit].nExec;
+ }else{
+ *(sqlite3_int64*)pOut = -1;
+ }
break;
}
case SQLITE_SCANSTAT_EST: {
@@ -89493,6 +89969,45 @@ SQLITE_API int sqlite3_stmt_scanstatus(
}
break;
}
+ case SQLITE_SCANSTAT_PARENTID: {
+ if( pScan->addrExplain ){
+ *(int*)pOut = p->aOp[ pScan->addrExplain ].p2;
+ }else{
+ *(int*)pOut = -1;
+ }
+ break;
+ }
+ case SQLITE_SCANSTAT_NCYCLE: {
+ i64 res = 0;
+ if( pScan->aAddrRange[0]==0 ){
+ res = -1;
+ }else{
+ int ii;
+ for(ii=0; ii<ArraySize(pScan->aAddrRange); ii+=2){
+ int iIns = pScan->aAddrRange[ii];
+ int iEnd = pScan->aAddrRange[ii+1];
+ if( iIns==0 ) break;
+ if( iIns>0 ){
+ while( iIns<=iEnd ){
+ res += p->aOp[iIns].nCycle;
+ iIns++;
+ }
+ }else{
+ int iOp;
+ for(iOp=0; iOp<p->nOp; iOp++){
+ Op *pOp = &p->aOp[iOp];
+ if( pOp->p1!=iEnd ) continue;
+ if( (sqlite3OpcodeProperty[pOp->opcode] & OPFLG_NCYCLE)==0 ){
+ continue;
+ }
+ res += p->aOp[iOp].nCycle;
+ }
+ }
+ }
+ }
+ *(i64*)pOut = res;
+ break;
+ }
default: {
return 1;
}
@@ -89501,11 +90016,28 @@ SQLITE_API int sqlite3_stmt_scanstatus(
}
/*
+** Return status data for a single loop within query pStmt.
+*/
+SQLITE_API int sqlite3_stmt_scanstatus(
+ sqlite3_stmt *pStmt, /* Prepared statement being queried */
+ int iScan, /* Index of loop to report on */
+ int iScanStatusOp, /* Which metric to return */
+ void *pOut /* OUT: Write the answer here */
+){
+ return sqlite3_stmt_scanstatus_v2(pStmt, iScan, iScanStatusOp, 0, pOut);
+}
+
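/* Editor's note: application-side sketch of the v2 interface.  Per the code
** above, iScan==-1 with SQLITE_SCANSTAT_NCYCLE totals cycles over the whole
** statement, and the SQLITE_SCANSTAT_COMPLEX flag exposes every query
** element rather than only named loops:
**
**   sqlite3_int64 nCycleTotal = 0;
**   int i;
**   sqlite3_stmt_scanstatus_v2(pStmt, -1, SQLITE_SCANSTAT_NCYCLE,
**                              0, &nCycleTotal);
**   for(i=0; ; i++){
**     sqlite3_int64 nLoop;
**     if( sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NLOOP,
**                                    SQLITE_SCANSTAT_COMPLEX, &nLoop) ) break;
**     ...
**   }
*/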
+/*
** Zero all counters associated with the sqlite3_stmt_scanstatus() data.
*/
SQLITE_API void sqlite3_stmt_scanstatus_reset(sqlite3_stmt *pStmt){
Vdbe *p = (Vdbe*)pStmt;
- memset(p->anExec, 0, p->nOp * sizeof(i64));
+ int ii;
+ for(ii=0; ii<p->nOp; ii++){
+ Op *pOp = &p->aOp[ii];
+ pOp->nExec = 0;
+ pOp->nCycle = 0;
+ }
}
#endif /* SQLITE_ENABLE_STMT_SCANSTATUS */
@@ -89841,6 +90373,9 @@ SQLITE_API int sqlite3_found_count = 0;
*/
static void test_trace_breakpoint(int pc, Op *pOp, Vdbe *v){
static int n = 0;
+ (void)pc;
+ (void)pOp;
+ (void)v;
n++;
}
#endif
@@ -90079,6 +90614,10 @@ static void applyNumericAffinity(Mem *pRec, int bTryForInt){
** always preferred, even if the affinity is REAL, because
** an integer representation is more space efficient on disk.
**
+** SQLITE_AFF_FLEXNUM:
+** If the value is text, then try to convert it into a number of
+** some kind (integer or real) but do not make any other changes.
+**
** SQLITE_AFF_TEXT:
** Convert pRec to a text representation.
**
@@ -90093,11 +90632,11 @@ static void applyAffinity(
){
if( affinity>=SQLITE_AFF_NUMERIC ){
assert( affinity==SQLITE_AFF_INTEGER || affinity==SQLITE_AFF_REAL
- || affinity==SQLITE_AFF_NUMERIC );
+ || affinity==SQLITE_AFF_NUMERIC || affinity==SQLITE_AFF_FLEXNUM );
if( (pRec->flags & MEM_Int)==0 ){ /*OPTIMIZATION-IF-FALSE*/
- if( (pRec->flags & MEM_Real)==0 ){
+ if( (pRec->flags & (MEM_Real|MEM_IntReal))==0 ){
if( pRec->flags & MEM_Str ) applyNumericAffinity(pRec,1);
- }else{
+ }else if( affinity<=SQLITE_AFF_REAL ){
sqlite3VdbeIntegerAffinity(pRec);
}
}
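/* Editor's note: the practical difference above is the
** affinity<=SQLITE_AFF_REAL guard: NUMERIC and REAL affinity still downgrade
** a lossless real to an integer via sqlite3VdbeIntegerAffinity(), while
** FLEXNUM converts text to a number but then leaves the integer/real form
** alone.  Illustration (assuming FLEXNUM sorts after REAL in the affinity
** ordering):
**
**   // pRec already holds real 1.0 (not text):
**   applyAffinity(pRec, SQLITE_AFF_NUMERIC, enc);  // becomes integer 1
**   applyAffinity(pRec, SQLITE_AFF_FLEXNUM, enc);  // stays real 1.0
*/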
@@ -90325,17 +90864,6 @@ SQLITE_PRIVATE void sqlite3VdbeRegisterDump(Vdbe *v){
# define REGISTER_TRACE(R,M)
#endif
-
-#ifdef VDBE_PROFILE
-
-/*
-** hwtime.h contains inline assembler code for implementing
-** high-performance timing routines.
-*/
-/* #include "hwtime.h" */
-
-#endif
-
#ifndef NDEBUG
/*
** This function is only called from within an assert() expression. It
@@ -90395,8 +90923,7 @@ static u64 filterHash(const Mem *aMem, const Op *pOp){
}else if( p->flags & MEM_Real ){
h += sqlite3VdbeIntValue(p);
}else if( p->flags & (MEM_Str|MEM_Blob) ){
- h += p->n;
- if( p->flags & MEM_Zero ) h += p->u.nZero;
+ /* no-op */
}
}
return h;
@@ -90425,11 +90952,10 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
){
Op *aOp = p->aOp; /* Copy of p->aOp */
Op *pOp = aOp; /* Current operation */
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
- Op *pOrigOp; /* Value of pOp at the top of the loop */
-#endif
#ifdef SQLITE_DEBUG
+ Op *pOrigOp; /* Value of pOp at the top of the loop */
int nExtraDelete = 0; /* Verifies FORDELETE and AUXDELETE flags */
+ u8 iCompareIsInit = 0; /* iCompare is initialized */
#endif
int rc = SQLITE_OK; /* Value to return */
sqlite3 *db = p->db; /* The database */
@@ -90445,13 +90971,15 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
Mem *pIn2 = 0; /* 2nd input operand */
Mem *pIn3 = 0; /* 3rd input operand */
Mem *pOut = 0; /* Output operand */
-#ifdef VDBE_PROFILE
- u64 start; /* CPU clock count at start of opcode */
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
+ u64 *pnCycle = 0;
#endif
/*** INSERT STACK UNION HERE ***/
assert( p->eVdbeState==VDBE_RUN_STATE ); /* sqlite3_step() verifies this */
- sqlite3VdbeEnter(p);
+ if( DbMaskNonZero(p->lockMask) ){
+ sqlite3VdbeEnter(p);
+ }
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
if( db->xProgress ){
u32 iPrior = p->aCounter[SQLITE_STMTSTATUS_VM_STEP];
@@ -90472,7 +91000,6 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
assert( p->bIsReader || p->readOnly!=0 );
p->iCurrentTime = 0;
assert( p->explain==0 );
- p->pResultSet = 0;
db->busyHandler.nBusy = 0;
if( AtomicLoad(&db->u1.isInterrupted) ) goto abort_due_to_interrupt;
sqlite3VdbeIOTraceSql(p);
@@ -90509,12 +91036,14 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
assert( rc==SQLITE_OK );
assert( pOp>=aOp && pOp<&aOp[p->nOp]);
-#ifdef VDBE_PROFILE
- start = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime();
-#endif
nVmStep++;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- if( p->anExec ) p->anExec[(int)(pOp-aOp)]++;
+#if defined(SQLITE_ENABLE_STMT_SCANSTATUS) || defined(VDBE_PROFILE)
+ pOp->nExec++;
+ pnCycle = &pOp->nCycle;
+# ifdef VDBE_PROFILE
+ if( sqlite3NProfileCnt==0 )
+# endif
+ *pnCycle -= sqlite3Hwtime();
#endif
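/* Editor's note: the accounting above relies on unsigned wraparound instead
** of a per-opcode start variable: *pnCycle -= sqlite3Hwtime() on entry and
** the matching *pnCycle += sqlite3Hwtime() in the exit hunks further down
** leave a net nCycle += (end - start).  Worked example with a fake clock:
**
**   u64 nCycle = 0;
**   nCycle -= 100;    // opcode entry at t=100 (wraps below zero)
**   nCycle += 130;    // opcode exit at t=130
**   // nCycle == 30
*/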
/* Only allow tracing if SQLITE_DEBUG is defined.
@@ -90576,7 +91105,7 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
}
}
#endif
-#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
+#ifdef SQLITE_DEBUG
pOrigOp = pOp;
#endif
@@ -90860,6 +91389,12 @@ case OP_Halt: {
#ifdef SQLITE_DEBUG
if( pOp->p2==OE_Abort ){ sqlite3VdbeAssertAbortable(p); }
#endif
+
+ /* A deliberately coded "OP_Halt SQLITE_INTERNAL * * * *" opcode indicates
+ ** something is wrong with the code generator. Raise an assertion in order
+ ** to bring this to the attention of fuzzers and other testing tools. */
+ assert( pOp->p1!=SQLITE_INTERNAL );
+
if( p->pFrame && pOp->p1==SQLITE_OK ){
/* Halt the sub-program. Return control to the parent frame. */
pFrame = p->pFrame;
@@ -91301,10 +91836,10 @@ case OP_ResultRow: {
assert( pOp->p1+pOp->p2<=(p->nMem+1 - p->nCursor)+1 );
p->cacheCtr = (p->cacheCtr + 2)|1;
- p->pResultSet = &aMem[pOp->p1];
+ p->pResultRow = &aMem[pOp->p1];
#ifdef SQLITE_DEBUG
{
- Mem *pMem = p->pResultSet;
+ Mem *pMem = p->pResultRow;
int i;
for(i=0; i<pOp->p2; i++){
assert( memIsValid(&pMem[i]) );
@@ -91834,7 +92369,6 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
flags1 = pIn1->flags;
flags3 = pIn3->flags;
if( (flags1 & flags3 & MEM_Int)!=0 ){
- assert( (pOp->p5 & SQLITE_AFF_MASK)!=SQLITE_AFF_TEXT || CORRUPT_DB );
/* Common case of comparison of two integers */
if( pIn3->u.i > pIn1->u.i ){
if( sqlite3aGTb[pOp->opcode] ){
@@ -91842,18 +92376,21 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
goto jump_to_p2;
}
iCompare = +1;
+ VVA_ONLY( iCompareIsInit = 1; )
}else if( pIn3->u.i < pIn1->u.i ){
if( sqlite3aLTb[pOp->opcode] ){
VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3);
goto jump_to_p2;
}
iCompare = -1;
+ VVA_ONLY( iCompareIsInit = 1; )
}else{
if( sqlite3aEQb[pOp->opcode] ){
VdbeBranchTaken(1, (pOp->p5 & SQLITE_NULLEQ)?2:3);
goto jump_to_p2;
}
iCompare = 0;
+ VVA_ONLY( iCompareIsInit = 1; )
}
VdbeBranchTaken(0, (pOp->p5 & SQLITE_NULLEQ)?2:3);
break;
@@ -91885,6 +92422,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
goto jump_to_p2;
}
iCompare = 1; /* Operands are not equal */
+ VVA_ONLY( iCompareIsInit = 1; )
break;
}
}else{
@@ -91895,14 +92433,14 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
if( (flags1 | flags3)&MEM_Str ){
if( (flags1 & (MEM_Int|MEM_IntReal|MEM_Real|MEM_Str))==MEM_Str ){
applyNumericAffinity(pIn1,0);
- testcase( flags3==pIn3->flags );
+ assert( flags3==pIn3->flags || CORRUPT_DB );
flags3 = pIn3->flags;
}
if( (flags3 & (MEM_Int|MEM_IntReal|MEM_Real|MEM_Str))==MEM_Str ){
applyNumericAffinity(pIn3,0);
}
}
- }else if( affinity==SQLITE_AFF_TEXT ){
+ }else if( affinity==SQLITE_AFF_TEXT && ((flags1 | flags3) & MEM_Str)!=0 ){
if( (flags1 & MEM_Str)==0 && (flags1&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
testcase( pIn1->flags & MEM_Int );
testcase( pIn1->flags & MEM_Real );
@@ -91910,7 +92448,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
sqlite3VdbeMemStringify(pIn1, encoding, 1);
testcase( (flags1&MEM_Dyn) != (pIn1->flags&MEM_Dyn) );
flags1 = (pIn1->flags & ~MEM_TypeMask) | (flags1 & MEM_TypeMask);
- if( pIn1==pIn3 ) flags3 = flags1 | MEM_Str;
+ if( NEVER(pIn1==pIn3) ) flags3 = flags1 | MEM_Str;
}
if( (flags3 & MEM_Str)==0 && (flags3&(MEM_Int|MEM_Real|MEM_IntReal))!=0 ){
testcase( pIn3->flags & MEM_Int );
@@ -91941,6 +92479,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
res2 = sqlite3aGTb[pOp->opcode];
}
iCompare = res;
+ VVA_ONLY( iCompareIsInit = 1; )
/* Undo any changes made by applyAffinity() to the input registers. */
assert( (pIn3->flags & MEM_Dyn) == (flags3 & MEM_Dyn) );
@@ -91979,6 +92518,7 @@ case OP_ElseEq: { /* same as TK_ESCAPE, jump */
break;
}
#endif /* SQLITE_DEBUG */
+ assert( iCompareIsInit );
VdbeBranchTaken(iCompare==0, 2);
if( iCompare==0 ) goto jump_to_p2;
break;
@@ -92073,6 +92613,7 @@ case OP_Compare: {
pColl = pKeyInfo->aColl[i];
bRev = (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_DESC);
iCompare = sqlite3MemCompare(&aMem[p1+idx], &aMem[p2+idx], pColl);
+ VVA_ONLY( iCompareIsInit = 1; )
if( iCompare ){
if( (pKeyInfo->aSortFlags[i] & KEYINFO_ORDER_BIGNULL)
&& ((aMem[p1+idx].flags & MEM_Null) || (aMem[p2+idx].flags & MEM_Null))
@@ -92097,6 +92638,7 @@ case OP_Compare: {
*/
case OP_Jump: { /* jump */
assert( pOp>aOp && pOp[-1].opcode==OP_Compare );
+ assert( iCompareIsInit );
if( iCompare<0 ){
VdbeBranchTaken(0,4); pOp = &aOp[pOp->p1 - 1];
}else if( iCompare==0 ){
@@ -92496,7 +93038,7 @@ case OP_Offset: { /* out3 */
** typeof() function or the IS NULL or IS NOT NULL operators or the
** equivalent. In this case, all content loading can be omitted.
*/
-case OP_Column: {
+case OP_Column: { /* ncycle */
u32 p2; /* column number to retrieve */
VdbeCursor *pC; /* The VDBE cursor */
BtCursor *pCrsr; /* The B-Tree cursor corresponding to pC */
@@ -92845,7 +93387,7 @@ case OP_TypeCheck: {
}
case COLTYPE_REAL: {
testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_Real );
- testcase( (pIn1->flags & (MEM_Real|MEM_IntReal))==MEM_IntReal );
+ assert( (pIn1->flags & MEM_IntReal)==0 );
if( pIn1->flags & MEM_Int ){
/* When applying REAL affinity, if the result is still an MEM_Int
** that will fit in 6 bytes, then change the type to MEM_IntReal
@@ -93848,7 +94390,7 @@ case OP_SetCookie: {
**
** See also: OP_OpenRead, OP_ReopenIdx
*/
-case OP_ReopenIdx: {
+case OP_ReopenIdx: { /* ncycle */
int nField;
KeyInfo *pKeyInfo;
u32 p2;
@@ -93869,7 +94411,7 @@ case OP_ReopenIdx: {
}
/* If the cursor is not currently open or is open on a different
** index, then fall through into OP_OpenRead to force a reopen */
-case OP_OpenRead:
+case OP_OpenRead: /* ncycle */
case OP_OpenWrite:
assert( pOp->opcode==OP_OpenWrite || pOp->p5==0 || pOp->p5==OPFLAG_SEEKEQ );
@@ -93963,7 +94505,7 @@ open_cursor_set_hints:
**
** Duplicate ephemeral cursors are used for self-joins of materialized views.
*/
-case OP_OpenDup: {
+case OP_OpenDup: { /* ncycle */
VdbeCursor *pOrig; /* The original cursor to be duplicated */
VdbeCursor *pCx; /* The new cursor */
@@ -94025,8 +94567,8 @@ case OP_OpenDup: {
** by this opcode will be used for automatically created transient
** indices in joins.
*/
-case OP_OpenAutoindex:
-case OP_OpenEphemeral: {
+case OP_OpenAutoindex: /* ncycle */
+case OP_OpenEphemeral: { /* ncycle */
VdbeCursor *pCx;
KeyInfo *pKeyInfo;
@@ -94184,7 +94726,7 @@ case OP_OpenPseudo: {
** Close a cursor previously opened as P1. If P1 is not
** currently open, this instruction is a no-op.
*/
-case OP_Close: {
+case OP_Close: { /* ncycle */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
sqlite3VdbeFreeCursor(p, p->apCsr[pOp->p1]);
p->apCsr[pOp->p1] = 0;
@@ -94301,10 +94843,10 @@ case OP_ColumnsUsed: {
**
** See also: Found, NotFound, SeekGt, SeekGe, SeekLt
*/
-case OP_SeekLT: /* jump, in3, group */
-case OP_SeekLE: /* jump, in3, group */
-case OP_SeekGE: /* jump, in3, group */
-case OP_SeekGT: { /* jump, in3, group */
+case OP_SeekLT: /* jump, in3, group, ncycle */
+case OP_SeekLE: /* jump, in3, group, ncycle */
+case OP_SeekGE: /* jump, in3, group, ncycle */
+case OP_SeekGT: { /* jump, in3, group, ncycle */
int res; /* Comparison result */
int oc; /* Opcode */
VdbeCursor *pC; /* The cursor to seek */
@@ -94570,7 +95112,7 @@ seek_not_found:
** jump to SeekOP.P2 if This.P5==0 or to This.P2 if This.P5>0.
** </ol>
*/
-case OP_SeekScan: {
+case OP_SeekScan: { /* ncycle */
VdbeCursor *pC;
int res;
int nStep;
@@ -94692,7 +95234,7 @@ case OP_SeekScan: {
**
** P1 must be a valid b-tree cursor.
*/
-case OP_SeekHit: {
+case OP_SeekHit: { /* ncycle */
VdbeCursor *pC;
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
pC = p->apCsr[pOp->p1];
@@ -94824,7 +95366,7 @@ case OP_IfNotOpen: { /* jump */
**
** See also: NotFound, Found, NotExists
*/
-case OP_IfNoHope: { /* jump, in3 */
+case OP_IfNoHope: { /* jump, in3, ncycle */
VdbeCursor *pC;
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
pC = p->apCsr[pOp->p1];
@@ -94838,9 +95380,9 @@ case OP_IfNoHope: { /* jump, in3 */
/* Fall through into OP_NotFound */
/* no break */ deliberate_fall_through
}
-case OP_NoConflict: /* jump, in3 */
-case OP_NotFound: /* jump, in3 */
-case OP_Found: { /* jump, in3 */
+case OP_NoConflict: /* jump, in3, ncycle */
+case OP_NotFound: /* jump, in3, ncycle */
+case OP_Found: { /* jump, in3, ncycle */
int alreadyExists;
int ii;
VdbeCursor *pC;
@@ -94970,7 +95512,7 @@ case OP_Found: { /* jump, in3 */
**
** See also: Found, NotFound, NoConflict, SeekRowid
*/
-case OP_SeekRowid: { /* jump, in3 */
+case OP_SeekRowid: { /* jump, in3, ncycle */
VdbeCursor *pC;
BtCursor *pCrsr;
int res;
@@ -94995,7 +95537,7 @@ case OP_SeekRowid: { /* jump, in3 */
}
/* Fall through into OP_NotExists */
/* no break */ deliberate_fall_through
-case OP_NotExists: /* jump, in3 */
+case OP_NotExists: /* jump, in3, ncycle */
pIn3 = &aMem[pOp->p3];
assert( (pIn3->flags & MEM_Int)!=0 || pOp->opcode==OP_SeekRowid );
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
@@ -95275,8 +95817,11 @@ case OP_Insert: {
if( pOp->p5 & OPFLAG_ISNOOP ) break;
#endif
- if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
- if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey;
+ assert( (pOp->p5 & OPFLAG_LASTROWID)==0 || (pOp->p5 & OPFLAG_NCHANGE)!=0 );
+ if( pOp->p5 & OPFLAG_NCHANGE ){
+ p->nChange++;
+ if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey;
+ }
assert( (pData->flags & (MEM_Blob|MEM_Str))!=0 || pData->n==0 );
x.pData = pData->z;
x.nData = pData->n;
@@ -95287,6 +95832,7 @@ case OP_Insert: {
x.nZero = 0;
}
x.pKey = 0;
+ assert( BTREE_PREFORMAT==OPFLAG_PREFORMAT );
rc = sqlite3BtreeInsert(pC->uc.pCursor, &x,
(pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION|OPFLAG_PREFORMAT)),
seekResult
@@ -95618,7 +96164,7 @@ case OP_RowData: {
** be a separate OP_VRowid opcode for use with virtual tables, but this
** one opcode now works for both table types.
*/
-case OP_Rowid: { /* out2 */
+case OP_Rowid: { /* out2, ncycle */
VdbeCursor *pC;
i64 v;
sqlite3_vtab *pVtab;
@@ -95717,8 +96263,8 @@ case OP_NullRow: {
** from the end toward the beginning. In other words, the cursor is
** configured to use Prev, not Next.
*/
-case OP_SeekEnd:
-case OP_Last: { /* jump */
+case OP_SeekEnd: /* ncycle */
+case OP_Last: { /* jump, ncycle */
VdbeCursor *pC;
BtCursor *pCrsr;
int res;
@@ -95819,17 +96365,22 @@ case OP_Sort: { /* jump */
** If the table or index is not empty, fall through to the following
** instruction.
**
+** If P2 is zero, that is an assertion that the P1 table is never
+** empty and hence the jump will never be taken.
+**
** This opcode leaves the cursor configured to move in forward order,
** from the beginning toward the end. In other words, the cursor is
** configured to use Next, not Prev.
*/
-case OP_Rewind: { /* jump */
+case OP_Rewind: { /* jump, ncycle */
VdbeCursor *pC;
BtCursor *pCrsr;
int res;
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
assert( pOp->p5==0 );
+ assert( pOp->p2>=0 && pOp->p2<p->nOp );
+
pC = p->apCsr[pOp->p1];
assert( pC!=0 );
assert( isSorter(pC)==(pOp->opcode==OP_SorterSort) );
@@ -95849,9 +96400,10 @@ case OP_Rewind: { /* jump */
}
if( rc ) goto abort_due_to_error;
pC->nullRow = (u8)res;
- assert( pOp->p2>0 && pOp->p2<p->nOp );
- VdbeBranchTaken(res!=0,2);
- if( res ) goto jump_to_p2;
+ if( pOp->p2>0 ){
+ VdbeBranchTaken(res!=0,2);
+ if( res ) goto jump_to_p2;
+ }
break;
}
@@ -95917,7 +96469,7 @@ case OP_SorterNext: { /* jump */
rc = sqlite3VdbeSorterNext(db, pC);
goto next_tail;
-case OP_Prev: /* jump */
+case OP_Prev: /* jump, ncycle */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
assert( pOp->p5==0
|| pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP
@@ -95932,7 +96484,7 @@ case OP_Prev: /* jump */
rc = sqlite3BtreePrevious(pC->uc.pCursor, pOp->p3);
goto next_tail;
-case OP_Next: /* jump */
+case OP_Next: /* jump, ncycle */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
assert( pOp->p5==0
|| pOp->p5==SQLITE_STMTSTATUS_FULLSCAN_STEP
@@ -96124,8 +96676,8 @@ case OP_IdxDelete: {
**
** See also: Rowid, MakeRecord.
*/
-case OP_DeferredSeek:
-case OP_IdxRowid: { /* out2 */
+case OP_DeferredSeek: /* ncycle */
+case OP_IdxRowid: { /* out2, ncycle */
VdbeCursor *pC; /* The P1 index cursor */
VdbeCursor *pTabCur; /* The P2 table cursor (OP_DeferredSeek only) */
i64 rowid; /* Rowid that P1 current points to */
@@ -96187,8 +96739,8 @@ case OP_IdxRowid: { /* out2 */
** seek operation now, without further delay. If the cursor seek has
** already occurred, this instruction is a no-op.
*/
-case OP_FinishSeek: {
- VdbeCursor *pC; /* The P1 index cursor */
+case OP_FinishSeek: { /* ncycle */
+ VdbeCursor *pC; /* The P1 index cursor */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
pC = p->apCsr[pOp->p1];
@@ -96243,10 +96795,10 @@ case OP_FinishSeek: {
** If the P1 index entry is less than or equal to the key value then jump
** to P2. Otherwise fall through to the next instruction.
*/
-case OP_IdxLE: /* jump */
-case OP_IdxGT: /* jump */
-case OP_IdxLT: /* jump */
-case OP_IdxGE: { /* jump */
+case OP_IdxLE: /* jump, ncycle */
+case OP_IdxGT: /* jump, ncycle */
+case OP_IdxLT: /* jump, ncycle */
+case OP_IdxGE: { /* jump, ncycle */
VdbeCursor *pC;
int res;
UnpackedRecord r;
@@ -96657,13 +97209,14 @@ case OP_IntegrityCk: {
pIn1 = &aMem[pOp->p1];
assert( pOp->p5<db->nDb );
assert( DbMaskTest(p->btreeMask, pOp->p5) );
- z = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot,
- (int)pnErr->u.i+1, &nErr);
+ rc = sqlite3BtreeIntegrityCheck(db, db->aDb[pOp->p5].pBt, &aRoot[1], nRoot,
+ (int)pnErr->u.i+1, &nErr, &z);
sqlite3VdbeMemSetNull(pIn1);
if( nErr==0 ){
assert( z==0 );
- }else if( z==0 ){
- goto no_mem;
+ }else if( rc ){
+ sqlite3_free(z);
+ goto abort_due_to_error;
}else{
pnErr->u.i -= nErr-1;
sqlite3VdbeMemSetStr(pIn1, z, -1, SQLITE_UTF8, sqlite3_free);
@@ -96867,9 +97420,6 @@ case OP_Program: { /* jump */
pFrame->aOp = p->aOp;
pFrame->nOp = p->nOp;
pFrame->token = pProgram->token;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- pFrame->anExec = p->anExec;
-#endif
#ifdef SQLITE_DEBUG
pFrame->iFrameMagic = SQLITE_FRAME_MAGIC;
#endif
@@ -96906,9 +97456,6 @@ case OP_Program: { /* jump */
memset(pFrame->aOnce, 0, (pProgram->nOp + 7)/8);
p->aOp = aOp = pProgram->aOp;
p->nOp = pProgram->nOp;
-#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- p->anExec = 0;
-#endif
#ifdef SQLITE_DEBUG
/* Verify that second and subsequent executions of the same trigger do not
** try to reuse register values from the first use. */
@@ -97665,7 +98212,7 @@ case OP_VDestroy: {
** P1 is a cursor number. This opcode opens a cursor to the virtual
** table and stores that cursor in P1.
*/
-case OP_VOpen: {
+case OP_VOpen: { /* ncycle */
VdbeCursor *pCur;
sqlite3_vtab_cursor *pVCur;
sqlite3_vtab *pVtab;
@@ -97712,7 +98259,7 @@ case OP_VOpen: {
** cursor. Register P3 is used to hold the values returned by
** sqlite3_vtab_in_first() and sqlite3_vtab_in_next().
*/
-case OP_VInitIn: { /* out2 */
+case OP_VInitIn: { /* out2, ncycle */
VdbeCursor *pC; /* The cursor containing the RHS values */
ValueList *pRhs; /* New ValueList object to put in reg[P2] */
@@ -97723,7 +98270,7 @@ case OP_VInitIn: { /* out2 */
pRhs->pOut = &aMem[pOp->p3];
pOut = out2Prerelease(p, pOp);
pOut->flags = MEM_Null;
- sqlite3VdbeMemSetPointer(pOut, pRhs, "ValueList", sqlite3_free);
+ sqlite3VdbeMemSetPointer(pOut, pRhs, "ValueList", sqlite3VdbeValueListFree);
break;
}
#endif /* SQLITE_OMIT_VIRTUALTABLE */
@@ -97749,7 +98296,7 @@ case OP_VInitIn: { /* out2 */
**
** A jump is made to P2 if the result set after filtering would be empty.
*/
-case OP_VFilter: { /* jump */
+case OP_VFilter: { /* jump, ncycle */
int nArg;
int iQuery;
const sqlite3_module *pModule;
@@ -97809,7 +98356,7 @@ case OP_VFilter: { /* jump */
** bits (OPFLAG_LENGTHARG or OPFLAG_TYPEOFARG) but those bits are
** unused by OP_VColumn.
*/
-case OP_VColumn: {
+case OP_VColumn: { /* ncycle */
sqlite3_vtab *pVtab;
const sqlite3_module *pModule;
Mem *pDest;
@@ -97861,7 +98408,7 @@ case OP_VColumn: {
** jump to instruction P2. Or, if the virtual table has reached
** the end of its result set, then fall through to the next instruction.
*/
-case OP_VNext: { /* jump */
+case OP_VNext: { /* jump, ncycle */
sqlite3_vtab *pVtab;
const sqlite3_module *pModule;
int res;
@@ -98444,12 +98991,12 @@ default: { /* This is really OP_Noop, OP_Explain */
*****************************************************************************/
}
-#ifdef VDBE_PROFILE
- {
- u64 endTime = sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime();
- if( endTime>start ) pOrigOp->cycles += endTime - start;
- pOrigOp->cnt++;
- }
+#if defined(VDBE_PROFILE)
+ *pnCycle += sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime();
+ pnCycle = 0;
+#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+ *pnCycle += sqlite3Hwtime();
+ pnCycle = 0;
#endif
/* The following code adds nothing to the actual functionality
@@ -98525,6 +99072,18 @@ abort_due_to_error:
** release the mutexes on btrees that were acquired at the
** top. */
vdbe_return:
+#if defined(VDBE_PROFILE)
+ if( pnCycle ){
+ *pnCycle += sqlite3NProfileCnt ? sqlite3NProfileCnt : sqlite3Hwtime();
+ pnCycle = 0;
+ }
+#elif defined(SQLITE_ENABLE_STMT_SCANSTATUS)
+ if( pnCycle ){
+ *pnCycle += sqlite3Hwtime();
+ pnCycle = 0;
+ }
+#endif
+
#ifndef SQLITE_OMIT_PROGRESS_CALLBACK
while( nVmStep>=nProgressLimit && db->xProgress!=0 ){
nProgressLimit += db->nProgressOps;
@@ -98536,7 +99095,9 @@ vdbe_return:
}
#endif
p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep;
- sqlite3VdbeLeave(p);
+ if( DbMaskNonZero(p->lockMask) ){
+ sqlite3VdbeLeave(p);
+ }
assert( rc!=SQLITE_OK || nExtraDelete==0
|| sqlite3_strlike("DELETE%",p->zSql,0)!=0
);
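The loop above is the engine half of sqlite3_progress_handler(): the callback fires once per db->nProgressOps virtual-machine steps and can interrupt the statement by returning nonzero. A minimal application-side sketch (the step count of 1000 and the recursive CTE are arbitrary choices):

#include <stdio.h>
#include "sqlite3.h"

/* Invoked roughly once per 1000 VDBE steps.  Returning nonzero would
** abort the statement with SQLITE_INTERRUPT. */
static int onProgress(void *pArg){
  (void)pArg;
  fputs("still working...\n", stderr);
  return 0;
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_progress_handler(db, 1000, onProgress, 0);
  sqlite3_exec(db,
    "WITH RECURSIVE c(x) AS (VALUES(1) UNION ALL"
    " SELECT x+1 FROM c WHERE x<200000)"
    " SELECT count(*) FROM c;", 0, 0, 0);
  sqlite3_close(db);
  return 0;
}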
@@ -101943,6 +102504,9 @@ static int bytecodevtabConnect(
");"
};
+ (void)argc;
+ (void)argv;
+ (void)pzErr;
rc = sqlite3_declare_vtab(db, azSchema[isTabUsed]);
if( rc==SQLITE_OK ){
pNew = sqlite3_malloc( sizeof(*pNew) );
@@ -102178,6 +102742,7 @@ static int bytecodevtabFilter(
bytecodevtab_cursor *pCur = (bytecodevtab_cursor *)pVtabCursor;
bytecodevtab *pVTab = (bytecodevtab *)pVtabCursor->pVtab;
int rc = SQLITE_OK;
+ (void)idxStr;
bytecodevtabCursorClear(pCur);
pCur->iRowid = 0;
@@ -103193,6 +103758,32 @@ static void extendFJMatch(
}
/*
+** Return TRUE (non-zero) if zTab is a valid name for the schema table pTab.
+*/
+static SQLITE_NOINLINE int isValidSchemaTableName(
+ const char *zTab, /* Name as it appears in the SQL */
+ Table *pTab, /* The schema table we are trying to match */
+ Schema *pSchema /* non-NULL if a database qualifier is present */
+){
+ const char *zLegacy;
+ assert( pTab!=0 );
+ assert( pTab->tnum==1 );
+ if( sqlite3StrNICmp(zTab, "sqlite_", 7)!=0 ) return 0;
+ zLegacy = pTab->zName;
+ if( strcmp(zLegacy+7, &LEGACY_TEMP_SCHEMA_TABLE[7])==0 ){
+ if( sqlite3StrICmp(zTab+7, &PREFERRED_TEMP_SCHEMA_TABLE[7])==0 ){
+ return 1;
+ }
+ if( pSchema==0 ) return 0;
+ if( sqlite3StrICmp(zTab+7, &LEGACY_SCHEMA_TABLE[7])==0 ) return 1;
+ if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1;
+ }else{
+ if( sqlite3StrICmp(zTab+7, &PREFERRED_SCHEMA_TABLE[7])==0 ) return 1;
+ }
+ return 0;
+}
+
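isValidSchemaTableName() only runs after zTab has already failed an exact match against Table.zName, so it accepts the alternate spellings of the two built-in schema tables. A standalone restatement of the rule, assuming the usual macro values ("sqlite_master", "sqlite_schema", "sqlite_temp_master", "sqlite_temp_schema") and POSIX strcasecmp()/strncasecmp() in place of sqlite3StrICmp()/sqlite3StrNICmp():

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* qualified mirrors the pSchema!=0 test: a database-qualified reference
** to the temp schema table may also use the non-temp names. */
static int matches(const char *zTab, const char *zLegacy, int qualified){
  if( strncasecmp(zTab, "sqlite_", 7)!=0 ) return 0;
  if( strcmp(zLegacy, "sqlite_temp_master")==0 ){
    if( strcasecmp(zTab, "sqlite_temp_schema")==0 ) return 1;
    if( !qualified ) return 0;
    return strcasecmp(zTab, "sqlite_master")==0
        || strcasecmp(zTab, "sqlite_schema")==0;
  }
  return strcasecmp(zTab, "sqlite_schema")==0;
}

int main(void){
  printf("%d\n", matches("sqlite_schema", "sqlite_master", 0));      /* 1 */
  printf("%d\n", matches("sqlite_master", "sqlite_temp_master", 1)); /* 1 */
  printf("%d\n", matches("sqlite_master", "sqlite_temp_master", 0)); /* 0 */
  return 0;
}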
+/*
** Given the name of a column of the form X.Y.Z or Y.Z or just Z, look up
** that name in the set of source tables in pSrcList and make the pExpr
** expression node refer back to that source column. The following changes
@@ -103345,15 +103936,17 @@ static int lookupName(
}
assert( zDb==0 || zTab!=0 );
if( zTab ){
- const char *zTabName;
if( zDb ){
if( pTab->pSchema!=pSchema ) continue;
if( pSchema==0 && strcmp(zDb,"*")!=0 ) continue;
}
- zTabName = pItem->zAlias ? pItem->zAlias : pTab->zName;
- assert( zTabName!=0 );
- if( sqlite3StrICmp(zTabName, zTab)!=0 ){
- continue;
+ if( pItem->zAlias!=0 ){
+ if( sqlite3StrICmp(zTab, pItem->zAlias)!=0 ){
+ continue;
+ }
+ }else if( sqlite3StrICmp(zTab, pTab->zName)!=0 ){
+ if( pTab->tnum!=1 ) continue;
+ if( !isValidSchemaTableName(zTab, pTab, pSchema) ) continue;
}
assert( ExprUseYTab(pExpr) );
if( IN_RENAME_OBJECT && pItem->zAlias ){
@@ -103496,6 +104089,7 @@ static int lookupName(
if( pParse->bReturning ){
eNewExprOp = TK_REGISTER;
pExpr->op2 = TK_COLUMN;
+ pExpr->iColumn = iCol;
pExpr->iTable = pNC->uNC.iBaseReg + (pTab->nCol+1)*pExpr->iTable +
sqlite3TableColumnToStorage(pTab, iCol) + 1;
}else{
@@ -103908,14 +104502,10 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
if( 0==sqlite3ExprCanBeNull(pExpr->pLeft) && !IN_RENAME_OBJECT ){
testcase( ExprHasProperty(pExpr, EP_OuterON) );
assert( !ExprHasProperty(pExpr, EP_IntValue) );
- if( pExpr->op==TK_NOTNULL ){
- pExpr->u.zToken = "true";
- ExprSetProperty(pExpr, EP_IsTrue);
- }else{
- pExpr->u.zToken = "false";
- ExprSetProperty(pExpr, EP_IsFalse);
- }
- pExpr->op = TK_TRUEFALSE;
+ pExpr->u.iValue = (pExpr->op==TK_NOTNULL);
+ pExpr->flags |= EP_IntValue;
+ pExpr->op = TK_INTEGER;
+
for(i=0, p=pNC; p && i<ArraySize(anRef); p=p->pNext, i++){
p->nRef = anRef[i];
}
@@ -104217,8 +104807,8 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
assert( pNC->nRef>=nRef );
if( nRef!=pNC->nRef ){
ExprSetProperty(pExpr, EP_VarSelect);
- pNC->ncFlags |= NC_VarSelect;
}
+ pNC->ncFlags |= NC_Subquery;
}
break;
}
@@ -105172,49 +105762,122 @@ SQLITE_PRIVATE char sqlite3TableColumnAffinity(const Table *pTab, int iCol){
*/
SQLITE_PRIVATE char sqlite3ExprAffinity(const Expr *pExpr){
int op;
- while( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){
- assert( pExpr->op==TK_COLLATE
- || pExpr->op==TK_IF_NULL_ROW
- || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) );
- pExpr = pExpr->pLeft;
- assert( pExpr!=0 );
- }
op = pExpr->op;
- if( op==TK_REGISTER ) op = pExpr->op2;
- if( op==TK_COLUMN || op==TK_AGG_COLUMN ){
- assert( ExprUseYTab(pExpr) );
- assert( pExpr->y.pTab!=0 );
- return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
- }
- if( op==TK_SELECT ){
- assert( ExprUseXSelect(pExpr) );
- assert( pExpr->x.pSelect!=0 );
- assert( pExpr->x.pSelect->pEList!=0 );
- assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 );
- return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr);
- }
+ while( 1 /* exit-by-break */ ){
+ if( op==TK_COLUMN || (op==TK_AGG_COLUMN && pExpr->y.pTab!=0) ){
+ assert( ExprUseYTab(pExpr) );
+ assert( pExpr->y.pTab!=0 );
+ return sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
+ }
+ if( op==TK_SELECT ){
+ assert( ExprUseXSelect(pExpr) );
+ assert( pExpr->x.pSelect!=0 );
+ assert( pExpr->x.pSelect->pEList!=0 );
+ assert( pExpr->x.pSelect->pEList->a[0].pExpr!=0 );
+ return sqlite3ExprAffinity(pExpr->x.pSelect->pEList->a[0].pExpr);
+ }
#ifndef SQLITE_OMIT_CAST
- if( op==TK_CAST ){
- assert( !ExprHasProperty(pExpr, EP_IntValue) );
- return sqlite3AffinityType(pExpr->u.zToken, 0);
- }
+ if( op==TK_CAST ){
+ assert( !ExprHasProperty(pExpr, EP_IntValue) );
+ return sqlite3AffinityType(pExpr->u.zToken, 0);
+ }
#endif
- if( op==TK_SELECT_COLUMN ){
- assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) );
- assert( pExpr->iColumn < pExpr->iTable );
- assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr );
- return sqlite3ExprAffinity(
- pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr
- );
- }
- if( op==TK_VECTOR ){
- assert( ExprUseXList(pExpr) );
- return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr);
+ if( op==TK_SELECT_COLUMN ){
+ assert( pExpr->pLeft!=0 && ExprUseXSelect(pExpr->pLeft) );
+ assert( pExpr->iColumn < pExpr->iTable );
+ assert( pExpr->iTable==pExpr->pLeft->x.pSelect->pEList->nExpr );
+ return sqlite3ExprAffinity(
+ pExpr->pLeft->x.pSelect->pEList->a[pExpr->iColumn].pExpr
+ );
+ }
+ if( op==TK_VECTOR ){
+ assert( ExprUseXList(pExpr) );
+ return sqlite3ExprAffinity(pExpr->x.pList->a[0].pExpr);
+ }
+ if( ExprHasProperty(pExpr, EP_Skip|EP_IfNullRow) ){
+ assert( pExpr->op==TK_COLLATE
+ || pExpr->op==TK_IF_NULL_ROW
+ || (pExpr->op==TK_REGISTER && pExpr->op2==TK_IF_NULL_ROW) );
+ pExpr = pExpr->pLeft;
+ op = pExpr->op;
+ continue;
+ }
+ if( op!=TK_REGISTER || (op = pExpr->op2)==TK_REGISTER ) break;
}
return pExpr->affExpr;
}
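The rewritten sqlite3ExprAffinity() peels COLLATE, IF_NULL_ROW and REGISTER wrappers inside a single exit-by-break loop instead of a separate preamble, but the visible semantics are unchanged: affinity comes from the underlying column, and a COLLATE wrapper does not alter it. A small SQL-level demonstration (typeof() reports the value after column-affinity conversion):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t(a TEXT); INSERT INTO t VALUES(5);",
               0, 0, 0);
  sqlite3_prepare_v2(db,
      "SELECT typeof(a), typeof(a COLLATE NOCASE) FROM t;", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* Both columns print "text": the TEXT affinity of t.a converted the
    ** inserted integer, and COLLATE left the affinity untouched. */
    printf("%s %s\n", (const char*)sqlite3_column_text(pStmt,0),
                      (const char*)sqlite3_column_text(pStmt,1));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}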
/*
+** Make a guess at the possible datatypes of the result that could be
+** returned by an expression. Return a bitmask indicating the answer:
+**
+** 0x01 Numeric
+** 0x02 Text
+** 0x04 Blob
+**
+** If the expression must return NULL, then 0x00 is returned.
+*/
+SQLITE_PRIVATE int sqlite3ExprDataType(const Expr *pExpr){
+ while( pExpr ){
+ switch( pExpr->op ){
+ case TK_COLLATE:
+ case TK_IF_NULL_ROW:
+ case TK_UPLUS: {
+ pExpr = pExpr->pLeft;
+ break;
+ }
+ case TK_NULL: {
+ pExpr = 0;
+ break;
+ }
+ case TK_STRING: {
+ return 0x02;
+ }
+ case TK_BLOB: {
+ return 0x04;
+ }
+ case TK_CONCAT: {
+ return 0x06;
+ }
+ case TK_VARIABLE:
+ case TK_AGG_FUNCTION:
+ case TK_FUNCTION: {
+ return 0x07;
+ }
+ case TK_COLUMN:
+ case TK_AGG_COLUMN:
+ case TK_SELECT:
+ case TK_CAST:
+ case TK_SELECT_COLUMN:
+ case TK_VECTOR: {
+ int aff = sqlite3ExprAffinity(pExpr);
+ if( aff>=SQLITE_AFF_NUMERIC ) return 0x05;
+ if( aff==SQLITE_AFF_TEXT ) return 0x06;
+ return 0x07;
+ }
+ case TK_CASE: {
+ int res = 0;
+ int ii;
+ ExprList *pList = pExpr->x.pList;
+ assert( ExprUseXList(pExpr) && pList!=0 );
+ assert( pList->nExpr > 0);
+ for(ii=1; ii<pList->nExpr; ii+=2){
+ res |= sqlite3ExprDataType(pList->a[ii].pExpr);
+ }
+ if( pList->nExpr % 2 ){
+ res |= sqlite3ExprDataType(pList->a[pList->nExpr-1].pExpr);
+ }
+ return res;
+ }
+ default: {
+ return 0x01;
+ }
+ } /* End of switch(op) */
+ } /* End of while(pExpr) */
+ return 0x00;
+}
+
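The mask bits combine with bitwise OR, which is how callers reason about expressions that can take several shapes; sqlite3SubqueryColumnTypes() further down ORs the mask of every compound arm and demotes a column's affinity when an incompatible bit appears. A toy illustration of that arithmetic (the DT_* names are invented for this sketch):

#include <stdio.h>

/* Bit values returned by sqlite3ExprDataType() */
#define DT_NUMERIC 0x01
#define DT_TEXT    0x02
#define DT_BLOB    0x04

int main(void){
  /* A CASE expression unions the masks of its THEN/ELSE arms:
  ** CASE WHEN c THEN 'abc' ELSE 1 END  ->  text | numeric */
  int caseMask = DT_TEXT | DT_NUMERIC;   /* == 0x03 */

  /* The test used when computing subquery column types: a TEXT-affinity
  ** column is demoted to BLOB if some arm can also be numeric. */
  if( caseMask & DT_NUMERIC ){
    printf("demote TEXT affinity to BLOB (mask=0x%02x)\n", caseMask);
  }
  return 0;
}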
+/*
** Set the collating sequence for expression pExpr to be the collating
** sequence named by pToken. Return a pointer to a new Expr node that
** implements the COLLATE operator.
@@ -105301,7 +105964,9 @@ SQLITE_PRIVATE CollSeq *sqlite3ExprCollSeq(Parse *pParse, const Expr *pExpr){
while( p ){
int op = p->op;
if( op==TK_REGISTER ) op = p->op2;
- if( op==TK_AGG_COLUMN || op==TK_COLUMN || op==TK_TRIGGER ){
+ if( (op==TK_AGG_COLUMN && p->y.pTab!=0)
+ || op==TK_COLUMN || op==TK_TRIGGER
+ ){
int j;
assert( ExprUseYTab(p) );
assert( p->y.pTab!=0 );
@@ -108383,6 +109048,9 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){
SelectDest dest; /* How to deal with SELECT result */
int nReg; /* Registers to allocate */
Expr *pLimit; /* New limit expression */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrExplain; /* Address of OP_Explain instruction */
+#endif
Vdbe *v = pParse->pVdbe;
assert( v!=0 );
@@ -108435,8 +109103,9 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){
** In both cases, the query is augmented with "LIMIT 1". Any
** preexisting limit is discarded in place of the new LIMIT 1.
*/
- ExplainQueryPlan((pParse, 1, "%sSCALAR SUBQUERY %d",
+ ExplainQueryPlan2(addrExplain, (pParse, 1, "%sSCALAR SUBQUERY %d",
addrOnce?"":"CORRELATED ", pSel->selId));
+ sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, -1);
nReg = pExpr->op==TK_SELECT ? pSel->pEList->nExpr : 1;
sqlite3SelectDestInit(&dest, 0, pParse->nMem+1);
pParse->nMem += nReg;
@@ -108479,6 +109148,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(Parse *pParse, Expr *pExpr){
if( addrOnce ){
sqlite3VdbeJumpHere(v, addrOnce);
}
+ sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1);
/* Subroutine return */
assert( ExprUseYSub(pExpr) );
@@ -108887,6 +109557,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(
){
int iAddr;
Vdbe *v = pParse->pVdbe;
+ int nErr = pParse->nErr;
assert( v!=0 );
assert( pParse->iSelfTab!=0 );
if( pParse->iSelfTab>0 ){
@@ -108899,6 +109570,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGeneratedColumn(
sqlite3VdbeAddOp4(v, OP_Affinity, regOut, 1, 0, &pCol->affinity, 1);
}
if( iAddr ) sqlite3VdbeJumpHere(v, iAddr);
+ if( pParse->nErr>nErr ) pParse->db->errByteOffset = -1;
}
#endif /* SQLITE_OMIT_GENERATED_COLUMNS */
@@ -108915,6 +109587,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeGetColumnOfTable(
Column *pCol;
assert( v!=0 );
assert( pTab!=0 );
+ assert( iCol!=XN_EXPR );
if( iCol<0 || iCol==pTab->iPKey ){
sqlite3VdbeAddOp2(v, OP_Rowid, iTabCur, regOut);
VdbeComment((v, "%s.rowid", pTab->zName));
@@ -109151,10 +109824,13 @@ static int exprCodeInlineFunction(
** the type affinity of the argument. This is used for testing of
** the SQLite type logic.
*/
- const char *azAff[] = { "blob", "text", "numeric", "integer", "real" };
+ const char *azAff[] = { "blob", "text", "numeric", "integer",
+ "real", "flexnum" };
char aff;
assert( nFarg==1 );
aff = sqlite3ExprAffinity(pFarg->a[0].pExpr);
+ assert( aff<=SQLITE_AFF_NONE
+ || (aff>=SQLITE_AFF_BLOB && aff<=SQLITE_AFF_FLEXNUM) );
sqlite3VdbeLoadString(v, target,
(aff<=SQLITE_AFF_NONE) ? "none" : azAff[aff-SQLITE_AFF_BLOB]);
break;
@@ -109165,7 +109841,7 @@ static int exprCodeInlineFunction(
}
/*
-** Check to see if pExpr is one of the indexed expressions on pParse->pIdxExpr.
+** Check to see if pExpr is one of the indexed expressions on pParse->pIdxEpr.
** If it is, then resolve the expression by reading from the index and
** return the register into which the value has been read. If pExpr is
** not an indexed expression, then return negative.
@@ -109177,7 +109853,8 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
){
IndexedExpr *p;
Vdbe *v;
- for(p=pParse->pIdxExpr; p; p=p->pIENext){
+ for(p=pParse->pIdxEpr; p; p=p->pIENext){
+ u8 exprAff;
int iDataCur = p->iDataCur;
if( iDataCur<0 ) continue;
if( pParse->iSelfTab ){
@@ -109185,6 +109862,16 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
iDataCur = -1;
}
if( sqlite3ExprCompare(0, pExpr, p->pExpr, iDataCur)!=0 ) continue;
+ assert( p->aff>=SQLITE_AFF_BLOB && p->aff<=SQLITE_AFF_NUMERIC );
+ exprAff = sqlite3ExprAffinity(pExpr);
+ if( (exprAff<=SQLITE_AFF_BLOB && p->aff!=SQLITE_AFF_BLOB)
+ || (exprAff==SQLITE_AFF_TEXT && p->aff!=SQLITE_AFF_TEXT)
+ || (exprAff>=SQLITE_AFF_NUMERIC && p->aff!=SQLITE_AFF_NUMERIC)
+ ){
+ /* Affinity mismatch on a generated column */
+ continue;
+ }
+
v = pParse->pVdbe;
assert( v!=0 );
if( p->bMaybeNullRow ){
@@ -109197,10 +109884,10 @@ static SQLITE_NOINLINE int sqlite3IndexedExprLookup(
sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target);
VdbeComment((v, "%s expr-column %d", p->zIdxName, p->iIdxCol));
sqlite3VdbeGoto(v, 0);
- p = pParse->pIdxExpr;
- pParse->pIdxExpr = 0;
+ p = pParse->pIdxEpr;
+ pParse->pIdxEpr = 0;
sqlite3ExprCode(pParse, pExpr, target);
- pParse->pIdxExpr = p;
+ pParse->pIdxEpr = p;
sqlite3VdbeJumpHere(v, addr+2);
}else{
sqlite3VdbeAddOp3(v, OP_Column, p->iIdxCur, p->iIdxCol, target);
@@ -109239,7 +109926,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
expr_code_doover:
if( pExpr==0 ){
op = TK_NULL;
- }else if( pParse->pIdxExpr!=0
+ }else if( pParse->pIdxEpr!=0
&& !ExprHasProperty(pExpr, EP_Leaf)
&& (r1 = sqlite3IndexedExprLookup(pParse, pExpr, target))>=0
){
@@ -109256,15 +109943,16 @@ expr_code_doover:
assert( pExpr->iAgg>=0 && pExpr->iAgg<pAggInfo->nColumn );
pCol = &pAggInfo->aCol[pExpr->iAgg];
if( !pAggInfo->directMode ){
- assert( pCol->iMem>0 );
- return pCol->iMem;
+ return AggInfoColumnReg(pAggInfo, pExpr->iAgg);
}else if( pAggInfo->useSortingIdx ){
Table *pTab = pCol->pTab;
sqlite3VdbeAddOp3(v, OP_Column, pAggInfo->sortingIdxPTab,
pCol->iSorterColumn, target);
- if( pCol->iColumn<0 ){
+ if( pTab==0 ){
+ /* No comment added */
+ }else if( pCol->iColumn<0 ){
VdbeComment((v,"%s.rowid",pTab->zName));
- }else if( ALWAYS(pTab!=0) ){
+ }else{
VdbeComment((v,"%s.%s",
pTab->zName, pTab->aCol[pCol->iColumn].zCnName));
if( pTab->aCol[pCol->iColumn].affinity==SQLITE_AFF_REAL ){
@@ -109272,6 +109960,11 @@ expr_code_doover:
}
}
return target;
+ }else if( pExpr->y.pTab==0 ){
+ /* This case happens when the argument to an aggregate function
+ ** is rewritten by aggregateConvertIndexedExprRefToColumn() */
+ sqlite3VdbeAddOp3(v, OP_Column, pExpr->iTable, pExpr->iColumn, target);
+ return target;
}
/* Otherwise, fall thru into the TK_COLUMN case */
/* no break */ deliberate_fall_through
@@ -109292,7 +109985,7 @@ expr_code_doover:
assert( pExpr->y.pTab!=0 );
aff = sqlite3TableColumnAffinity(pExpr->y.pTab, pExpr->iColumn);
if( aff>SQLITE_AFF_BLOB ){
- static const char zAff[] = "B\000C\000D\000E";
+ static const char zAff[] = "B\000C\000D\000E\000F";
assert( SQLITE_AFF_BLOB=='A' );
assert( SQLITE_AFF_TEXT=='B' );
sqlite3VdbeAddOp4(v, OP_Affinity, iReg, 1, 0,
@@ -109569,7 +110262,7 @@ expr_code_doover:
assert( !ExprHasProperty(pExpr, EP_IntValue) );
sqlite3ErrorMsg(pParse, "misuse of aggregate: %#T()", pExpr);
}else{
- return pInfo->aFunc[pExpr->iAgg].iMem;
+ return AggInfoFuncReg(pInfo, pExpr->iAgg);
}
break;
}
@@ -109758,10 +110451,13 @@ expr_code_doover:
return target;
}
case TK_COLLATE: {
- if( !ExprHasProperty(pExpr, EP_Collate)
- && ALWAYS(pExpr->pLeft)
- && pExpr->pLeft->op==TK_FUNCTION
- ){
+ if( !ExprHasProperty(pExpr, EP_Collate) ){
+ /* A TK_COLLATE Expr node without the EP_Collate tag is a so-called
+ ** "SOFT-COLLATE" that is added to constraints that are pushed down
+ ** from outer queries into sub-queries by the push-down optimization.
+ ** Clear subtypes as subtypes may not cross a subquery boundary.
+ */
+ assert( pExpr->pLeft );
inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
if( inReg!=target ){
sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target);
@@ -109858,7 +110554,7 @@ expr_code_doover:
if( pAggInfo ){
assert( pExpr->iAgg>=0 && pExpr->iAgg<pAggInfo->nColumn );
if( !pAggInfo->directMode ){
- inReg = pAggInfo->aCol[pExpr->iAgg].iMem;
+ inReg = AggInfoColumnReg(pAggInfo, pExpr->iAgg);
break;
}
if( pExpr->pAggInfo->useSortingIdx ){
@@ -109869,16 +110565,22 @@ expr_code_doover:
break;
}
}
- addrINR = sqlite3VdbeAddOp1(v, OP_IfNullRow, pExpr->iTable);
- /* Temporarily disable factoring of constant expressions, since
- ** even though expressions may appear to be constant, they are not
- ** really constant because they originate from the right-hand side
- ** of a LEFT JOIN. */
- pParse->okConstFactor = 0;
+ addrINR = sqlite3VdbeAddOp3(v, OP_IfNullRow, pExpr->iTable, 0, target);
+ /* The OP_IfNullRow opcode above can overwrite the result register with
+ ** NULL. So we have to ensure that the result register is not a value
+ ** that is supposed to be a constant. Two defenses are needed:
+ ** (1) Temporarily disable factoring of constant expressions
+ ** (2) Make sure the computed value really is stored in register
+ ** "target" and not someplace else.
+ */
+ pParse->okConstFactor = 0; /* note (1) above */
inReg = sqlite3ExprCodeTarget(pParse, pExpr->pLeft, target);
pParse->okConstFactor = okConstFactor;
+ if( inReg!=target ){ /* note (2) above */
+ sqlite3VdbeAddOp2(v, OP_SCopy, inReg, target);
+ inReg = target;
+ }
sqlite3VdbeJumpHere(v, addrINR);
- sqlite3VdbeChangeP3(v, addrINR, inReg);
break;
}
@@ -111289,10 +111991,8 @@ SQLITE_PRIVATE int sqlite3ReferencesSrcList(Parse *pParse, Expr *pExpr, SrcList
** it does, make a copy. This is done because the pExpr argument is
** subject to change.
**
-** The copy is stored on pParse->pConstExpr with a register number of 0.
-** This will cause the expression to be deleted automatically when the
-** Parse object is destroyed, but the zero register number means that it
-** will not generate any code in the preamble.
+** The copy is scheduled for deletion using the sqlite3ExprDeferredDelete()
+** which builds on the sqlite3ParserAddCleanup() mechanism.
*/
static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){
if( ALWAYS(!ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced))
@@ -111303,7 +112003,6 @@ static int agginfoPersistExprCb(Walker *pWalker, Expr *pExpr){
Parse *pParse = pWalker->pParse;
sqlite3 *db = pParse->db;
if( pExpr->op!=TK_AGG_FUNCTION ){
- assert( pExpr->op==TK_AGG_COLUMN || pExpr->op==TK_IF_NULL_ROW );
assert( iAgg>=0 && iAgg<pAggInfo->nColumn );
if( pAggInfo->aCol[iAgg].pCExpr==pExpr ){
pExpr = sqlite3ExprDup(db, pExpr, 0);
@@ -111371,6 +112070,73 @@ static int addAggInfoFunc(sqlite3 *db, AggInfo *pInfo){
}
/*
+** Search the AggInfo object for an aCol[] entry that has iTable and iColumn.
+** Update pExpr->iAgg to hold the index in aCol[] of the entry that
+** describes that column.
+**
+** If no prior entry is found, create a new one. The new column will
+** have an index of pAggInfo->nColumn-1.
+*/
+static void findOrCreateAggInfoColumn(
+ Parse *pParse, /* Parsing context */
+ AggInfo *pAggInfo, /* The AggInfo object to search and/or modify */
+ Expr *pExpr /* Expr describing the column to find or insert */
+){
+ struct AggInfo_col *pCol;
+ int k;
+
+ assert( pAggInfo->iFirstReg==0 );
+ pCol = pAggInfo->aCol;
+ for(k=0; k<pAggInfo->nColumn; k++, pCol++){
+ if( pCol->iTable==pExpr->iTable
+ && pCol->iColumn==pExpr->iColumn
+ && pExpr->op!=TK_IF_NULL_ROW
+ ){
+ goto fix_up_expr;
+ }
+ }
+ k = addAggInfoColumn(pParse->db, pAggInfo);
+ if( k<0 ){
+ /* OOM on resize */
+ assert( pParse->db->mallocFailed );
+ return;
+ }
+ pCol = &pAggInfo->aCol[k];
+ assert( ExprUseYTab(pExpr) );
+ pCol->pTab = pExpr->y.pTab;
+ pCol->iTable = pExpr->iTable;
+ pCol->iColumn = pExpr->iColumn;
+ pCol->iSorterColumn = -1;
+ pCol->pCExpr = pExpr;
+ if( pAggInfo->pGroupBy && pExpr->op!=TK_IF_NULL_ROW ){
+ int j, n;
+ ExprList *pGB = pAggInfo->pGroupBy;
+ struct ExprList_item *pTerm = pGB->a;
+ n = pGB->nExpr;
+ for(j=0; j<n; j++, pTerm++){
+ Expr *pE = pTerm->pExpr;
+ if( pE->op==TK_COLUMN
+ && pE->iTable==pExpr->iTable
+ && pE->iColumn==pExpr->iColumn
+ ){
+ pCol->iSorterColumn = j;
+ break;
+ }
+ }
+ }
+ if( pCol->iSorterColumn<0 ){
+ pCol->iSorterColumn = pAggInfo->nSortingColumn++;
+ }
+fix_up_expr:
+ ExprSetVVAProperty(pExpr, EP_NoReduce);
+ assert( pExpr->pAggInfo==0 || pExpr->pAggInfo==pAggInfo );
+ pExpr->pAggInfo = pAggInfo;
+ if( pExpr->op==TK_COLUMN ){
+ pExpr->op = TK_AGG_COLUMN;
+ }
+ pExpr->iAgg = (i16)k;
+}
+
+/*
** This is the xExprCallback for a tree walker. It is used to
** implement sqlite3ExprAnalyzeAggregates(). See sqlite3ExprAnalyzeAggregates
** for additional information.
@@ -111383,7 +112149,37 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
AggInfo *pAggInfo = pNC->uNC.pAggInfo;
assert( pNC->ncFlags & NC_UAggInfo );
+ assert( pAggInfo->iFirstReg==0 );
switch( pExpr->op ){
+ default: {
+ IndexedExpr *pIEpr;
+ Expr tmp;
+ assert( pParse->iSelfTab==0 );
+ if( (pNC->ncFlags & NC_InAggFunc)==0 ) break;
+ if( pParse->pIdxEpr==0 ) break;
+ for(pIEpr=pParse->pIdxEpr; pIEpr; pIEpr=pIEpr->pIENext){
+ int iDataCur = pIEpr->iDataCur;
+ if( iDataCur<0 ) continue;
+ if( sqlite3ExprCompare(0, pExpr, pIEpr->pExpr, iDataCur)==0 ) break;
+ }
+ if( pIEpr==0 ) break;
+ if( NEVER(!ExprUseYTab(pExpr)) ) break;
+ if( pExpr->pAggInfo!=0 ) break; /* Already resolved by outer context */
+
+ /* If we reach this point, it means that expression pExpr can be
+ ** translated into a reference to an index column as described by
+ ** pIEpr.
+ */
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.op = TK_AGG_COLUMN;
+ tmp.iTable = pIEpr->iIdxCur;
+ tmp.iColumn = pIEpr->iIdxCol;
+ findOrCreateAggInfoColumn(pParse, pAggInfo, &tmp);
+ pAggInfo->aCol[tmp.iAgg].pCExpr = pExpr;
+ pExpr->pAggInfo = pAggInfo;
+ pExpr->iAgg = tmp.iAgg;
+ return WRC_Prune;
+ }
case TK_IF_NULL_ROW:
case TK_AGG_COLUMN:
case TK_COLUMN: {
@@ -111395,67 +112191,9 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
if( ALWAYS(pSrcList!=0) ){
SrcItem *pItem = pSrcList->a;
for(i=0; i<pSrcList->nSrc; i++, pItem++){
- struct AggInfo_col *pCol;
assert( !ExprHasProperty(pExpr, EP_TokenOnly|EP_Reduced) );
if( pExpr->iTable==pItem->iCursor ){
- /* If we reach this point, it means that pExpr refers to a table
- ** that is in the FROM clause of the aggregate query.
- **
- ** Make an entry for the column in pAggInfo->aCol[] if there
- ** is not an entry there already.
- */
- int k;
- pCol = pAggInfo->aCol;
- for(k=0; k<pAggInfo->nColumn; k++, pCol++){
- if( pCol->iTable==pExpr->iTable
- && pCol->iColumn==pExpr->iColumn
- && pExpr->op!=TK_IF_NULL_ROW
- ){
- break;
- }
- }
- if( (k>=pAggInfo->nColumn)
- && (k = addAggInfoColumn(pParse->db, pAggInfo))>=0
- ){
- pCol = &pAggInfo->aCol[k];
- assert( ExprUseYTab(pExpr) );
- pCol->pTab = pExpr->y.pTab;
- pCol->iTable = pExpr->iTable;
- pCol->iColumn = pExpr->iColumn;
- pCol->iMem = ++pParse->nMem;
- pCol->iSorterColumn = -1;
- pCol->pCExpr = pExpr;
- if( pAggInfo->pGroupBy && pExpr->op!=TK_IF_NULL_ROW ){
- int j, n;
- ExprList *pGB = pAggInfo->pGroupBy;
- struct ExprList_item *pTerm = pGB->a;
- n = pGB->nExpr;
- for(j=0; j<n; j++, pTerm++){
- Expr *pE = pTerm->pExpr;
- if( pE->op==TK_COLUMN
- && pE->iTable==pExpr->iTable
- && pE->iColumn==pExpr->iColumn
- ){
- pCol->iSorterColumn = j;
- break;
- }
- }
- }
- if( pCol->iSorterColumn<0 ){
- pCol->iSorterColumn = pAggInfo->nSortingColumn++;
- }
- }
- /* There is now an entry for pExpr in pAggInfo->aCol[] (either
- ** because it was there before or because we just created it).
- ** Convert the pExpr to be a TK_AGG_COLUMN referring to that
- ** pAggInfo->aCol[] entry.
- */
- ExprSetVVAProperty(pExpr, EP_NoReduce);
- pExpr->pAggInfo = pAggInfo;
- if( pExpr->op==TK_COLUMN ){
- pExpr->op = TK_AGG_COLUMN;
- }
- pExpr->iAgg = (i16)k;
+ findOrCreateAggInfoColumn(pParse, pAggInfo, pExpr);
break;
} /* endif pExpr->iTable==pItem->iCursor */
} /* end loop over pSrcList */
@@ -111485,7 +112223,6 @@ static int analyzeAggregate(Walker *pWalker, Expr *pExpr){
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
pItem = &pAggInfo->aFunc[i];
pItem->pFExpr = pExpr;
- pItem->iMem = ++pParse->nMem;
assert( ExprUseUToken(pExpr) );
pItem->pFunc = sqlite3FindFunction(pParse->db,
pExpr->u.zToken,
@@ -112382,13 +113119,14 @@ static void renameTokenCheckAll(Parse *pParse, const void *pPtr){
assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 );
if( pParse->nErr==0 ){
const RenameToken *p;
- u8 i = 0;
+ u32 i = 1;
for(p=pParse->pRename; p; p=p->pNext){
if( p->p ){
assert( p->p!=pPtr );
- i += *(u8*)(p->p);
+ i += *(u8*)(p->p) | 1;
}
}
+ assert( i>0 );
}
}
#else
@@ -115518,6 +116256,8 @@ static int analysisLoader(void *pData, int argc, char **argv, char **NotUsed){
** and its contents.
*/
SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){
+ assert( db!=0 );
+ assert( pIdx!=0 );
#ifdef SQLITE_ENABLE_STAT4
if( pIdx->aSample ){
int j;
@@ -115527,7 +116267,7 @@ SQLITE_PRIVATE void sqlite3DeleteIndexSamples(sqlite3 *db, Index *pIdx){
}
sqlite3DbFree(db, pIdx->aSample);
}
- if( db && db->pnBytesFreed==0 ){
+ if( db->pnBytesFreed==0 ){
pIdx->nSample = 0;
pIdx->aSample = 0;
}
@@ -115946,7 +116686,7 @@ static void attachFunc(
char *zErr = 0;
unsigned int flags;
Db *aNew; /* New array of Db pointers */
- Db *pNew; /* Db object for the newly attached database */
+ Db *pNew = 0; /* Db object for the newly attached database */
char *zErrDyn = 0;
sqlite3_vfs *pVfs;
@@ -115966,13 +116706,26 @@ static void attachFunc(
/* This is not a real ATTACH. Instead, this routine is being called
** from sqlite3_deserialize() to close database db->init.iDb and
** reopen it as a MemDB */
+ Btree *pNewBt = 0;
pVfs = sqlite3_vfs_find("memdb");
if( pVfs==0 ) return;
- pNew = &db->aDb[db->init.iDb];
- if( pNew->pBt ) sqlite3BtreeClose(pNew->pBt);
- pNew->pBt = 0;
- pNew->pSchema = 0;
- rc = sqlite3BtreeOpen(pVfs, "x\0", db, &pNew->pBt, 0, SQLITE_OPEN_MAIN_DB);
+ rc = sqlite3BtreeOpen(pVfs, "x\0", db, &pNewBt, 0, SQLITE_OPEN_MAIN_DB);
+ if( rc==SQLITE_OK ){
+ Schema *pNewSchema = sqlite3SchemaGet(db, pNewBt);
+ if( pNewSchema ){
+ /* Both the Btree and the new Schema were allocated successfully.
+ ** Close the old db and update the aDb[] slot with the new memdb
+ ** values. */
+ pNew = &db->aDb[db->init.iDb];
+ if( ALWAYS(pNew->pBt) ) sqlite3BtreeClose(pNew->pBt);
+ pNew->pBt = pNewBt;
+ pNew->pSchema = pNewSchema;
+ }else{
+ sqlite3BtreeClose(pNewBt);
+ rc = SQLITE_NOMEM;
+ }
+ }
+ if( rc ) goto attach_error;
}else{
/* This is a real ATTACH
**
@@ -116085,7 +116838,7 @@ static void attachFunc(
}
#endif
if( rc ){
- if( !REOPEN_AS_MEMDB(db) ){
+ if( ALWAYS(!REOPEN_AS_MEMDB(db)) ){
int iDb = db->nDb - 1;
assert( iDb>=2 );
if( db->aDb[iDb].pBt ){
@@ -116202,6 +116955,8 @@ static void codeAttach(
sqlite3* db = pParse->db;
int regArgs;
+ if( SQLITE_OK!=sqlite3ReadSchema(pParse) ) goto attach_end;
+
if( pParse->nErr ) goto attach_end;
memset(&sName, 0, sizeof(NameContext));
sName.pParse = pParse;
@@ -117044,6 +117799,7 @@ SQLITE_PRIVATE void sqlite3NestedParse(Parse *pParse, const char *zFormat, ...){
char saveBuf[PARSE_TAIL_SZ];
if( pParse->nErr ) return;
+ if( pParse->eParseMode ) return;
assert( pParse->nested<10 ); /* Nesting should only be of limited depth */
va_start(ap, zFormat);
zSql = sqlite3VMPrintf(db, zFormat, ap);
@@ -118187,7 +118943,7 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
if( pParse->pNewTrigger ){
sqlite3ErrorMsg(pParse, "cannot use RETURNING in a trigger");
}else{
- assert( pParse->bReturning==0 );
+ assert( pParse->bReturning==0 || pParse->ifNotExists );
}
pParse->bReturning = 1;
pRet = sqlite3DbMallocZero(db, sizeof(*pRet));
@@ -118213,7 +118969,8 @@ SQLITE_PRIVATE void sqlite3AddReturning(Parse *pParse, ExprList *pList){
pRet->retTStep.pTrig = &pRet->retTrig;
pRet->retTStep.pExprList = pList;
pHash = &(db->aDb[1].pSchema->trigHash);
- assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0 || pParse->nErr );
+ assert( sqlite3HashFind(pHash, RETURNING_TRIGGER_NAME)==0
+ || pParse->nErr || pParse->ifNotExists );
if( sqlite3HashInsert(pHash, RETURNING_TRIGGER_NAME, &pRet->retTrig)
==&pRet->retTrig ){
sqlite3OomFault(db);
@@ -118741,6 +119498,14 @@ SQLITE_PRIVATE void sqlite3AddGenerated(Parse *pParse, Expr *pExpr, Token *pType
if( pCol->colFlags & COLFLAG_PRIMKEY ){
makeColumnPartOfPrimaryKey(pParse, pCol); /* For the error message */
}
+ if( ALWAYS(pExpr) && pExpr->op==TK_ID ){
+ /* The value of a generated column needs to be a real expression, not
+ ** just a reference to another column, in order for covering index
+ ** optimizations to work correctly. So if the value is a bare column
+ ** reference, turn it into a real expression by adding a unary "+". */
+ pExpr = sqlite3PExpr(pParse, TK_UPLUS, pExpr, 0);
+ }
+ if( pExpr && pExpr->op!=TK_RAISE ) pExpr->affExpr = pCol->affinity;
sqlite3ColumnSetExpr(pParse, pTab, pCol, pExpr);
pExpr = 0;
goto generated_done;
@@ -118877,7 +119642,8 @@ static char *createTableStmt(sqlite3 *db, Table *p){
/* SQLITE_AFF_TEXT */ " TEXT",
/* SQLITE_AFF_NUMERIC */ " NUM",
/* SQLITE_AFF_INTEGER */ " INT",
- /* SQLITE_AFF_REAL */ " REAL"
+ /* SQLITE_AFF_REAL */ " REAL",
+ /* SQLITE_AFF_FLEXNUM */ " NUM",
};
int len;
const char *zType;
@@ -118893,10 +119659,12 @@ static char *createTableStmt(sqlite3 *db, Table *p){
testcase( pCol->affinity==SQLITE_AFF_NUMERIC );
testcase( pCol->affinity==SQLITE_AFF_INTEGER );
testcase( pCol->affinity==SQLITE_AFF_REAL );
+ testcase( pCol->affinity==SQLITE_AFF_FLEXNUM );
zType = azType[pCol->affinity - SQLITE_AFF_BLOB];
len = sqlite3Strlen30(zType);
assert( pCol->affinity==SQLITE_AFF_BLOB
+ || pCol->affinity==SQLITE_AFF_FLEXNUM
|| pCol->affinity==sqlite3AffinityType(zType, 0) );
memcpy(&zStmt[k], zType, len);
k += len;
@@ -119311,6 +120079,7 @@ SQLITE_PRIVATE int sqlite3ShadowTableName(sqlite3 *db, const char *zName){
** not pass them into code generator routines by mistake.
*/
static int markImmutableExprStep(Walker *pWalker, Expr *pExpr){
+ (void)pWalker;
ExprSetVVAProperty(pExpr, EP_Immutable);
return WRC_Continue;
}
@@ -119877,8 +120646,7 @@ static SQLITE_NOINLINE int viewGetColumnNames(Parse *pParse, Table *pTable){
&& pTable->nCol==pSel->pEList->nExpr
){
assert( db->mallocFailed==0 );
- sqlite3SelectAddColumnTypeAndCollation(pParse, pTable, pSel,
- SQLITE_AFF_NONE);
+ sqlite3SubqueryColumnTypes(pParse, pTable, pSel, SQLITE_AFF_NONE);
}
}else{
/* CREATE VIEW name AS... without an argument list. Construct
@@ -123444,7 +124212,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
#endif /* SQLITE_OMIT_TRUNCATE_OPTIMIZATION */
{
u16 wcf = WHERE_ONEPASS_DESIRED|WHERE_DUPLICATES_OK;
- if( sNC.ncFlags & NC_VarSelect ) bComplex = 1;
+ if( sNC.ncFlags & NC_Subquery ) bComplex = 1;
wcf |= (bComplex ? 0 : WHERE_ONEPASS_MULTIROW);
if( HasRowid(pTab) ){
/* For a rowid table, initialize the RowSet to an empty set */
@@ -125062,7 +125830,7 @@ SQLITE_PRIVATE void sqlite3QuoteValue(StrAccum *pStr, sqlite3_value *pValue){
}
case SQLITE_BLOB: {
char const *zBlob = sqlite3_value_blob(pValue);
- int nBlob = sqlite3_value_bytes(pValue);
+ i64 nBlob = sqlite3_value_bytes(pValue);
assert( zBlob==sqlite3_value_blob(pValue) ); /* No encoding change */
sqlite3StrAccumEnlarge(pStr, nBlob*2 + 4);
if( pStr->accError==0 ){
@@ -125204,6 +125972,96 @@ static void hexFunc(
}
/*
+** Buffer zStr contains nStr bytes of utf-8 encoded text. Return 1 if zStr
+** contains character ch, or 0 if it does not.
+*/
+static int strContainsChar(const u8 *zStr, int nStr, u32 ch){
+ const u8 *zEnd = &zStr[nStr];
+ const u8 *z = zStr;
+ while( z<zEnd ){
+ u32 tst = Utf8Read(z);
+ if( tst==ch ) return 1;
+ }
+ return 0;
+}
+
+/*
+** The unhex() function. This function may be invoked with either one or
+** two arguments. In both cases the first argument is interpreted as
+** a text value containing a set of pairs of hexadecimal digits which are
+** decoded and returned as a blob.
+**
+** If there is only a single argument, then it must consist only of an
+** even number of hexadecimal digits. Otherwise, return NULL.
+**
+** Or, if there is a second argument, then any character that appears in
+** the second argument is also allowed to appear between pairs of hexadecimal
+** digits in the first argument. If any other character appears in the
+** first argument, or if one of the allowed characters appears between
+** two hexadecimal digits that make up a single byte, NULL is returned.
+**
+** The following expressions are all true:
+**
+** unhex('ABCD') IS x'ABCD'
+** unhex('AB CD') IS NULL
+** unhex('AB CD', ' ') IS x'ABCD'
+** unhex('A BCD', ' ') IS NULL
+*/
+static void unhexFunc(
+ sqlite3_context *pCtx,
+ int argc,
+ sqlite3_value **argv
+){
+ const u8 *zPass = (const u8*)"";
+ int nPass = 0;
+ const u8 *zHex = sqlite3_value_text(argv[0]);
+ int nHex = sqlite3_value_bytes(argv[0]);
+#ifdef SQLITE_DEBUG
+ const u8 *zEnd = zHex ? &zHex[nHex] : 0;
+#endif
+ u8 *pBlob = 0;
+ u8 *p = 0;
+
+ assert( argc==1 || argc==2 );
+ if( argc==2 ){
+ zPass = sqlite3_value_text(argv[1]);
+ nPass = sqlite3_value_bytes(argv[1]);
+ }
+ if( !zHex || !zPass ) return;
+
+ p = pBlob = contextMalloc(pCtx, (nHex/2)+1);
+ if( pBlob ){
+ u8 c; /* Most significant digit of next byte */
+ u8 d; /* Least significant digit of next byte */
+
+ while( (c = *zHex)!=0x00 ){
+ while( !sqlite3Isxdigit(c) ){
+ u32 ch = Utf8Read(zHex);
+ assert( zHex<=zEnd );
+ if( !strContainsChar(zPass, nPass, ch) ) goto unhex_null;
+ c = *zHex;
+ if( c==0x00 ) goto unhex_done;
+ }
+ zHex++;
+ assert( *zEnd==0x00 );
+ assert( zHex<=zEnd );
+ d = *(zHex++);
+ if( !sqlite3Isxdigit(d) ) goto unhex_null;
+ *(p++) = (sqlite3HexToInt(c)<<4) | sqlite3HexToInt(d);
+ }
+ }
+
+ unhex_done:
+ sqlite3_result_blob(pCtx, pBlob, (p - pBlob), sqlite3_free);
+ return;
+
+ unhex_null:
+ sqlite3_free(pBlob);
+ return;
+}
+
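A small driver exercising the new function through the public API; it assumes a library built from this version (unhex() is new in SQLite 3.41.0) and prints the decoded bytes back as hex:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  const unsigned char *pBlob;
  int i, n;

  sqlite3_open(":memory:", &db);
  sqlite3_prepare_v2(db, "SELECT unhex('AB CD', ' ');", -1, &pStmt, 0);
  if( sqlite3_step(pStmt)==SQLITE_ROW ){
    pBlob = sqlite3_column_blob(pStmt, 0);
    n = sqlite3_column_bytes(pStmt, 0);
    for(i=0; i<n; i++) printf("%02X", pBlob[i]);  /* prints "ABCD" */
    putchar('\n');
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}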
+
+/*
** The zeroblob(N) function returns a zero-filled blob of size N bytes.
*/
static void zeroblobFunc(
@@ -125420,6 +126278,9 @@ static void unknownFunc(
sqlite3_value **argv
){
/* no-op */
+ (void)context;
+ (void)argc;
+ (void)argv;
}
#endif /*SQLITE_ENABLE_UNKNOWN_SQL_FUNCTION*/
@@ -126049,6 +126910,18 @@ static double xCeil(double x){ return ceil(x); }
static double xFloor(double x){ return floor(x); }
/*
+** Some systems do not have log2() and log10() in their standard math
+** libraries.
+*/
+#if defined(HAVE_LOG10) && HAVE_LOG10==0
+# define log10(X) (0.4342944819032517867*log(X))
+#endif
+#if defined(HAVE_LOG2) && HAVE_LOG2==0
+# define log2(X) (1.442695040888963456*log(X))
+#endif
+
+
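The two constants are the change-of-base factors 1/ln(10) and 1/ln(2), since log_b(x) = ln(x)/ln(b). A quick sanity-check program (log2() needs C99):

#include <stdio.h>
#include <math.h>

int main(void){
  double x = 1234.5678;
  printf("%.15g %.15g\n", log10(x), 0.4342944819032517867*log(x));
  printf("%.15g %.15g\n", log2(x),  1.442695040888963456*log(x));
  printf("1/ln(10)=%.19g  1/ln(2)=%.19g\n", 1.0/log(10.0), 1.0/log(2.0));
  return 0;
}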
+/*
** Implementation of SQL functions:
**
** ln(X) - natural logarithm
@@ -126086,17 +126959,15 @@ static void logFunc(
}
ans = log(x)/b;
}else{
- ans = log(x);
switch( SQLITE_PTR_TO_INT(sqlite3_user_data(context)) ){
case 1:
- /* Convert from natural logarithm to log base 10 */
- ans /= M_LN10;
+ ans = log10(x);
break;
case 2:
- /* Convert from natural logarithm to log base 2 */
- ans /= M_LN2;
+ ans = log2(x);
break;
default:
+ ans = log(x);
break;
}
}
@@ -126165,6 +127036,7 @@ static void piFunc(
sqlite3_value **argv
){
assert( argc==0 );
+ (void)argv;
sqlite3_result_double(context, M_PI);
}
@@ -126265,6 +127137,8 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION(upper, 1, 0, 0, upperFunc ),
FUNCTION(lower, 1, 0, 0, lowerFunc ),
FUNCTION(hex, 1, 0, 0, hexFunc ),
+ FUNCTION(unhex, 1, 0, 0, unhexFunc ),
+ FUNCTION(unhex, 2, 0, 0, unhexFunc ),
INLINE_FUNC(ifnull, 2, INLINEFUNC_coalesce, 0 ),
VFUNCTION(random, 0, 0, 0, randomFunc ),
VFUNCTION(randomblob, 1, 0, 0, randomBlob ),
@@ -129649,6 +130523,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
case OE_Fail: {
char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName,
pCol->zCnName);
+ testcase( zMsg==0 && db->mallocFailed==0 );
sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL,
onError, iReg);
sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC);
@@ -131540,6 +132415,8 @@ struct sqlite3_api_routines {
const char *(*db_name)(sqlite3*,int);
/* Version 3.40.0 and later */
int (*value_encoding)(sqlite3_value*);
+ /* Version 3.41.0 and later */
+ int (*is_interrupted)(sqlite3*);
};
/*
@@ -131866,6 +132743,8 @@ typedef int (*sqlite3_loadext_entry)(
#define sqlite3_db_name sqlite3_api->db_name
/* Version 3.40.0 and later */
#define sqlite3_value_encoding sqlite3_api->value_encoding
+/* Version 3.41.0 and later */
+#define sqlite3_is_interrupted sqlite3_api->is_interrupted
#endif /* !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION) */
#if !defined(SQLITE_CORE) && !defined(SQLITE_OMIT_LOAD_EXTENSION)
@@ -132380,7 +133259,9 @@ static const sqlite3_api_routines sqlite3Apis = {
#endif
sqlite3_db_name,
/* Version 3.40.0 and later */
- sqlite3_value_encoding
+ sqlite3_value_encoding,
+ /* Version 3.41.0 and later */
+ sqlite3_is_interrupted
};
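sqlite3_is_interrupted() (new in 3.41.0) lets long-running extension or application code poll whether sqlite3_interrupt() has been called on the connection, instead of waiting for the next opcode boundary. A hypothetical sketch; doLongWork() and its loop bound are illustrative only:

#include "sqlite3.h"

/* Bail out promptly if an interrupt is pending on this connection. */
static int doLongWork(sqlite3 *db){
  int i;
  for(i=0; i<1000000; i++){
    if( sqlite3_is_interrupted(db) ) return SQLITE_INTERRUPT;
    /* ... one unit of work ... */
  }
  return SQLITE_OK;
}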
/* True if x is the directory separator character
@@ -135217,12 +136098,21 @@ SQLITE_PRIVATE void sqlite3Pragma(
** will also prepopulate the cursor column cache that is used
** by the OP_IsType code, so it is a required step.
*/
- mxCol = pTab->nCol-1;
- while( mxCol>=0
- && ((pTab->aCol[mxCol].colFlags & COLFLAG_VIRTUAL)!=0
- || pTab->iPKey==mxCol) ) mxCol--;
+ assert( !IsVirtual(pTab) );
+ if( HasRowid(pTab) ){
+ mxCol = -1;
+ for(j=0; j<pTab->nCol; j++){
+ if( (pTab->aCol[j].colFlags & COLFLAG_VIRTUAL)==0 ) mxCol++;
+ }
+ if( mxCol==pTab->iPKey ) mxCol--;
+ }else{
+ /* COLFLAG_VIRTUAL columns are not included in the WITHOUT ROWID
+ ** PK index column-count, so there is no need to account for them
+ ** in this case. */
+ mxCol = sqlite3PrimaryKeyIndex(pTab)->nColumn-1;
+ }
if( mxCol>=0 ){
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, mxCol, 3);
+ sqlite3VdbeAddOp3(v, OP_Column, iDataCur, mxCol, 3);
sqlite3VdbeTypeofColumn(v, 3);
}
@@ -135390,7 +136280,8 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( !isQuick ){ /* Omit the remaining tests for quick_check */
/* Validate index entries for the current row */
for(j=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, j++){
- int jmp2, jmp3, jmp4, jmp5;
+ int jmp2, jmp3, jmp4, jmp5, label6;
+ int kk;
int ckUniq = sqlite3VdbeMakeLabel(pParse);
if( pPk==pIdx ) continue;
r1 = sqlite3GenerateIndexKey(pParse, pIdx, iDataCur, 0, 0, &jmp3,
@@ -135408,13 +136299,49 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3VdbeAddOp3(v, OP_Concat, 4, 3, 3);
jmp4 = integrityCheckResultRow(v);
sqlite3VdbeJumpHere(v, jmp2);
+
+ /* The OP_IdxRowid opcode is an optimized version of OP_Column
+ ** that extracts the rowid off the end of the index record.
+ ** But it only works correctly if the index record does not have
+ ** any extra bytes at the end. Verify that this is the case. */
+ if( HasRowid(pTab) ){
+ int jmp7;
+ sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur+j, 3);
+ jmp7 = sqlite3VdbeAddOp3(v, OP_Eq, 3, 0, r1+pIdx->nColumn-1);
+ VdbeCoverage(v);
+ sqlite3VdbeLoadString(v, 3,
+ "rowid not at end-of-record for row ");
+ sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3);
+ sqlite3VdbeLoadString(v, 4, " of index ");
+ sqlite3VdbeGoto(v, jmp5-1);
+ sqlite3VdbeJumpHere(v, jmp7);
+ }
+
+ /* Any indexed columns with non-BINARY collations must still hold
+ ** the exact same text value as the table. */
+ label6 = 0;
+ for(kk=0; kk<pIdx->nKeyCol; kk++){
+ if( pIdx->azColl[kk]==sqlite3StrBINARY ) continue;
+ if( label6==0 ) label6 = sqlite3VdbeMakeLabel(pParse);
+ sqlite3VdbeAddOp3(v, OP_Column, iIdxCur+j, kk, 3);
+ sqlite3VdbeAddOp3(v, OP_Ne, 3, label6, r1+kk); VdbeCoverage(v);
+ }
+ if( label6 ){
+ int jmp6 = sqlite3VdbeAddOp0(v, OP_Goto);
+ sqlite3VdbeResolveLabel(v, label6);
+ sqlite3VdbeLoadString(v, 3, "row ");
+ sqlite3VdbeAddOp3(v, OP_Concat, 7, 3, 3);
+ sqlite3VdbeLoadString(v, 4, " values differ from index ");
+ sqlite3VdbeGoto(v, jmp5-1);
+ sqlite3VdbeJumpHere(v, jmp6);
+ }
+
/* For UNIQUE indexes, verify that only one entry exists with the
** current key. The entry is unique if (1) any column is NULL
** or (2) the next entry has a different key */
if( IsUniqueIndex(pIdx) ){
int uniqOk = sqlite3VdbeMakeLabel(pParse);
int jmp6;
- int kk;
for(kk=0; kk<pIdx->nKeyCol; kk++){
int iCol = pIdx->aiColumn[kk];
assert( iCol!=XN_ROWID && iCol<pTab->nCol );
@@ -136587,7 +137514,12 @@ SQLITE_PRIVATE int sqlite3InitOne(sqlite3 *db, int iDb, char **pzErrMsg, u32 mFl
#else
encoding = SQLITE_UTF8;
#endif
- sqlite3SetTextEncoding(db, encoding);
+ if( db->nVdbeActive>0 && encoding!=ENC(db) ){
+ rc = SQLITE_LOCKED;
+ goto initone_error_out;
+ }else{
+ sqlite3SetTextEncoding(db, encoding);
+ }
}else{
/* If opening an attached database, the encoding must match ENC(db) */
if( (meta[BTREE_TEXT_ENCODING-1] & 3)!=ENC(db) ){
@@ -136801,8 +137733,8 @@ static void schemaIsValid(Parse *pParse){
sqlite3BtreeGetMeta(pBt, BTREE_SCHEMA_VERSION, (u32 *)&cookie);
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
if( cookie!=db->aDb[iDb].pSchema->schema_cookie ){
+ if( DbHasProperty(db, iDb, DB_SchemaLoaded) ) pParse->rc = SQLITE_SCHEMA;
sqlite3ResetOneSchema(db, iDb);
- pParse->rc = SQLITE_SCHEMA;
}
/* Close the transaction, if one was opened. */
@@ -137413,6 +138345,10 @@ struct SortCtx {
} aDefer[4];
#endif
struct RowLoadInfo *pDeferredRowLoad; /* Deferred row loading info or NULL */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrPush; /* First instruction to push data into sorter */
+ int addrPushEnd; /* Last instruction that pushes data into sorter */
+#endif
};
#define SORTFLAG_UseSorter 0x01 /* Use SorterOpen instead of OpenEphemeral */
@@ -138069,6 +139005,10 @@ static void pushOntoSorter(
*/
assert( nData==1 || regData==regOrigData || regOrigData==0 );
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ pSort->addrPush = sqlite3VdbeCurrentAddr(v);
+#endif
+
if( nPrefixReg ){
assert( nPrefixReg==nExpr+bSeq );
regBase = regData - nPrefixReg;
@@ -138169,6 +139109,9 @@ static void pushOntoSorter(
sqlite3VdbeChangeP2(v, iSkip,
pSort->labelOBLopt ? pSort->labelOBLopt : sqlite3VdbeCurrentAddr(v));
}
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ pSort->addrPushEnd = sqlite3VdbeCurrentAddr(v)-1;
+#endif
}
/*
@@ -138635,9 +139578,6 @@ static void selectInnerLoop(
testcase( eDest==SRT_Fifo );
testcase( eDest==SRT_DistFifo );
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1+nPrefixReg);
- if( pDest->zAffSdst ){
- sqlite3VdbeChangeP4(v, -1, pDest->zAffSdst, nResultCol);
- }
#ifndef SQLITE_OMIT_CTE
if( eDest==SRT_DistFifo ){
/* If the destination is DistFifo, then cursor (iParm+1) is open
@@ -138995,6 +139935,16 @@ static void generateSortTail(
int bSeq; /* True if sorter record includes seq. no. */
int nRefKey = 0;
struct ExprList_item *aOutEx = p->pEList->a;
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrExplain; /* Address of OP_Explain instruction */
+#endif
+
+ ExplainQueryPlan2(addrExplain, (pParse, 0,
+ "USE TEMP B-TREE FOR %sORDER BY", pSort->nOBSat>0?"RIGHT PART OF ":"")
+ );
+ sqlite3VdbeScanStatusRange(v, addrExplain,pSort->addrPush,pSort->addrPushEnd);
+ sqlite3VdbeScanStatusCounters(v, addrExplain, addrExplain, pSort->addrPush);
+
assert( addrBreak<0 );
if( pSort->labelBkOut ){
@@ -139107,6 +140057,7 @@ static void generateSortTail(
VdbeComment((v, "%s", aOutEx[i].zEName));
}
}
+ sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1);
switch( eDest ){
case SRT_Table:
case SRT_EphemTab: {
@@ -139168,6 +140119,7 @@ static void generateSortTail(
}else{
sqlite3VdbeAddOp2(v, OP_Next, iTab, addr); VdbeCoverage(v);
}
+ sqlite3VdbeScanStatusRange(v, addrExplain, sqlite3VdbeCurrentAddr(v)-1, -1);
if( pSort->regReturn ) sqlite3VdbeAddOp1(v, OP_Return, pSort->regReturn);
sqlite3VdbeResolveLabel(v, addrBreak);
}
@@ -139439,7 +140391,7 @@ SQLITE_PRIVATE void sqlite3GenerateColumnNames(
if( pParse->colNamesSet ) return;
/* Column names are determined by the left-most term of a compound select */
while( pSelect->pPrior ) pSelect = pSelect->pPrior;
- SELECTTRACE(1,pParse,pSelect,("generating column names\n"));
+ TREETRACE(0x80,pParse,pSelect,("generating column names\n"));
pTabList = pSelect->pSrc;
pEList = pSelect->pEList;
assert( v!=0 );
@@ -139539,7 +140491,7 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
*pnCol = nCol;
*paCol = aCol;
- for(i=0, pCol=aCol; i<nCol && !db->mallocFailed; i++, pCol++){
+ for(i=0, pCol=aCol; i<nCol && !pParse->nErr; i++, pCol++){
struct ExprList_item *pX = &pEList->a[i];
struct ExprList_item *pCollide;
/* Get an appropriate name for the column
@@ -139589,7 +140541,10 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
if( zName[j]==':' ) nName = j;
}
zName = sqlite3MPrintf(db, "%.*z:%u", nName, zName, ++cnt);
- if( cnt>3 ) sqlite3_randomness(sizeof(cnt), &cnt);
+ sqlite3ProgressCheck(pParse);
+ if( cnt>3 ){
+ sqlite3_randomness(sizeof(cnt), &cnt);
+ }
}
pCol->zCnName = zName;
pCol->hName = sqlite3StrIHash(zName);
@@ -139602,71 +140557,106 @@ SQLITE_PRIVATE int sqlite3ColumnsFromExprList(
}
}
sqlite3HashClear(&ht);
- if( db->mallocFailed ){
+ if( pParse->nErr ){
for(j=0; j<i; j++){
sqlite3DbFree(db, aCol[j].zCnName);
}
sqlite3DbFree(db, aCol);
*paCol = 0;
*pnCol = 0;
- return SQLITE_NOMEM_BKPT;
+ return pParse->rc;
}
return SQLITE_OK;
}
/*
-** Add type and collation information to a column list based on
-** a SELECT statement.
+** pTab is a transient Table object that represents a subquery of some
+** kind (maybe a parenthesized subquery in the FROM clause of a larger
+** query, or a VIEW, or a CTE). This routine computes type information
+** for that Table object based on the Select object that implements the
+** subquery. For the purposes of this routine, "type information" means:
**
-** The column list presumably came from selectColumnNamesFromExprList().
-** The column list has only names, not types or collations. This
-** routine goes through and adds the types and collations.
-**
-** This routine requires that all identifiers in the SELECT
-** statement be resolved.
+** * The datatype name, as it might appear in a CREATE TABLE statement
+** * Which collating sequence to use for the column
+** * The affinity of the column
*/
-SQLITE_PRIVATE void sqlite3SelectAddColumnTypeAndCollation(
- Parse *pParse, /* Parsing contexts */
- Table *pTab, /* Add column type information to this table */
- Select *pSelect, /* SELECT used to determine types and collations */
- char aff /* Default affinity for columns */
+SQLITE_PRIVATE void sqlite3SubqueryColumnTypes(
+ Parse *pParse, /* Parsing context */
+ Table *pTab, /* Add column type information to this table */
+ Select *pSelect, /* SELECT used to determine types and collations */
+ char aff /* Default affinity. */
){
sqlite3 *db = pParse->db;
- NameContext sNC;
Column *pCol;
CollSeq *pColl;
- int i;
+ int i,j;
Expr *p;
struct ExprList_item *a;
+ NameContext sNC;
assert( pSelect!=0 );
assert( (pSelect->selFlags & SF_Resolved)!=0 );
- assert( pTab->nCol==pSelect->pEList->nExpr || db->mallocFailed );
+ assert( pTab->nCol==pSelect->pEList->nExpr || pParse->nErr>0 );
+ assert( aff==SQLITE_AFF_NONE || aff==SQLITE_AFF_BLOB );
if( db->mallocFailed ) return;
+ while( pSelect->pPrior ) pSelect = pSelect->pPrior;
+ a = pSelect->pEList->a;
memset(&sNC, 0, sizeof(sNC));
sNC.pSrcList = pSelect->pSrc;
- a = pSelect->pEList->a;
for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
const char *zType;
- i64 n, m;
+ i64 n;
pTab->tabFlags |= (pCol->colFlags & COLFLAG_NOINSERT);
p = a[i].pExpr;
- zType = columnType(&sNC, p, 0, 0, 0);
/* pCol->szEst = ... // Column size est for SELECT tables never used */
pCol->affinity = sqlite3ExprAffinity(p);
- if( zType ){
- m = sqlite3Strlen30(zType);
- n = sqlite3Strlen30(pCol->zCnName);
- pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2);
- if( pCol->zCnName ){
- memcpy(&pCol->zCnName[n+1], zType, m+1);
- pCol->colFlags |= COLFLAG_HASTYPE;
+ if( pCol->affinity<=SQLITE_AFF_NONE ){
+ pCol->affinity = aff;
+ }
+ if( pCol->affinity>=SQLITE_AFF_TEXT && pSelect->pNext ){
+ int m = 0;
+ Select *pS2;
+ for(m=0, pS2=pSelect->pNext; pS2; pS2=pS2->pNext){
+ m |= sqlite3ExprDataType(pS2->pEList->a[i].pExpr);
+ }
+ if( pCol->affinity==SQLITE_AFF_TEXT && (m&0x01)!=0 ){
+ pCol->affinity = SQLITE_AFF_BLOB;
+ }else
+ if( pCol->affinity>=SQLITE_AFF_NUMERIC && (m&0x02)!=0 ){
+ pCol->affinity = SQLITE_AFF_BLOB;
+ }
+ if( pCol->affinity>=SQLITE_AFF_NUMERIC && p->op==TK_CAST ){
+ pCol->affinity = SQLITE_AFF_FLEXNUM;
+ }
+ }
+ zType = columnType(&sNC, p, 0, 0, 0);
+ if( zType==0 || pCol->affinity!=sqlite3AffinityType(zType, 0) ){
+ if( pCol->affinity==SQLITE_AFF_NUMERIC
+ || pCol->affinity==SQLITE_AFF_FLEXNUM
+ ){
+ zType = "NUM";
}else{
- testcase( pCol->colFlags & COLFLAG_HASTYPE );
+ zType = 0;
+ for(j=1; j<SQLITE_N_STDTYPE; j++){
+ if( sqlite3StdTypeAffinity[j]==pCol->affinity ){
+ zType = sqlite3StdType[j];
+ break;
+ }
+ }
+ }
+ }
+ if( zType ){
+ i64 m = sqlite3Strlen30(zType);
+ n = sqlite3Strlen30(pCol->zCnName);
+ pCol->zCnName = sqlite3DbReallocOrFree(db, pCol->zCnName, n+m+2);
+ if( pCol->zCnName ){
+ memcpy(&pCol->zCnName[n+1], zType, m+1);
+ pCol->colFlags |= COLFLAG_HASTYPE;
+ }else{
+ testcase( pCol->colFlags & COLFLAG_HASTYPE );
pCol->colFlags &= ~(COLFLAG_HASTYPE|COLFLAG_HASCOLL);
}
}
- if( pCol->affinity<=SQLITE_AFF_NONE ) pCol->affinity = aff;
pColl = sqlite3ExprCollSeq(pParse, p);
if( pColl ){
assert( pTab->pIndex==0 );
@@ -139700,7 +140690,7 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect, c
pTab->zName = 0;
pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol);
- sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSelect, aff);
+ sqlite3SubqueryColumnTypes(pParse, pTab, pSelect, aff);
pTab->iPKey = -1;
if( db->mallocFailed ){
sqlite3DeleteTable(db, pTab);
@@ -140225,7 +141215,7 @@ static int multiSelect(
pPrior->iLimit = p->iLimit;
pPrior->iOffset = p->iOffset;
pPrior->pLimit = p->pLimit;
- SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL left...\n"));
+ TREETRACE(0x200, pParse, p, ("multiSelect UNION ALL left...\n"));
rc = sqlite3Select(pParse, pPrior, &dest);
pPrior->pLimit = 0;
if( rc ){
@@ -140243,7 +141233,7 @@ static int multiSelect(
}
}
ExplainQueryPlan((pParse, 1, "UNION ALL"));
- SELECTTRACE(1, pParse, p, ("multiSelect UNION ALL right...\n"));
+ TREETRACE(0x200, pParse, p, ("multiSelect UNION ALL right...\n"));
rc = sqlite3Select(pParse, p, &dest);
testcase( rc!=SQLITE_OK );
pDelete = p->pPrior;
@@ -140296,7 +141286,7 @@ static int multiSelect(
*/
assert( !pPrior->pOrderBy );
sqlite3SelectDestInit(&uniondest, priorOp, unionTab);
- SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION left...\n"));
+ TREETRACE(0x200, pParse, p, ("multiSelect EXCEPT/UNION left...\n"));
rc = sqlite3Select(pParse, pPrior, &uniondest);
if( rc ){
goto multi_select_end;
@@ -140316,7 +141306,7 @@ static int multiSelect(
uniondest.eDest = op;
ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
sqlite3SelectOpName(p->op)));
- SELECTTRACE(1, pParse, p, ("multiSelect EXCEPT/UNION right...\n"));
+ TREETRACE(0x200, pParse, p, ("multiSelect EXCEPT/UNION right...\n"));
rc = sqlite3Select(pParse, p, &uniondest);
testcase( rc!=SQLITE_OK );
assert( p->pOrderBy==0 );
@@ -140377,7 +141367,7 @@ static int multiSelect(
/* Code the SELECTs to our left into temporary table "tab1".
*/
sqlite3SelectDestInit(&intersectdest, SRT_Union, tab1);
- SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT left...\n"));
+ TREETRACE(0x400, pParse, p, ("multiSelect INTERSECT left...\n"));
rc = sqlite3Select(pParse, pPrior, &intersectdest);
if( rc ){
goto multi_select_end;
@@ -140394,7 +141384,7 @@ static int multiSelect(
intersectdest.iSDParm = tab2;
ExplainQueryPlan((pParse, 1, "%s USING TEMP B-TREE",
sqlite3SelectOpName(p->op)));
- SELECTTRACE(1, pParse, p, ("multiSelect INTERSECT right...\n"));
+ TREETRACE(0x400, pParse, p, ("multiSelect INTERSECT right...\n"));
rc = sqlite3Select(pParse, p, &intersectdest);
testcase( rc!=SQLITE_OK );
pDelete = p->pPrior;
@@ -141152,7 +142142,9 @@ static Expr *substExpr(
sqlite3VectorErrorMsg(pSubst->pParse, pCopy);
}else{
sqlite3 *db = pSubst->pParse->db;
- if( pSubst->isOuterJoin && pCopy->op!=TK_COLUMN ){
+ if( pSubst->isOuterJoin
+ && (pCopy->op!=TK_COLUMN || pCopy->iTable!=pSubst->iNewTable)
+ ){
memset(&ifNullRow, 0, sizeof(ifNullRow));
ifNullRow.op = TK_IF_NULL_ROW;
ifNullRow.pLeft = pCopy;
@@ -141398,6 +142390,34 @@ static ExprList *findLeftmostExprlist(Select *pSel){
return pSel->pEList;
}
+/*
+** Return true if any of the result-set columns in the compound query
+** have different affinities on one or more arms of the compound.
+*/
+static int compoundHasDifferentAffinities(Select *p){
+ int ii;
+ ExprList *pList;
+ assert( p!=0 );
+ assert( p->pEList!=0 );
+ assert( p->pPrior!=0 );
+ pList = p->pEList;
+ for(ii=0; ii<pList->nExpr; ii++){
+ char aff;
+ Select *pSub1;
+ assert( pList->a[ii].pExpr!=0 );
+ aff = sqlite3ExprAffinity(pList->a[ii].pExpr);
+ for(pSub1=p->pPrior; pSub1; pSub1=pSub1->pPrior){
+ assert( pSub1->pEList!=0 );
+ assert( pSub1->pEList->nExpr>ii );
+ assert( pSub1->pEList->a[ii].pExpr!=0 );
+ if( sqlite3ExprAffinity(pSub1->pEList->a[ii].pExpr)!=aff ){
+ return 1;
+ }
+ }
+ }
+ return 0;
+}
+
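Flattening restriction (17h) and push-down restriction (9), both applied below, bail out when this helper returns true. The condition it detects is visible at the SQL level: in the sketch that follows, the same result column has NUMERIC affinity in one arm (t1.a) and TEXT affinity in the other (t2.b), so the two arms convert the same literal differently:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db,
    "CREATE TABLE t1(a INT);  INSERT INTO t1 VALUES('5');"
    "CREATE TABLE t2(b TEXT); INSERT INTO t2 VALUES('5');", 0, 0, 0);
  sqlite3_prepare_v2(db,
    "SELECT typeof(a) FROM t1 UNION ALL SELECT typeof(b) FROM t2;",
    -1, &pStmt, 0);
  while( sqlite3_step(pStmt)==SQLITE_ROW ){
    /* prints "integer", then "text" */
    printf("%s\n", (const char*)sqlite3_column_text(pStmt, 0));
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}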
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/*
** This routine attempts to flatten subqueries as a performance optimization.
@@ -141501,7 +142521,8 @@ static ExprList *findLeftmostExprlist(Select *pSel){
** query or there are no RIGHT or FULL JOINs in any arm
** of the subquery. (This is a duplicate of condition (27b).)
** (17h) The corresponding result set expressions in all arms of the
-** compound must have the same affinity.
+** compound must have the same affinity. (See restriction (9)
+** on the push-down optimization.)
**
** The parent and sub-query may contain WHERE clauses. Subject to
** rules (11), (13) and (14), they may also contain ORDER BY,
@@ -141720,19 +142741,7 @@ static int flattenSubquery(
if( (p->selFlags & SF_Recursive) ) return 0;
/* Restriction (17h) */
- for(ii=0; ii<pSub->pEList->nExpr; ii++){
- char aff;
- assert( pSub->pEList->a[ii].pExpr!=0 );
- aff = sqlite3ExprAffinity(pSub->pEList->a[ii].pExpr);
- for(pSub1=pSub->pPrior; pSub1; pSub1=pSub1->pPrior){
- assert( pSub1->pEList!=0 );
- assert( pSub1->pEList->nExpr>ii );
- assert( pSub1->pEList->a[ii].pExpr!=0 );
- if( sqlite3ExprAffinity(pSub1->pEList->a[ii].pExpr)!=aff ){
- return 0;
- }
- }
- }
+ if( compoundHasDifferentAffinities(pSub) ) return 0;
if( pSrc->nSrc>1 ){
if( pParse->nSelect>500 ) return 0;
@@ -141743,7 +142752,7 @@ static int flattenSubquery(
}
/***** If we reach this point, flattening is permitted. *****/
- SELECTTRACE(1,pParse,p,("flatten %u.%p from term %d\n",
+ TREETRACE(0x4,pParse,p,("flatten %u.%p from term %d\n",
pSub->selId, pSub, iFrom));
/* Authorize the subquery */
@@ -141822,7 +142831,7 @@ static int flattenSubquery(
if( pPrior ) pPrior->pNext = pNew;
pNew->pNext = p;
p->pPrior = pNew;
- SELECTTRACE(2,pParse,p,("compound-subquery flattener"
+ TREETRACE(0x4,pParse,p,("compound-subquery flattener"
" creates %u as peer\n",pNew->selId));
}
assert( pSubitem->pSelect==0 );
@@ -142002,8 +143011,8 @@ static int flattenSubquery(
sqlite3SelectDelete(db, pSub1);
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,("After flattening:\n"));
+ if( sqlite3TreeTrace & 0x4 ){
+ TREETRACE(0x4,pParse,p,("After flattening:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -142377,12 +143386,14 @@ static int pushDownWindowCheck(Parse *pParse, Select *pSubq, Expr *pExpr){
** be materialized. (This restriction is implemented in the calling
** routine.)
**
-** (8) The subquery may not be a compound that uses UNION, INTERSECT,
-** or EXCEPT. (We could, perhaps, relax this restriction to allow
-** this case if none of the comparisons operators between left and
-** right arms of the compound use a collation other than BINARY.
-** But it is a lot of work to check that case for an obscure and
-** minor optimization, so we omit it for now.)
+** (8) If the subquery is a compound that uses UNION, INTERSECT,
+** or EXCEPT, then all of the result set columns for all arms of
+** the compound must use the BINARY collating sequence.
+**
+** (9) If the subquery is a compound, then all arms of the compound must
+** have the same affinity. (This is the same as restriction (17h)
+** for query flattening.)
+**
**
** Return 0 if no changes are made and non-zero if one or more WHERE clause
** terms are duplicated into the subquery.
@@ -142399,20 +143410,44 @@ static int pushDownWhereTerms(
if( pSubq->selFlags & (SF_Recursive|SF_MultiPart) ) return 0;
if( pSrc->fg.jointype & (JT_LTORJ|JT_RIGHT) ) return 0;
-#ifndef SQLITE_OMIT_WINDOWFUNC
if( pSubq->pPrior ){
Select *pSel;
+ int notUnionAll = 0;
for(pSel=pSubq; pSel; pSel=pSel->pPrior){
u8 op = pSel->op;
assert( op==TK_ALL || op==TK_SELECT
|| op==TK_UNION || op==TK_INTERSECT || op==TK_EXCEPT );
- if( op!=TK_ALL && op!=TK_SELECT ) return 0; /* restriction (8) */
+ if( op!=TK_ALL && op!=TK_SELECT ){
+ notUnionAll = 1;
+ }
+#ifndef SQLITE_OMIT_WINDOWFUNC
if( pSel->pWin ) return 0; /* restriction (6b) */
+#endif
+ }
+ if( compoundHasDifferentAffinities(pSubq) ){
+ return 0; /* restriction (9) */
+ }
+ if( notUnionAll ){
+ /* If any of the compound arms are connected using UNION, INTERSECT,
+ ** or EXCEPT, then we must ensure that none of the columns use a
+ ** non-BINARY collating sequence. */
+ for(pSel=pSubq; pSel; pSel=pSel->pPrior){
+ int ii;
+ const ExprList *pList = pSel->pEList;
+ assert( pList!=0 );
+ for(ii=0; ii<pList->nExpr; ii++){
+ CollSeq *pColl = sqlite3ExprCollSeq(pParse, pList->a[ii].pExpr);
+ if( !sqlite3IsBinary(pColl) ){
+ return 0; /* Restriction (8) */
+ }
+ }
+ }
}
}else{
+#ifndef SQLITE_OMIT_WINDOWFUNC
if( pSubq->pWin && pSubq->pWin->pPartition==0 ) return 0;
- }
#endif
+ }
#ifdef SQLITE_DEBUG
/* Only the first term of a compound can have a WITH clause. But make
@@ -142874,9 +143909,6 @@ static int resolveFromTermToCte(
pFrom->fg.isCte = 1;
pFrom->u2.pCteUse = pCteUse;
pCteUse->nUse++;
- if( pCteUse->nUse>=2 && pCteUse->eM10d==M10d_Any ){
- pCteUse->eM10d = M10d_Yes;
- }
/* Check if this is a recursive CTE. */
pRecTerm = pSel = pFrom->pSelect;
@@ -143416,8 +144448,8 @@ static int selectExpander(Walker *pWalker, Select *p){
}
}
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,("After result-set wildcard expansion:\n"));
+ if( sqlite3TreeTrace & 0x8 ){
+ TREETRACE(0x8,pParse,p,("After result-set wildcard expansion:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -143468,14 +144500,14 @@ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){
** This is a Walker.xSelectCallback callback for the sqlite3SelectTypeInfo()
** interface.
**
-** For each FROM-clause subquery, add Column.zType and Column.zColl
-** information to the Table structure that represents the result set
-** of that subquery.
+** For each FROM-clause subquery, add Column.zType, Column.zColl, and
+** Column.affinity information to the Table structure that represents
+** the result set of that subquery.
**
** The Table structure that represents the result set was constructed
-** by selectExpander() but the type and collation information was omitted
-** at that point because identifiers had not yet been resolved. This
-** routine is called after identifier resolution.
+** by selectExpander() but the type and collation and affinity information
+** was omitted at that point because identifiers had not yet been resolved.
+** This routine is called after identifier resolution.
*/
static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
Parse *pParse;
@@ -143495,9 +144527,7 @@ static void selectAddSubqueryTypeInfo(Walker *pWalker, Select *p){
/* A sub-query in the FROM clause of a SELECT */
Select *pSel = pFrom->pSelect;
if( pSel ){
- while( pSel->pPrior ) pSel = pSel->pPrior;
- sqlite3SelectAddColumnTypeAndCollation(pParse, pTab, pSel,
- SQLITE_AFF_NONE);
+ sqlite3SubqueryColumnTypes(pParse, pTab, pSel, SQLITE_AFF_NONE);
}
}
}
@@ -143552,6 +144582,175 @@ SQLITE_PRIVATE void sqlite3SelectPrep(
sqlite3SelectAddTypeInfo(pParse, p);
}
+#if TREETRACE_ENABLED
+/*
+** Display all information about an AggInfo object
+*/
+static void printAggInfo(AggInfo *pAggInfo){
+ int ii;
+ for(ii=0; ii<pAggInfo->nColumn; ii++){
+ struct AggInfo_col *pCol = &pAggInfo->aCol[ii];
+ sqlite3DebugPrintf(
+ "agg-column[%d] pTab=%s iTable=%d iColumn=%d iMem=%d"
+ " iSorterColumn=%d %s\n",
+ ii, pCol->pTab ? pCol->pTab->zName : "NULL",
+ pCol->iTable, pCol->iColumn, pAggInfo->iFirstReg+ii,
+ pCol->iSorterColumn,
+ ii>=pAggInfo->nAccumulator ? "" : " Accumulator");
+ sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0);
+ }
+ for(ii=0; ii<pAggInfo->nFunc; ii++){
+ sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n",
+ ii, pAggInfo->iFirstReg+pAggInfo->nColumn+ii);
+ sqlite3TreeViewExpr(0, pAggInfo->aFunc[ii].pFExpr, 0);
+ }
+}
+#endif /* TREETRACE_ENABLED */
+
+/*
+** Analyze the arguments to aggregate functions. Create new pAggInfo->aCol[]
+** entries for columns that are arguments to aggregate functions but which
+** are not otherwise used.
+**
+** The aCol[] entries in AggInfo prior to nAccumulator are columns that
+** are referenced outside of aggregate functions. These might be columns
+** that are part of the GROUP by clause, for example. Other database engines
+** would throw an error if there is a column reference that is not in the
+** GROUP BY clause and that is not part of an aggregate function argument.
+** But SQLite allows this.
+**
+** The aCol[] entries beginning with aCol[nAccumulator] and following
+** are column references that are used exclusively as arguments to
+** aggregate functions. This routine is responsible for computing
+** (or recomputing) those aCol[] entries.
+*/
+static void analyzeAggFuncArgs(
+ AggInfo *pAggInfo,
+ NameContext *pNC
+){
+ int i;
+ assert( pAggInfo!=0 );
+ assert( pAggInfo->iFirstReg==0 );
+ pNC->ncFlags |= NC_InAggFunc;
+ for(i=0; i<pAggInfo->nFunc; i++){
+ Expr *pExpr = pAggInfo->aFunc[i].pFExpr;
+ assert( ExprUseXList(pExpr) );
+ sqlite3ExprAnalyzeAggList(pNC, pExpr->x.pList);
+#ifndef SQLITE_OMIT_WINDOWFUNC
+ assert( !IsWindowFunc(pExpr) );
+ if( ExprHasProperty(pExpr, EP_WinFunc) ){
+ sqlite3ExprAnalyzeAggregates(pNC, pExpr->y.pWin->pFilter);
+ }
+#endif
+ }
+ pNC->ncFlags &= ~NC_InAggFunc;
+}
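For a hypothetical query

    SELECT a, sum(b+c) FROM t1 GROUP BY a;

column a is referenced outside of any aggregate and therefore occupies an aCol[] slot below nAccumulator, while b and c appear only as arguments to sum(), so the routine above computes their aCol[] entries at indexes nAccumulator and following.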
+
+/*
+** An index on expressions is being used in the inner loop of an
+** aggregate query with a GROUP BY clause. This routine attempts
+** to adjust the AggInfo object to take advantage of the index and to
+** perhaps use the index as a covering index.
+**
+*/
+static void optimizeAggregateUseOfIndexedExpr(
+ Parse *pParse, /* Parsing context */
+ Select *pSelect, /* The SELECT statement being processed */
+ AggInfo *pAggInfo, /* The aggregate info */
+ NameContext *pNC /* Name context used to resolve agg-func args */
+){
+ assert( pAggInfo->iFirstReg==0 );
+ assert( pSelect!=0 );
+ assert( pSelect->pGroupBy!=0 );
+ pAggInfo->nColumn = pAggInfo->nAccumulator;
+ if( ALWAYS(pAggInfo->nSortingColumn>0) ){
+ if( pAggInfo->nColumn==0 ){
+ pAggInfo->nSortingColumn = pSelect->pGroupBy->nExpr;
+ }else{
+ pAggInfo->nSortingColumn =
+ pAggInfo->aCol[pAggInfo->nColumn-1].iSorterColumn+1;
+ }
+ }
+ analyzeAggFuncArgs(pAggInfo, pNC);
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x20 ){
+ IndexedExpr *pIEpr;
+ TREETRACE(0x20, pParse, pSelect,
+ ("AggInfo (possibly) adjusted for Indexed Exprs\n"));
+ sqlite3TreeViewSelect(0, pSelect, 0);
+ for(pIEpr=pParse->pIdxEpr; pIEpr; pIEpr=pIEpr->pIENext){
+ printf("data-cursor=%d index={%d,%d}\n",
+ pIEpr->iDataCur, pIEpr->iIdxCur, pIEpr->iIdxCol);
+ sqlite3TreeViewExpr(0, pIEpr->pExpr, 0);
+ }
+ printAggInfo(pAggInfo);
+ }
+#else
+ UNUSED_PARAMETER(pSelect);
+ UNUSED_PARAMETER(pParse);
+#endif
+}
+
+/*
+** Walker callback for aggregateConvertIndexedExprRefToColumn().
+*/
+static int aggregateIdxEprRefToColCallback(Walker *pWalker, Expr *pExpr){
+ AggInfo *pAggInfo;
+ struct AggInfo_col *pCol;
+ UNUSED_PARAMETER(pWalker);
+ if( pExpr->pAggInfo==0 ) return WRC_Continue;
+ if( pExpr->op==TK_AGG_COLUMN ) return WRC_Continue;
+ if( pExpr->op==TK_AGG_FUNCTION ) return WRC_Continue;
+ if( pExpr->op==TK_IF_NULL_ROW ) return WRC_Continue;
+ pAggInfo = pExpr->pAggInfo;
+ assert( pExpr->iAgg>=0 && pExpr->iAgg<pAggInfo->nColumn );
+ pCol = &pAggInfo->aCol[pExpr->iAgg];
+ pExpr->op = TK_AGG_COLUMN;
+ pExpr->iTable = pCol->iTable;
+ pExpr->iColumn = pCol->iColumn;
+ return WRC_Prune;
+}
+
+/*
+** Convert every pAggInfo->aFunc[].pFExpr such that any node within
+** those expressions that has pAggInfo set is changed into a TK_AGG_COLUMN
+** opcode.
+*/
+static void aggregateConvertIndexedExprRefToColumn(AggInfo *pAggInfo){
+ int i;
+ Walker w;
+ memset(&w, 0, sizeof(w));
+ w.xExprCallback = aggregateIdxEprRefToColCallback;
+ for(i=0; i<pAggInfo->nFunc; i++){
+ sqlite3WalkExpr(&w, pAggInfo->aFunc[i].pFExpr);
+ }
+}
+
+
+/*
+** Allocate a block of registers so that there is one register for each
+** pAggInfo->aCol[] and pAggInfo->aFunc[] entry in pAggInfo. The first
+** register in this block is stored in pAggInfo->iFirstReg.
+**
+** This routine may only be called once for each AggInfo object. Prior
+** to calling this routine:
+**
+** * The aCol[] and aFunc[] arrays may be modified
+** * The AggInfoColumnReg() and AggInfoFuncReg() macros may not be used
+**
+** After calling this routine:
+**
+** * The aCol[] and aFunc[] arrays are fixed
+** * The AggInfoColumnReg() and AggInfoFuncReg() macros may be used
+**
+*/
+static void assignAggregateRegisters(Parse *pParse, AggInfo *pAggInfo){
+ assert( pAggInfo!=0 );
+ assert( pAggInfo->iFirstReg==0 );
+ pAggInfo->iFirstReg = pParse->nMem + 1;
+ pParse->nMem += pAggInfo->nColumn + pAggInfo->nFunc;
+}
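A sketch of the resulting layout, assuming AggInfoColumnReg() and AggInfoFuncReg() are defined along these lines in sqliteInt.h:

#define AggInfoColumnReg(A,I)  ((A)->iFirstReg+(I))
#define AggInfoFuncReg(A,I)    ((A)->iFirstReg+(A)->nColumn+(I))

/* With pParse->nMem==10, nColumn==2, nFunc==1 on entry:
**   iFirstReg = 11
**   reg 11 = AggInfoColumnReg(pAggInfo,0)   -- aCol[0]
**   reg 12 = AggInfoColumnReg(pAggInfo,1)   -- aCol[1]
**   reg 13 = AggInfoFuncReg(pAggInfo,0)     -- aFunc[0]
** and pParse->nMem is left at 13. */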
+
/*
** Reset the aggregate accumulator.
**
@@ -143565,24 +144764,13 @@ static void resetAccumulator(Parse *pParse, AggInfo *pAggInfo){
int i;
struct AggInfo_func *pFunc;
int nReg = pAggInfo->nFunc + pAggInfo->nColumn;
+ assert( pAggInfo->iFirstReg>0 );
assert( pParse->db->pParse==pParse );
assert( pParse->db->mallocFailed==0 || pParse->nErr!=0 );
if( nReg==0 ) return;
if( pParse->nErr ) return;
-#ifdef SQLITE_DEBUG
- /* Verify that all AggInfo registers are within the range specified by
- ** AggInfo.mnReg..AggInfo.mxReg */
- assert( nReg==pAggInfo->mxReg-pAggInfo->mnReg+1 );
- for(i=0; i<pAggInfo->nColumn; i++){
- assert( pAggInfo->aCol[i].iMem>=pAggInfo->mnReg
- && pAggInfo->aCol[i].iMem<=pAggInfo->mxReg );
- }
- for(i=0; i<pAggInfo->nFunc; i++){
- assert( pAggInfo->aFunc[i].iMem>=pAggInfo->mnReg
- && pAggInfo->aFunc[i].iMem<=pAggInfo->mxReg );
- }
-#endif
- sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->mnReg, pAggInfo->mxReg);
+ sqlite3VdbeAddOp3(v, OP_Null, 0, pAggInfo->iFirstReg,
+ pAggInfo->iFirstReg+nReg-1);
for(pFunc=pAggInfo->aFunc, i=0; i<pAggInfo->nFunc; i++, pFunc++){
if( pFunc->iDistinct>=0 ){
Expr *pE = pFunc->pFExpr;
@@ -143614,15 +144802,16 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
ExprList *pList;
assert( ExprUseXList(pF->pFExpr) );
pList = pF->pFExpr->x.pList;
- sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0);
+ sqlite3VdbeAddOp2(v, OP_AggFinal, AggInfoFuncReg(pAggInfo,i),
+ pList ? pList->nExpr : 0);
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
}
}
/*
-** Update the accumulator memory cells for an aggregate based on
-** the current cursor position.
+** Generate code that will update the accumulator memory cells for an
+** aggregate based on the current cursor position.
**
** If regAcc is non-zero and there are no min() or max() aggregates
** in pAggInfo, then only populate the pAggInfo->nAccumulator accumulator
@@ -143642,6 +144831,8 @@ static void updateAccumulator(
struct AggInfo_func *pF;
struct AggInfo_col *pC;
+ assert( pAggInfo->iFirstReg>0 );
+ if( pParse->nErr ) return;
pAggInfo->directMode = 1;
for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
int nArg;
@@ -143702,7 +144893,7 @@ static void updateAccumulator(
if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
}
- sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, pF->iMem);
+ sqlite3VdbeAddOp3(v, OP_AggStep, 0, regAgg, AggInfoFuncReg(pAggInfo,i));
sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, (u8)nArg);
sqlite3ReleaseTempRange(pParse, regAgg, nArg);
@@ -143717,7 +144908,7 @@ static void updateAccumulator(
addrHitTest = sqlite3VdbeAddOp1(v, OP_If, regHit); VdbeCoverage(v);
}
for(i=0, pC=pAggInfo->aCol; i<pAggInfo->nAccumulator; i++, pC++){
- sqlite3ExprCode(pParse, pC->pCExpr, pC->iMem);
+ sqlite3ExprCode(pParse, pC->pCExpr, AggInfoColumnReg(pAggInfo,i));
}
pAggInfo->directMode = 0;
@@ -143813,26 +145004,31 @@ static void havingToWhere(Parse *pParse, Select *p){
sqlite3WalkExpr(&sWalker, p->pHaving);
#if TREETRACE_ENABLED
if( sWalker.eCode && (sqlite3TreeTrace & 0x100)!=0 ){
- SELECTTRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n"));
+ TREETRACE(0x100,pParse,p,("Move HAVING terms into WHERE:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
}
/*
-** Check to see if the pThis entry of pTabList is a self-join of a prior view.
-** If it is, then return the SrcItem for the prior view. If it is not,
-** then return 0.
+** Check to see if the pThis entry of pTabList is a self-join of another view.
+** Search FROM-clause entries in the range of iFirst..iEnd, including iFirst
+** but stopping before iEnd.
+**
+** If pThis is a self-join, then return the SrcItem for the first other
+** instance of that view found. If pThis is not a self-join then return 0.
*/
static SrcItem *isSelfJoinView(
SrcList *pTabList, /* Search for self-joins in this FROM clause */
- SrcItem *pThis /* Search for prior reference to this subquery */
+ SrcItem *pThis, /* Search for prior reference to this subquery */
+ int iFirst, int iEnd /* Range of FROM-clause entries to search. */
){
SrcItem *pItem;
assert( pThis->pSelect!=0 );
if( pThis->pSelect->selFlags & SF_PushDown ) return 0;
- for(pItem = pTabList->a; pItem<pThis; pItem++){
+ while( iFirst<iEnd ){
Select *pS1;
+ pItem = &pTabList->a[iFirst++];
if( pItem->pSelect==0 ) continue;
if( pItem->fg.viaCoroutine ) continue;
if( pItem->zName==0 ) continue;
@@ -143894,6 +145090,7 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
if( p->pEList->nExpr!=1 ) return 0; /* Single result column */
if( p->pWhere ) return 0;
if( p->pGroupBy ) return 0;
+ if( p->pOrderBy ) return 0;
pExpr = p->pEList->a[0].pExpr;
if( pExpr->op!=TK_AGG_FUNCTION ) return 0; /* Result is an aggregate */
assert( ExprUseUToken(pExpr) );
@@ -143901,9 +145098,11 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
assert( ExprUseXList(pExpr) );
if( pExpr->x.pList!=0 ) return 0; /* Must be count(*) */
if( p->pSrc->nSrc!=1 ) return 0; /* One table in FROM */
+ if( ExprHasProperty(pExpr, EP_WinFunc) ) return 0;/* Not a window function */
pSub = p->pSrc->a[0].pSelect;
if( pSub==0 ) return 0; /* The FROM is a subquery */
- if( pSub->pPrior==0 ) return 0; /* Must be a compound ry */
+ if( pSub->pPrior==0 ) return 0; /* Must be a compound */
+ if( pSub->selFlags & SF_CopyCte ) return 0; /* Not a CTE */
do{
if( pSub->op!=TK_ALL && pSub->pPrior ) return 0; /* Must be UNION ALL */
if( pSub->pWhere ) return 0; /* No WHERE clause */
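The shape of the rewrite, for a hypothetical pair of tables: when every condition above holds, a query such as

    SELECT count(*) FROM (SELECT x FROM t1 UNION ALL SELECT y FROM t2);

is evaluated, in effect, as the sum of per-arm counts,

    SELECT (SELECT count(*) FROM t1) + (SELECT count(*) FROM t2);

so the compound subquery is never materialized.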
@@ -143945,8 +145144,8 @@ static int countOfViewOptimization(Parse *pParse, Select *p){
p->selFlags &= ~SF_Aggregate;
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x400 ){
- SELECTTRACE(0x400,pParse,p,("After count-of-view optimization:\n"));
+ if( sqlite3TreeTrace & 0x200 ){
+ TREETRACE(0x200,pParse,p,("After count-of-view optimization:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -143978,6 +145177,68 @@ static int sameSrcAlias(SrcItem *p0, SrcList *pSrc){
}
/*
+** Return TRUE (non-zero) if the i-th entry in the pTabList SrcList can
+** be implemented as a co-routine. The i-th entry is guaranteed to be
+** a subquery.
+**
+** The subquery is implemented as a co-routine if all of the following are
+** true:
+**
+** (1) The subquery will likely be implemented in the outer loop of
+** the query. This will be the case if any one of the following
+** conditions hold:
+** (a) The subquery is the only term in the FROM clause
+** (b) The subquery is the left-most term and a CROSS JOIN or similar
+** requires it to be the outer loop
+** (c) All of the following are true:
+** (i) The subquery is the left-most subquery in the FROM clause
+** (ii) There is nothing that would prevent the subquery from
+** being used as the outer loop if the sqlite3WhereBegin()
+** routine nominates it to that position.
+** (iii) The query is not an UPDATE ... FROM
+** (2) The subquery is not a CTE that should be materialized because
+** (a) the AS MATERIALIZED keyword is used, or
+** (b) the CTE is used multiple times and does not have the
+** NOT MATERIALIZED keyword
+** (3) The subquery is not part of a left operand for a RIGHT JOIN
+** (4) The SQLITE_Coroutine optimization disable flag is not set
+** (5) The subquery is not self-joined
+*/
+static int fromClauseTermCanBeCoroutine(
+ Parse *pParse, /* Parsing context */
+ SrcList *pTabList, /* FROM clause */
+ int i, /* Which term of the FROM clause holds the subquery */
+ int selFlags /* Flags on the SELECT statement */
+){
+ SrcItem *pItem = &pTabList->a[i];
+ if( pItem->fg.isCte ){
+ const CteUse *pCteUse = pItem->u2.pCteUse;
+ if( pCteUse->eM10d==M10d_Yes ) return 0; /* (2a) */
+ if( pCteUse->nUse>=2 && pCteUse->eM10d!=M10d_No ) return 0; /* (2b) */
+ }
+ if( pTabList->a[0].fg.jointype & JT_LTORJ ) return 0; /* (3) */
+ if( OptimizationDisabled(pParse->db, SQLITE_Coroutines) ) return 0; /* (4) */
+ if( isSelfJoinView(pTabList, pItem, i+1, pTabList->nSrc)!=0 ){
+ return 0; /* (5) */
+ }
+ if( i==0 ){
+ if( pTabList->nSrc==1 ) return 1; /* (1a) */
+ if( pTabList->a[1].fg.jointype & JT_CROSS ) return 1; /* (1b) */
+ if( selFlags & SF_UpdateFrom ) return 0; /* (1c-iii) */
+ return 1;
+ }
+ if( selFlags & SF_UpdateFrom ) return 0; /* (1c-iii) */
+ while( 1 /*exit-by-break*/ ){
+ if( pItem->fg.jointype & (JT_OUTER|JT_CROSS) ) return 0; /* (1c-ii) */
+ if( i==0 ) break;
+ i--;
+ pItem--;
+ if( pItem->pSelect!=0 ) return 0; /* (1c-i) */
+ }
+ return 1;
+}
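A hedged sketch of how the coroutine decision surfaces through the public API: EXPLAIN QUERY PLAN output contains a CO-ROUTINE line when the conditions above hold, and a MATERIALIZE line otherwise. Table names are invented, and the exact plan text varies between SQLite versions.

#include <stdio.h>
#include <sqlite3.h>

static void showPlan(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  char *zEqp = sqlite3_mprintf("EXPLAIN QUERY PLAN %s", zSql);
  if( zEqp && sqlite3_prepare_v2(db, zEqp, -1, &pStmt, 0)==SQLITE_OK ){
    while( sqlite3_step(pStmt)==SQLITE_ROW ){
      /* Column 3 of EXPLAIN QUERY PLAN output is the "detail" text */
      printf("%s\n", (const char*)sqlite3_column_text(pStmt, 3));
    }
  }
  sqlite3_finalize(pStmt);
  sqlite3_free(zEqp);
}

int main(void){
  sqlite3 *db;
  if( sqlite3_open(":memory:", &db)!=SQLITE_OK ) return 1;
  sqlite3_exec(db, "CREATE TABLE t1(x);", 0, 0, 0);
  /* LIMIT in the subquery plus WHERE in the outer query defeats
  ** flattening; as the only FROM term the subquery satisfies (1a)
  ** and should appear as a CO-ROUTINE in the plan. */
  showPlan(db, "SELECT * FROM (SELECT x FROM t1 LIMIT 5) WHERE x>0");
  sqlite3_close(db);
  return 0;
}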
+
+/*
** Generate code for the SELECT statement given in the p argument.
**
** The results are returned according to the SelectDest structure.
@@ -144022,8 +145283,8 @@ SQLITE_PRIVATE int sqlite3Select(
assert( db->mallocFailed==0 );
if( sqlite3AuthCheck(pParse, SQLITE_SELECT, 0, 0, 0) ) return 1;
#if TREETRACE_ENABLED
- SELECTTRACE(1,pParse,p, ("begin processing:\n", pParse->addrExplain));
- if( sqlite3TreeTrace & 0x10100 ){
+ TREETRACE(0x1,pParse,p, ("begin processing:\n", pParse->addrExplain));
+ if( sqlite3TreeTrace & 0x10000 ){
if( (sqlite3TreeTrace & 0x10001)==0x10000 ){
sqlite3TreeViewLine(0, "In sqlite3Select() at %s:%d",
__FILE__, __LINE__);
@@ -144043,8 +145304,8 @@ SQLITE_PRIVATE int sqlite3Select(
/* All of these destinations are also able to ignore the ORDER BY clause */
if( p->pOrderBy ){
#if TREETRACE_ENABLED
- SELECTTRACE(1,pParse,p, ("dropping superfluous ORDER BY:\n"));
- if( sqlite3TreeTrace & 0x100 ){
+ TREETRACE(0x800,pParse,p, ("dropping superfluous ORDER BY:\n"));
+ if( sqlite3TreeTrace & 0x800 ){
sqlite3TreeViewExprList(0, p->pOrderBy, 0, "ORDERBY");
}
#endif
@@ -144064,8 +145325,8 @@ SQLITE_PRIVATE int sqlite3Select(
assert( db->mallocFailed==0 );
assert( p->pEList!=0 );
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x104 ){
- SELECTTRACE(0x104,pParse,p, ("after name resolution:\n"));
+ if( sqlite3TreeTrace & 0x10 ){
+ TREETRACE(0x10,pParse,p, ("after name resolution:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -144106,8 +145367,8 @@ SQLITE_PRIVATE int sqlite3Select(
goto select_end;
}
#if TREETRACE_ENABLED
- if( p->pWin && (sqlite3TreeTrace & 0x108)!=0 ){
- SELECTTRACE(0x104,pParse,p, ("after window rewrite:\n"));
+ if( p->pWin && (sqlite3TreeTrace & 0x40)!=0 ){
+ TREETRACE(0x40,pParse,p, ("after window rewrite:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -144138,7 +145399,7 @@ SQLITE_PRIVATE int sqlite3Select(
&& sqlite3ExprImpliesNonNullRow(p->pWhere, pItem->iCursor)
&& OptimizationEnabled(db, SQLITE_SimplifyJoin)
){
- SELECTTRACE(0x100,pParse,p,
+ TREETRACE(0x1000,pParse,p,
("LEFT-JOIN simplifies to JOIN on term %d\n",i));
pItem->fg.jointype &= ~(JT_LEFT|JT_OUTER);
assert( pItem->iCursor>=0 );
@@ -144194,7 +145455,7 @@ SQLITE_PRIVATE int sqlite3Select(
&& (p->selFlags & SF_OrderByReqd)==0 /* Condition (3) and (4) */
&& OptimizationEnabled(db, SQLITE_OmitOrderBy)
){
- SELECTTRACE(0x100,pParse,p,
+ TREETRACE(0x800,pParse,p,
("omit superfluous ORDER BY on %r FROM-clause subquery\n",i+1));
sqlite3ParserAddCleanup(pParse,
(void(*)(sqlite3*,void*))sqlite3ExprListDelete,
@@ -144249,8 +145510,8 @@ SQLITE_PRIVATE int sqlite3Select(
if( p->pPrior ){
rc = multiSelect(pParse, p, pDest);
#if TREETRACE_ENABLED
- SELECTTRACE(0x1,pParse,p,("end compound-select processing\n"));
- if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
+ TREETRACE(0x400,pParse,p,("end compound-select processing\n"));
+ if( (sqlite3TreeTrace & 0x400)!=0 && ExplainQueryPlanParent(pParse)==0 ){
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -144270,13 +145531,13 @@ SQLITE_PRIVATE int sqlite3Select(
&& propagateConstants(pParse, p)
){
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,("After constant propagation:\n"));
+ if( sqlite3TreeTrace & 0x2000 ){
+ TREETRACE(0x2000,pParse,p,("After constant propagation:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
}else{
- SELECTTRACE(0x100,pParse,p,("Constant propagation not helpful\n"));
+ TREETRACE(0x2000,pParse,p,("Constant propagation not helpful\n"));
}
#ifdef SQLITE_COUNTOFVIEW_OPTIMIZATION
@@ -144284,7 +145545,6 @@ SQLITE_PRIVATE int sqlite3Select(
&& countOfViewOptimization(pParse, p)
){
if( db->mallocFailed ) goto select_end;
- pEList = p->pEList;
pTabList = p->pSrc;
}
#endif
@@ -144349,36 +145609,23 @@ SQLITE_PRIVATE int sqlite3Select(
&& pushDownWhereTerms(pParse, pSub, p->pWhere, pItem)
){
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x100 ){
- SELECTTRACE(0x100,pParse,p,
+ if( sqlite3TreeTrace & 0x4000 ){
+ TREETRACE(0x4000,pParse,p,
("After WHERE-clause push-down into subquery %d:\n", pSub->selId));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
assert( pItem->pSelect && (pItem->pSelect->selFlags & SF_PushDown)!=0 );
}else{
- SELECTTRACE(0x100,pParse,p,("Push-down not possible\n"));
+ TREETRACE(0x4000,pParse,p,("Push-down not possible\n"));
}
zSavedAuthContext = pParse->zAuthContext;
pParse->zAuthContext = pItem->zName;
/* Generate code to implement the subquery
- **
- ** The subquery is implemented as a co-routine if all of the following are
- ** true:
- **
- ** (1) the subquery is guaranteed to be the outer loop (so that
- ** it does not need to be computed more than once), and
- ** (2) the subquery is not a CTE that should be materialized
- ** (3) the subquery is not part of a left operand for a RIGHT JOIN
*/
- if( i==0
- && (pTabList->nSrc==1
- || (pTabList->a[1].fg.jointype&(JT_OUTER|JT_CROSS))!=0) /* (1) */
- && (pItem->fg.isCte==0 || pItem->u2.pCteUse->eM10d!=M10d_Yes) /* (2) */
- && (pTabList->a[0].fg.jointype & JT_LTORJ)==0 /* (3) */
- ){
+ if( fromClauseTermCanBeCoroutine(pParse, pTabList, i, p->selFlags) ){
/* Implement a co-routine that will return a single row of the result
** set on each invocation.
*/
@@ -144409,7 +145656,7 @@ SQLITE_PRIVATE int sqlite3Select(
VdbeComment((v, "%!S", pItem));
}
pSub->nSelectRow = pCteUse->nRowEst;
- }else if( (pPrior = isSelfJoinView(pTabList, pItem))!=0 ){
+ }else if( (pPrior = isSelfJoinView(pTabList, pItem, 0, i))!=0 ){
/* This view has already been materialized by a prior entry in
** this same FROM clause. Reuse it. */
if( pPrior->addrFillSub ){
@@ -144423,6 +145670,9 @@ SQLITE_PRIVATE int sqlite3Select(
** the same view can reuse the materialization. */
int topAddr;
int onceAddr = 0;
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrExplain;
+#endif
pItem->regReturn = ++pParse->nMem;
topAddr = sqlite3VdbeAddOp0(v, OP_Goto);
@@ -144438,15 +145688,14 @@ SQLITE_PRIVATE int sqlite3Select(
VdbeNoopComment((v, "materialize %!S", pItem));
}
sqlite3SelectDestInit(&dest, SRT_EphemTab, pItem->iCursor);
- ExplainQueryPlan((pParse, 1, "MATERIALIZE %!S", pItem));
- dest.zAffSdst = sqlite3TableAffinityStr(db, pItem->pTab);
+
+ ExplainQueryPlan2(addrExplain, (pParse, 1, "MATERIALIZE %!S", pItem));
sqlite3Select(pParse, pSub, &dest);
- sqlite3DbFree(db, dest.zAffSdst);
- dest.zAffSdst = 0;
pItem->pTab->nRowLogEst = pSub->nSelectRow;
if( onceAddr ) sqlite3VdbeJumpHere(v, onceAddr);
sqlite3VdbeAddOp2(v, OP_Return, pItem->regReturn, topAddr+1);
VdbeComment((v, "end %!S", pItem));
+ sqlite3VdbeScanStatusRange(v, addrExplain, addrExplain, -1);
sqlite3VdbeJumpHere(v, topAddr);
sqlite3ClearTempRegCache(pParse);
if( pItem->fg.isCte && pItem->fg.isCorrelated==0 ){
@@ -144472,8 +145721,8 @@ SQLITE_PRIVATE int sqlite3Select(
sDistinct.isTnct = (p->selFlags & SF_Distinct)!=0;
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x400 ){
- SELECTTRACE(0x400,pParse,p,("After all FROM-clause analysis:\n"));
+ if( sqlite3TreeTrace & 0x8000 ){
+ TREETRACE(0x8000,pParse,p,("After all FROM-clause analysis:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -144509,8 +145758,8 @@ SQLITE_PRIVATE int sqlite3Select(
sDistinct.isTnct = 2;
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x400 ){
- SELECTTRACE(0x400,pParse,p,("Transform DISTINCT into GROUP BY:\n"));
+ if( sqlite3TreeTrace & 0x20000 ){
+ TREETRACE(0x20000,pParse,p,("Transform DISTINCT into GROUP BY:\n"));
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -144596,7 +145845,7 @@ SQLITE_PRIVATE int sqlite3Select(
/* Begin the database scan. */
- SELECTTRACE(1,pParse,p,("WhereBegin\n"));
+ TREETRACE(0x2,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, sSort.pOrderBy,
p->pEList, p, wctrlFlags, p->nSelectRow);
if( pWInfo==0 ) goto select_end;
@@ -144613,7 +145862,7 @@ SQLITE_PRIVATE int sqlite3Select(
sSort.pOrderBy = 0;
}
}
- SELECTTRACE(1,pParse,p,("WhereBegin returns\n"));
+ TREETRACE(0x2,pParse,p,("WhereBegin returns\n"));
/* If sorting index that was created by a prior OP_OpenEphemeral
** instruction ended up not being needed, then change the OP_OpenEphemeral
@@ -144652,7 +145901,7 @@ SQLITE_PRIVATE int sqlite3Select(
/* End the database scan loop.
*/
- SELECTTRACE(1,pParse,p,("WhereEnd\n"));
+ TREETRACE(0x2,pParse,p,("WhereEnd\n"));
sqlite3WhereEnd(pWInfo);
}
}else{
@@ -144733,12 +145982,14 @@ SQLITE_PRIVATE int sqlite3Select(
goto select_end;
}
pAggInfo->selId = p->selId;
+#ifdef SQLITE_DEBUG
+ pAggInfo->pSelect = p;
+#endif
memset(&sNC, 0, sizeof(sNC));
sNC.pParse = pParse;
sNC.pSrcList = pTabList;
sNC.uNC.pAggInfo = pAggInfo;
VVA_ONLY( sNC.ncFlags = NC_UAggInfo; )
- pAggInfo->mnReg = pParse->nMem+1;
pAggInfo->nSortingColumn = pGroupBy ? pGroupBy->nExpr : 0;
pAggInfo->pGroupBy = pGroupBy;
sqlite3ExprAnalyzeAggList(&sNC, pEList);
@@ -144759,45 +146010,17 @@ SQLITE_PRIVATE int sqlite3Select(
}else{
minMaxFlag = WHERE_ORDERBY_NORMAL;
}
- for(i=0; i<pAggInfo->nFunc; i++){
- Expr *pExpr = pAggInfo->aFunc[i].pFExpr;
- assert( ExprUseXList(pExpr) );
- sNC.ncFlags |= NC_InAggFunc;
- sqlite3ExprAnalyzeAggList(&sNC, pExpr->x.pList);
-#ifndef SQLITE_OMIT_WINDOWFUNC
- assert( !IsWindowFunc(pExpr) );
- if( ExprHasProperty(pExpr, EP_WinFunc) ){
- sqlite3ExprAnalyzeAggregates(&sNC, pExpr->y.pWin->pFilter);
- }
-#endif
- sNC.ncFlags &= ~NC_InAggFunc;
- }
- pAggInfo->mxReg = pParse->nMem;
+ analyzeAggFuncArgs(pAggInfo, &sNC);
if( db->mallocFailed ) goto select_end;
#if TREETRACE_ENABLED
- if( sqlite3TreeTrace & 0x400 ){
- int ii;
- SELECTTRACE(0x400,pParse,p,("After aggregate analysis %p:\n", pAggInfo));
+ if( sqlite3TreeTrace & 0x20 ){
+ TREETRACE(0x20,pParse,p,("After aggregate analysis %p:\n", pAggInfo));
sqlite3TreeViewSelect(0, p, 0);
if( minMaxFlag ){
sqlite3DebugPrintf("MIN/MAX Optimization (0x%02x) adds:\n", minMaxFlag);
sqlite3TreeViewExprList(0, pMinMaxOrderBy, 0, "ORDERBY");
}
- for(ii=0; ii<pAggInfo->nColumn; ii++){
- struct AggInfo_col *pCol = &pAggInfo->aCol[ii];
- sqlite3DebugPrintf(
- "agg-column[%d] pTab=%s iTable=%d iColumn=%d iMem=%d"
- " iSorterColumn=%d\n",
- ii, pCol->pTab ? pCol->pTab->zName : "NULL",
- pCol->iTable, pCol->iColumn, pCol->iMem,
- pCol->iSorterColumn);
- sqlite3TreeViewExpr(0, pAggInfo->aCol[ii].pCExpr, 0);
- }
- for(ii=0; ii<pAggInfo->nFunc; ii++){
- sqlite3DebugPrintf("agg-func[%d]: iMem=%d\n",
- ii, pAggInfo->aFunc[ii].iMem);
- sqlite3TreeViewExpr(0, pAggInfo->aFunc[ii].pFExpr, 0);
- }
+ printAggInfo(pAggInfo);
}
#endif
@@ -144866,7 +146089,7 @@ SQLITE_PRIVATE int sqlite3Select(
** in the right order to begin with.
*/
sqlite3VdbeAddOp2(v, OP_Gosub, regReset, addrReset);
- SELECTTRACE(1,pParse,p,("WhereBegin\n"));
+ TREETRACE(0x2,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pGroupBy, pDistinct,
p, (sDistinct.isTnct==2 ? WHERE_DISTINCTBY : WHERE_GROUPBY)
| (orderByGrp ? WHERE_SORTBYGROUP : 0) | distFlag, 0
@@ -144875,8 +146098,12 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3ExprListDelete(db, pDistinct);
goto select_end;
}
+ if( pParse->pIdxEpr ){
+ optimizeAggregateUseOfIndexedExpr(pParse, p, pAggInfo, &sNC);
+ }
+ assignAggregateRegisters(pParse, pAggInfo);
eDist = sqlite3WhereIsDistinct(pWInfo);
- SELECTTRACE(1,pParse,p,("WhereBegin returns\n"));
+ TREETRACE(0x2,pParse,p,("WhereBegin returns\n"));
if( sqlite3WhereIsOrdered(pWInfo)==pGroupBy->nExpr ){
/* The optimizer is able to deliver rows in group by order so
** we do not have to sort. The OP_OpenEphemeral table will be
@@ -144925,7 +146152,7 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3VdbeAddOp2(v, OP_SorterInsert, pAggInfo->sortingIdx, regRecord);
sqlite3ReleaseTempReg(pParse, regRecord);
sqlite3ReleaseTempRange(pParse, regBase, nCol);
- SELECTTRACE(1,pParse,p,("WhereEnd\n"));
+ TREETRACE(0x2,pParse,p,("WhereEnd\n"));
sqlite3WhereEnd(pWInfo);
pAggInfo->sortingIdxPTab = sortPTab = pParse->nTab++;
sortOut = sqlite3GetTempReg(pParse);
@@ -144935,6 +146162,23 @@ SQLITE_PRIVATE int sqlite3Select(
pAggInfo->useSortingIdx = 1;
}
+ /* If there are entries in pAggInfo->aFunc[] that contain subexpressions
+ ** that are indexed (and that were previously identified and tagged
+ ** in optimizeAggregateUseOfIndexedExpr()) then those subexpressions
+ ** must now be converted into a TK_AGG_COLUMN node so that the value
+ ** is correctly pulled from the index rather than being recomputed. */
+ if( pParse->pIdxEpr ){
+ aggregateConvertIndexedExprRefToColumn(pAggInfo);
+#if TREETRACE_ENABLED
+ if( sqlite3TreeTrace & 0x20 ){
+ TREETRACE(0x20, pParse, p,
+ ("AggInfo function expressions converted to reference index\n"));
+ sqlite3TreeViewSelect(0, p, 0);
+ printAggInfo(pAggInfo);
+ }
+#endif
+ }
+
/* If the index or temporary table used by the GROUP BY sort
** will naturally deliver rows in the order required by the ORDER BY
** clause, cancel the ephemeral table open coded earlier.
@@ -145003,7 +146247,7 @@ SQLITE_PRIVATE int sqlite3Select(
sqlite3VdbeAddOp2(v, OP_SorterNext, pAggInfo->sortingIdx,addrTopOfLoop);
VdbeCoverage(v);
}else{
- SELECTTRACE(1,pParse,p,("WhereEnd\n"));
+ TREETRACE(0x2,pParse,p,("WhereEnd\n"));
sqlite3WhereEnd(pWInfo);
sqlite3VdbeChangeToNoop(v, addrSortingIdx);
}
@@ -145113,7 +146357,8 @@ SQLITE_PRIVATE int sqlite3Select(
if( pKeyInfo ){
sqlite3VdbeChangeP4(v, -1, (char *)pKeyInfo, P4_KEYINFO);
}
- sqlite3VdbeAddOp2(v, OP_Count, iCsr, pAggInfo->aFunc[0].iMem);
+ assignAggregateRegisters(pParse, pAggInfo);
+ sqlite3VdbeAddOp2(v, OP_Count, iCsr, AggInfoFuncReg(pAggInfo,0));
sqlite3VdbeAddOp1(v, OP_Close, iCsr);
explainSimpleCount(pParse, pTab, pBest);
}else{
@@ -145149,6 +146394,7 @@ SQLITE_PRIVATE int sqlite3Select(
pDistinct = pAggInfo->aFunc[0].pFExpr->x.pList;
distFlag = pDistinct ? (WHERE_WANT_DISTINCT|WHERE_AGG_DISTINCT) : 0;
}
+ assignAggregateRegisters(pParse, pAggInfo);
/* This case runs if the aggregate has no GROUP BY clause. The
** processing is much simpler since there is only a single row
@@ -145165,13 +146411,13 @@ SQLITE_PRIVATE int sqlite3Select(
assert( minMaxFlag==WHERE_ORDERBY_NORMAL || pMinMaxOrderBy!=0 );
assert( pMinMaxOrderBy==0 || pMinMaxOrderBy->nExpr==1 );
- SELECTTRACE(1,pParse,p,("WhereBegin\n"));
+ TREETRACE(0x2,pParse,p,("WhereBegin\n"));
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMaxOrderBy,
pDistinct, p, minMaxFlag|distFlag, 0);
if( pWInfo==0 ){
goto select_end;
}
- SELECTTRACE(1,pParse,p,("WhereBegin returns\n"));
+ TREETRACE(0x2,pParse,p,("WhereBegin returns\n"));
eDist = sqlite3WhereIsDistinct(pWInfo);
updateAccumulator(pParse, regAcc, pAggInfo, eDist);
if( eDist!=WHERE_DISTINCT_NOOP ){
@@ -145185,7 +146431,7 @@ SQLITE_PRIVATE int sqlite3Select(
if( minMaxFlag ){
sqlite3WhereMinMaxOptEarlyOut(v, pWInfo);
}
- SELECTTRACE(1,pParse,p,("WhereEnd\n"));
+ TREETRACE(0x2,pParse,p,("WhereEnd\n"));
sqlite3WhereEnd(pWInfo);
finalizeAggFunctions(pParse, pAggInfo);
}
@@ -145207,8 +146453,6 @@ SQLITE_PRIVATE int sqlite3Select(
** and send them to the callback one by one.
*/
if( sSort.pOrderBy ){
- explainTempTable(pParse,
- sSort.nOBSat>0 ? "RIGHT PART OF ORDER BY":"ORDER BY");
assert( p->pEList==pEList );
generateSortTail(pParse, p, &sSort, pEList->nExpr, pDest);
}
@@ -145232,7 +146476,7 @@ select_end:
if( pAggInfo && !db->mallocFailed ){
for(i=0; i<pAggInfo->nColumn; i++){
Expr *pExpr = pAggInfo->aCol[i].pCExpr;
- assert( pExpr!=0 );
+ if( pExpr==0 ) continue;
assert( pExpr->pAggInfo==pAggInfo );
assert( pExpr->iAgg==i );
}
@@ -145246,8 +146490,8 @@ select_end:
#endif
#if TREETRACE_ENABLED
- SELECTTRACE(0x1,pParse,p,("end processing\n"));
- if( (sqlite3TreeTrace & 0x2000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
+ TREETRACE(0x1,pParse,p,("end processing\n"));
+ if( (sqlite3TreeTrace & 0x40000)!=0 && ExplainQueryPlanParent(pParse)==0 ){
sqlite3TreeViewSelect(0, p, 0);
}
#endif
@@ -145662,6 +146906,7 @@ SQLITE_PRIVATE void sqlite3BeginTrigger(
}else{
assert( !db->init.busy );
sqlite3CodeVerifySchema(pParse, iDb);
+ VVA_ONLY( pParse->ifNotExists = 1; )
}
goto trigger_cleanup;
}
@@ -146443,7 +147688,7 @@ static void codeReturningTrigger(
}
sqlite3ExprListDelete(db, sSelect.pEList);
pNew = sqlite3ExpandReturning(pParse, pReturning->pReturnEL, pTab);
- if( !db->mallocFailed ){
+ if( pParse->nErr==0 ){
NameContext sNC;
memset(&sNC, 0, sizeof(sNC));
if( pReturning->nRetCol==0 ){
@@ -147201,7 +148446,8 @@ static void updateFromSelect(
}
}
pSelect = sqlite3SelectNew(pParse, pList,
- pSrc, pWhere2, pGrp, 0, pOrderBy2, SF_UFSrcCheck|SF_IncludeHidden, pLimit2
+ pSrc, pWhere2, pGrp, 0, pOrderBy2,
+ SF_UFSrcCheck|SF_IncludeHidden|SF_UpdateFrom, pLimit2
);
if( pSelect ) pSelect->selFlags |= SF_OrderByReqd;
sqlite3SelectDestInit(&dest, eDest, iEph);
@@ -147664,12 +148910,22 @@ SQLITE_PRIVATE void sqlite3Update(
/* Begin the database scan.
**
** Do not consider a single-pass strategy for a multi-row update if
- ** there are any triggers or foreign keys to process, or rows may
- ** be deleted as a result of REPLACE conflict handling. Any of these
- ** things might disturb a cursor being used to scan through the table
- ** or index, causing a single-pass approach to malfunction. */
+ ** there is anything that might disrupt the cursor being used to do
+ ** the UPDATE:
+ ** (1) This is a nested UPDATE
+ ** (2) There are triggers
+ ** (3) There are FOREIGN KEY constraints
+ ** (4) There are REPLACE conflict handlers
+ ** (5) There are subqueries in the WHERE clause
+ */
flags = WHERE_ONEPASS_DESIRED;
- if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){
+ if( !pParse->nested
+ && !pTrigger
+ && !hasFK
+ && !chngKey
+ && !bReplace
+ && (sNC.ncFlags & NC_Subquery)==0
+ ){
flags |= WHERE_ONEPASS_MULTIROW;
}
pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere,0,0,0,flags,iIdxCur);
@@ -149235,10 +150491,10 @@ SQLITE_PRIVATE void sqlite3VtabUnlock(VTable *pVTab){
pVTab->nRef--;
if( pVTab->nRef==0 ){
sqlite3_vtab *p = pVTab->pVtab;
- sqlite3VtabModuleUnref(pVTab->db, pVTab->pMod);
if( p ){
p->pModule->xDisconnect(p);
}
+ sqlite3VtabModuleUnref(pVTab->db, pVTab->pMod);
sqlite3DbFree(db, pVTab);
}
}
@@ -149634,7 +150890,9 @@ static int vtabCallConstructor(
sCtx.pPrior = db->pVtabCtx;
sCtx.bDeclared = 0;
db->pVtabCtx = &sCtx;
+ pTab->nTabRef++;
rc = xConstruct(db, pMod->pAux, nArg, azArg, &pVTable->pVtab, &zErr);
+ sqlite3DeleteTable(db, pTab);
db->pVtabCtx = sCtx.pPrior;
if( rc==SQLITE_NOMEM ) sqlite3OomFault(db);
assert( sCtx.pTab==pTab );
@@ -151028,6 +152286,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(Parse*, SrcItem*, WhereClause*);
#define WHERE_SELFCULL 0x00800000 /* nOut reduced by extra WHERE terms */
#define WHERE_OMIT_OFFSET 0x01000000 /* Set offset counter to zero */
#define WHERE_VIEWSCAN 0x02000000 /* A full-scan of a VIEW or subquery */
+#define WHERE_EXPRIDX 0x04000000 /* Uses an index-on-expressions */
#endif /* !defined(SQLITE_WHEREINT_H) */
@@ -151284,6 +152543,8 @@ SQLITE_PRIVATE int sqlite3WhereExplainBloomFilter(
zMsg = sqlite3StrAccumFinish(&str);
ret = sqlite3VdbeAddOp4(v, OP_Explain, sqlite3VdbeCurrentAddr(v),
pParse->addrExplain, 0, zMsg,P4_DYNAMIC);
+
+ sqlite3VdbeScanStatus(v, sqlite3VdbeCurrentAddr(v)-1, 0, 0, 0, 0);
return ret;
}
#endif /* SQLITE_OMIT_EXPLAIN */
@@ -151306,14 +152567,27 @@ SQLITE_PRIVATE void sqlite3WhereAddScanStatus(
){
const char *zObj = 0;
WhereLoop *pLoop = pLvl->pWLoop;
- if( (pLoop->wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){
+ int wsFlags = pLoop->wsFlags;
+ int viaCoroutine = 0;
+
+ if( (wsFlags & WHERE_VIRTUALTABLE)==0 && pLoop->u.btree.pIndex!=0 ){
zObj = pLoop->u.btree.pIndex->zName;
}else{
zObj = pSrclist->a[pLvl->iFrom].zName;
+ viaCoroutine = pSrclist->a[pLvl->iFrom].fg.viaCoroutine;
}
sqlite3VdbeScanStatus(
v, addrExplain, pLvl->addrBody, pLvl->addrVisit, pLoop->nOut, zObj
);
+
+ if( viaCoroutine==0 ){
+ if( (wsFlags & (WHERE_MULTI_OR|WHERE_AUTO_INDEX))==0 ){
+ sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iTabCur);
+ }
+ if( wsFlags & WHERE_INDEXED ){
+ sqlite3VdbeScanStatusRange(v, addrExplain, -1, pLvl->iIdxCur);
+ }
+ }
}
#endif
@@ -151373,7 +152647,7 @@ static void disableTerm(WhereLevel *pLevel, WhereTerm *pTerm){
pTerm->wtFlags |= TERM_CODED;
}
#ifdef WHERETRACE_ENABLED
- if( sqlite3WhereTrace & 0x20000 ){
+ if( (sqlite3WhereTrace & 0x4001)==0x4001 ){
sqlite3DebugPrintf("DISABLE-");
sqlite3WhereTermPrint(pTerm, (int)(pTerm - (pTerm->pWC->a)));
}
@@ -151488,68 +152762,75 @@ static Expr *removeUnindexableInClauseTerms(
Expr *pX /* The IN expression to be reduced */
){
sqlite3 *db = pParse->db;
+ Select *pSelect; /* Pointer to the SELECT on the RHS */
Expr *pNew;
pNew = sqlite3ExprDup(db, pX, 0);
if( db->mallocFailed==0 ){
- ExprList *pOrigRhs; /* Original unmodified RHS */
- ExprList *pOrigLhs; /* Original unmodified LHS */
- ExprList *pRhs = 0; /* New RHS after modifications */
- ExprList *pLhs = 0; /* New LHS after mods */
- int i; /* Loop counter */
- Select *pSelect; /* Pointer to the SELECT on the RHS */
-
- assert( ExprUseXSelect(pNew) );
- pOrigRhs = pNew->x.pSelect->pEList;
- assert( pNew->pLeft!=0 );
- assert( ExprUseXList(pNew->pLeft) );
- pOrigLhs = pNew->pLeft->x.pList;
- for(i=iEq; i<pLoop->nLTerm; i++){
- if( pLoop->aLTerm[i]->pExpr==pX ){
- int iField;
- assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 );
- iField = pLoop->aLTerm[i]->u.x.iField - 1;
- if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */
- pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr);
- pOrigRhs->a[iField].pExpr = 0;
- assert( pOrigLhs->a[iField].pExpr!=0 );
- pLhs = sqlite3ExprListAppend(pParse, pLhs, pOrigLhs->a[iField].pExpr);
- pOrigLhs->a[iField].pExpr = 0;
- }
- }
- sqlite3ExprListDelete(db, pOrigRhs);
- sqlite3ExprListDelete(db, pOrigLhs);
- pNew->pLeft->x.pList = pLhs;
- pNew->x.pSelect->pEList = pRhs;
- if( pLhs && pLhs->nExpr==1 ){
- /* Take care here not to generate a TK_VECTOR containing only a
- ** single value. Since the parser never creates such a vector, some
- ** of the subroutines do not handle this case. */
- Expr *p = pLhs->a[0].pExpr;
- pLhs->a[0].pExpr = 0;
- sqlite3ExprDelete(db, pNew->pLeft);
- pNew->pLeft = p;
- }
- pSelect = pNew->x.pSelect;
- if( pSelect->pOrderBy ){
- /* If the SELECT statement has an ORDER BY clause, zero the
- ** iOrderByCol variables. These are set to non-zero when an
- ** ORDER BY term exactly matches one of the terms of the
- ** result-set. Since the result-set of the SELECT statement may
- ** have been modified or reordered, these variables are no longer
- ** set correctly. Since setting them is just an optimization,
- ** it's easiest just to zero them here. */
- ExprList *pOrderBy = pSelect->pOrderBy;
- for(i=0; i<pOrderBy->nExpr; i++){
- pOrderBy->a[i].u.x.iOrderByCol = 0;
+ for(pSelect=pNew->x.pSelect; pSelect; pSelect=pSelect->pPrior){
+ ExprList *pOrigRhs; /* Original unmodified RHS */
+ ExprList *pOrigLhs = 0; /* Original unmodified LHS */
+ ExprList *pRhs = 0; /* New RHS after modifications */
+ ExprList *pLhs = 0; /* New LHS after mods */
+ int i; /* Loop counter */
+
+ assert( ExprUseXSelect(pNew) );
+ pOrigRhs = pSelect->pEList;
+ assert( pNew->pLeft!=0 );
+ assert( ExprUseXList(pNew->pLeft) );
+ if( pSelect==pNew->x.pSelect ){
+ pOrigLhs = pNew->pLeft->x.pList;
+ }
+ for(i=iEq; i<pLoop->nLTerm; i++){
+ if( pLoop->aLTerm[i]->pExpr==pX ){
+ int iField;
+ assert( (pLoop->aLTerm[i]->eOperator & (WO_OR|WO_AND))==0 );
+ iField = pLoop->aLTerm[i]->u.x.iField - 1;
+ if( pOrigRhs->a[iField].pExpr==0 ) continue; /* Duplicate PK column */
+ pRhs = sqlite3ExprListAppend(pParse, pRhs, pOrigRhs->a[iField].pExpr);
+ pOrigRhs->a[iField].pExpr = 0;
+ if( pOrigLhs ){
+ assert( pOrigLhs->a[iField].pExpr!=0 );
+ pLhs = sqlite3ExprListAppend(pParse,pLhs,pOrigLhs->a[iField].pExpr);
+ pOrigLhs->a[iField].pExpr = 0;
+ }
+ }
+ }
+ sqlite3ExprListDelete(db, pOrigRhs);
+ if( pOrigLhs ){
+ sqlite3ExprListDelete(db, pOrigLhs);
+ pNew->pLeft->x.pList = pLhs;
+ }
+ pSelect->pEList = pRhs;
+ if( pLhs && pLhs->nExpr==1 ){
+ /* Take care here not to generate a TK_VECTOR containing only a
+ ** single value. Since the parser never creates such a vector, some
+ ** of the subroutines do not handle this case. */
+ Expr *p = pLhs->a[0].pExpr;
+ pLhs->a[0].pExpr = 0;
+ sqlite3ExprDelete(db, pNew->pLeft);
+ pNew->pLeft = p;
+ }
+ if( pSelect->pOrderBy ){
+ /* If the SELECT statement has an ORDER BY clause, zero the
+ ** iOrderByCol variables. These are set to non-zero when an
+ ** ORDER BY term exactly matches one of the terms of the
+ ** result-set. Since the result-set of the SELECT statement may
+ ** have been modified or reordered, these variables are no longer
+ ** set correctly. Since setting them is just an optimization,
+ ** it's easiest just to zero them here. */
+ ExprList *pOrderBy = pSelect->pOrderBy;
+ for(i=0; i<pOrderBy->nExpr; i++){
+ pOrderBy->a[i].u.x.iOrderByCol = 0;
+ }
}
- }
#if 0
- printf("For indexing, change the IN expr:\n");
- sqlite3TreeViewExpr(0, pX, 0);
- printf("Into:\n");
- sqlite3TreeViewExpr(0, pNew, 0);
+ printf("For indexing, change the IN expr:\n");
+ sqlite3TreeViewExpr(0, pX, 0);
+ printf("Into:\n");
+ sqlite3TreeViewExpr(0, pNew, 0);
#endif
+ }
}
return pNew;
}
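A sketch of the reduction, with hypothetical tables: if only the second field of

    (a,b) IN (SELECT x,y FROM t2)

is usable by the chosen index, the duplicated expression is cut down to

    b IN (SELECT y FROM t2)

and the new outer loop over pSelect->pPrior applies the same trimming to every arm when the RHS is a multi-row VALUES list.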
@@ -152360,13 +153641,15 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
pLevel->notReady = notReady & ~sqlite3WhereGetMask(&pWInfo->sMaskSet, iCur);
bRev = (pWInfo->revMask>>iLevel)&1;
VdbeModuleComment((v, "Begin WHERE-loop%d: %s",iLevel,pTabItem->pTab->zName));
-#if WHERETRACE_ENABLED /* 0x20800 */
- if( sqlite3WhereTrace & 0x800 ){
+#if WHERETRACE_ENABLED /* 0x4001 */
+ if( sqlite3WhereTrace & 0x1 ){
sqlite3DebugPrintf("Coding level %d of %d: notReady=%llx iFrom=%d\n",
iLevel, pWInfo->nLevel, (u64)notReady, pLevel->iFrom);
- sqlite3WhereLoopPrint(pLoop, pWC);
+ if( sqlite3WhereTrace & 0x1000 ){
+ sqlite3WhereLoopPrint(pLoop, pWC);
+ }
}
- if( sqlite3WhereTrace & 0x20000 ){
+ if( (sqlite3WhereTrace & 0x4001)==0x4001 ){
if( iLevel==0 ){
sqlite3DebugPrintf("WHERE clause being coded:\n");
sqlite3TreeViewExpr(0, pWInfo->pWhere, 0);
@@ -153290,7 +154573,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
/* Loop through table entries that match term pOrTerm. */
ExplainQueryPlan((pParse, 1, "INDEX %d", ii+1));
- WHERETRACE(0xffff, ("Subplan for OR-clause:\n"));
+ WHERETRACE(0xffffffff, ("Subplan for OR-clause:\n"));
pSubWInfo = sqlite3WhereBegin(pParse, pOrTab, pOrExpr, 0, 0, 0,
WHERE_OR_SUBCLAUSE, iCovCur);
assert( pSubWInfo || pParse->nErr );
@@ -153527,12 +154810,12 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
#endif
}
-#ifdef WHERETRACE_ENABLED /* 0xffff */
+#ifdef WHERETRACE_ENABLED /* 0xffffffff */
if( sqlite3WhereTrace ){
VdbeNoopComment((v, "WhereTerm[%d] (%p) priority=%d",
pWC->nTerm-j, pTerm, iLoop));
}
- if( sqlite3WhereTrace & 0x800 ){
+ if( sqlite3WhereTrace & 0x4000 ){
sqlite3DebugPrintf("Coding auxiliary constraint:\n");
sqlite3WhereTermPrint(pTerm, pWC->nTerm-j);
}
@@ -153561,8 +154844,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
if( pTerm->leftCursor!=iCur ) continue;
if( pTabItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT) ) continue;
pE = pTerm->pExpr;
-#ifdef WHERETRACE_ENABLED /* 0x800 */
- if( sqlite3WhereTrace & 0x800 ){
+#ifdef WHERETRACE_ENABLED /* 0x4001 */
+ if( (sqlite3WhereTrace & 0x4001)==0x4001 ){
sqlite3DebugPrintf("Coding transitive constraint:\n");
sqlite3WhereTermPrint(pTerm, pWC->nTerm-j);
}
@@ -153677,13 +154960,13 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
}
-#if WHERETRACE_ENABLED /* 0x20800 */
- if( sqlite3WhereTrace & 0x20000 ){
+#if WHERETRACE_ENABLED /* 0x4001 */
+ if( sqlite3WhereTrace & 0x4000 ){
sqlite3DebugPrintf("All WHERE-clause terms after coding level %d:\n",
iLevel);
sqlite3WhereClausePrint(pWC);
}
- if( sqlite3WhereTrace & 0x800 ){
+ if( sqlite3WhereTrace & 0x1 ){
sqlite3DebugPrintf("End Coding level %d: notReady=%llx\n",
iLevel, (u64)pLevel->notReady);
}
@@ -154759,36 +156042,40 @@ static Bitmask exprSelectUsage(WhereMaskSet *pMaskSet, Select *pS){
*/
static SQLITE_NOINLINE int exprMightBeIndexed2(
SrcList *pFrom, /* The FROM clause */
- Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */
int *aiCurCol, /* Write the referenced table cursor and column here */
- Expr *pExpr /* An operand of a comparison operator */
+ Expr *pExpr, /* An operand of a comparison operator */
+ int j /* Start looking with the j-th pFrom entry */
){
Index *pIdx;
int i;
int iCur;
- for(i=0; mPrereq>1; i++, mPrereq>>=1){}
- iCur = pFrom->a[i].iCursor;
- for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){
- if( pIdx->aColExpr==0 ) continue;
- for(i=0; i<pIdx->nKeyCol; i++){
- if( pIdx->aiColumn[i]!=XN_EXPR ) continue;
- assert( pIdx->bHasExpr );
- if( sqlite3ExprCompareSkip(pExpr, pIdx->aColExpr->a[i].pExpr, iCur)==0 ){
- aiCurCol[0] = iCur;
- aiCurCol[1] = XN_EXPR;
- return 1;
+ do{
+ iCur = pFrom->a[j].iCursor;
+ for(pIdx=pFrom->a[j].pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( pIdx->aColExpr==0 ) continue;
+ for(i=0; i<pIdx->nKeyCol; i++){
+ if( pIdx->aiColumn[i]!=XN_EXPR ) continue;
+ assert( pIdx->bHasExpr );
+ if( sqlite3ExprCompareSkip(pExpr,pIdx->aColExpr->a[i].pExpr,iCur)==0
+ && pExpr->op!=TK_STRING
+ ){
+ aiCurCol[0] = iCur;
+ aiCurCol[1] = XN_EXPR;
+ return 1;
+ }
}
}
- }
+ }while( ++j < pFrom->nSrc );
return 0;
}
static int exprMightBeIndexed(
SrcList *pFrom, /* The FROM clause */
- Bitmask mPrereq, /* Bitmask of FROM clause terms referenced by pExpr */
int *aiCurCol, /* Write the referenced table cursor & column here */
Expr *pExpr, /* An operand of a comparison operator */
int op /* The specific comparison operator */
){
+ int i;
+
/* If this expression is a vector to the left or right of a
** inequality constraint (>, <, >= or <=), perform the processing
** on the first element of the vector. */
@@ -154798,7 +156085,6 @@ static int exprMightBeIndexed(
if( pExpr->op==TK_VECTOR && (op>=TK_GT && ALWAYS(op<=TK_GE)) ){
assert( ExprUseXList(pExpr) );
pExpr = pExpr->x.pList->a[0].pExpr;
-
}
if( pExpr->op==TK_COLUMN ){
@@ -154806,9 +156092,16 @@ static int exprMightBeIndexed(
aiCurCol[1] = pExpr->iColumn;
return 1;
}
- if( mPrereq==0 ) return 0; /* No table references */
- if( (mPrereq&(mPrereq-1))!=0 ) return 0; /* Refs more than one table */
- return exprMightBeIndexed2(pFrom,mPrereq,aiCurCol,pExpr);
+
+ for(i=0; i<pFrom->nSrc; i++){
+ Index *pIdx;
+ for(pIdx=pFrom->a[i].pTab->pIndex; pIdx; pIdx=pIdx->pNext){
+ if( pIdx->aColExpr ){
+ return exprMightBeIndexed2(pFrom,aiCurCol,pExpr,i);
+ }
+ }
+ }
+ return 0;
}
@@ -154934,7 +156227,7 @@ static void exprAnalyze(
pLeft = pLeft->x.pList->a[pTerm->u.x.iField-1].pExpr;
}
- if( exprMightBeIndexed(pSrc, prereqLeft, aiCurCol, pLeft, op) ){
+ if( exprMightBeIndexed(pSrc, aiCurCol, pLeft, op) ){
pTerm->leftCursor = aiCurCol[0];
assert( (pTerm->eOperator & (WO_OR|WO_AND))==0 );
pTerm->u.x.leftColumn = aiCurCol[1];
@@ -154942,7 +156235,7 @@ static void exprAnalyze(
}
if( op==TK_IS ) pTerm->wtFlags |= TERM_IS;
if( pRight
- && exprMightBeIndexed(pSrc, pTerm->prereqRight, aiCurCol, pRight, op)
+ && exprMightBeIndexed(pSrc, aiCurCol, pRight, op)
&& !ExprHasProperty(pRight, EP_FixedCol)
){
WhereTerm *pNew;
@@ -155153,7 +156446,6 @@ static void exprAnalyze(
transferJoinMarkings(pNewExpr1, pExpr);
idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags);
testcase( idxNew1==0 );
- exprAnalyze(pSrc, pWC, idxNew1);
pNewExpr2 = sqlite3ExprDup(db, pLeft, 0);
pNewExpr2 = sqlite3PExpr(pParse, TK_LT,
sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName),
@@ -155161,6 +156453,7 @@ static void exprAnalyze(
transferJoinMarkings(pNewExpr2, pExpr);
idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags);
testcase( idxNew2==0 );
+ exprAnalyze(pSrc, pWC, idxNew1);
exprAnalyze(pSrc, pWC, idxNew2);
pTerm = &pWC->a[idxTerm];
if( isComplete ){
@@ -155217,7 +156510,7 @@ static void exprAnalyze(
&& pTerm->u.x.iField==0
&& pExpr->pLeft->op==TK_VECTOR
&& ALWAYS( ExprUseXSelect(pExpr) )
- && pExpr->x.pSelect->pPrior==0
+ && (pExpr->x.pSelect->pPrior==0 || (pExpr->x.pSelect->selFlags & SF_Values))
#ifndef SQLITE_OMIT_WINDOWFUNC
&& pExpr->x.pSelect->pWin==0
#endif
@@ -155405,6 +156698,13 @@ SQLITE_PRIVATE void SQLITE_NOINLINE sqlite3WhereAddLimit(WhereClause *pWC, Selec
assert( pWC->a[ii].eOperator==WO_ROWVAL );
continue;
}
+ if( pWC->a[ii].nChild ){
+ /* If this term has child terms, then they are also part of the
+ ** pWC->a[] array. So this term can be ignored, as a LIMIT clause
+ ** will only be added if each of the child terms passes the
+ ** (leftCursor==iCsr) test below. */
+ continue;
+ }
if( pWC->a[ii].leftCursor!=iCsr ) return;
}
@@ -155624,7 +156924,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
pRhs = sqlite3PExpr(pParse, TK_UPLUS,
sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0);
pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef, pRhs);
- if( pItem->fg.jointype & (JT_LEFT|JT_LTORJ) ){
+ if( pItem->fg.jointype & (JT_LEFT|JT_LTORJ|JT_RIGHT) ){
joinType = EP_OuterON;
}else{
joinType = EP_InnerON;
@@ -156343,7 +157643,7 @@ static void translateColumnToCopy(
#if !defined(SQLITE_OMIT_VIRTUALTABLE) && defined(WHERETRACE_ENABLED)
static void whereTraceIndexInfoInputs(sqlite3_index_info *p){
int i;
- if( !sqlite3WhereTrace ) return;
+ if( (sqlite3WhereTrace & 0x10)==0 ) return;
for(i=0; i<p->nConstraint; i++){
sqlite3DebugPrintf(
" constraint[%d]: col=%d termid=%d op=%d usabled=%d collseq=%s\n",
@@ -156363,7 +157663,7 @@ static void whereTraceIndexInfoInputs(sqlite3_index_info *p){
}
static void whereTraceIndexInfoOutputs(sqlite3_index_info *p){
int i;
- if( !sqlite3WhereTrace ) return;
+ if( (sqlite3WhereTrace & 0x10)==0 ) return;
for(i=0; i<p->nConstraint; i++){
sqlite3DebugPrintf(" usage[%d]: argvIdx=%d omit=%d\n",
i,
@@ -156450,6 +157750,57 @@ static int termCanDriveIndex(
#ifndef SQLITE_OMIT_AUTOMATIC_INDEX
+
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+/*
+** Argument pIdx represents an automatic index that the current statement
+** will create and populate. Add an OP_Explain with text of the form:
+**
+** CREATE AUTOMATIC INDEX ON <table>(<cols>) [WHERE <expr>]
+**
+** This is only required if sqlite3_stmt_scanstatus() is enabled, to
+** associate an SQLITE_SCANSTAT_NCYCLE and SQLITE_SCANSTAT_NLOOP
+** values with. In order to avoid breaking legacy code and test cases,
+** the OP_Explain is not added if this is an EXPLAIN QUERY PLAN command.
+*/
+static void explainAutomaticIndex(
+ Parse *pParse,
+ Index *pIdx, /* Automatic index to explain */
+ int bPartial, /* True if pIdx is a partial index */
+ int *pAddrExplain /* OUT: Address of OP_Explain */
+){
+ if( pParse->explain!=2 ){
+ Table *pTab = pIdx->pTable;
+ const char *zSep = "";
+ char *zText = 0;
+ int ii = 0;
+ sqlite3_str *pStr = sqlite3_str_new(pParse->db);
+ sqlite3_str_appendf(pStr,"CREATE AUTOMATIC INDEX ON %s(", pTab->zName);
+ assert( pIdx->nColumn>1 );
+ assert( pIdx->aiColumn[pIdx->nColumn-1]==XN_ROWID );
+ for(ii=0; ii<(pIdx->nColumn-1); ii++){
+ const char *zName = 0;
+ int iCol = pIdx->aiColumn[ii];
+
+ zName = pTab->aCol[iCol].zCnName;
+ sqlite3_str_appendf(pStr, "%s%s", zSep, zName);
+ zSep = ", ";
+ }
+ zText = sqlite3_str_finish(pStr);
+ if( zText==0 ){
+ sqlite3OomFault(pParse->db);
+ }else{
+ *pAddrExplain = sqlite3VdbeExplain(
+ pParse, 0, "%s)%s", zText, (bPartial ? " WHERE <expr>" : "")
+ );
+ sqlite3_free(zText);
+ }
+ }
+}
+#else
+# define explainAutomaticIndex(a,b,c,d)
+#endif
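A hedged sketch of reading that OP_Explain text back through the public sqlite3_stmt_scanstatus_v2() interface (available only in builds compiled with SQLITE_ENABLE_STMT_SCANSTATUS); the statement handle is assumed to have been prepared and stepped elsewhere.

#include <stdio.h>
#include <sqlite3.h>

static void showScanStats(sqlite3_stmt *pStmt){
  int i;
  for(i=0; ; i++){
    const char *zExplain = 0;
    sqlite3_int64 nLoop = 0;
    /* A non-zero return means index i is out of range */
    if( sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_EXPLAIN,
                                   SQLITE_SCANSTAT_COMPLEX, &zExplain) ){
      break;
    }
    sqlite3_stmt_scanstatus_v2(pStmt, i, SQLITE_SCANSTAT_NLOOP,
                               SQLITE_SCANSTAT_COMPLEX, &nLoop);
    /* Automatic indexes show up here with text of the form
    ** "CREATE AUTOMATIC INDEX ON <table>(<cols>)..." */
    printf("%lld loop(s): %s\n", (long long)nLoop,
           zExplain ? zExplain : "(no text)");
  }
}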
+
/*
** Generate code to construct the Index object for an automatic index
** and to set up the WhereLevel object pLevel so that the code generator
@@ -156485,6 +157836,9 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
SrcItem *pTabItem; /* FROM clause term being indexed */
int addrCounter = 0; /* Address where integer counter is initialized */
int regBase; /* Array of registers where record is assembled */
+#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
+ int addrExp = 0; /* Address of OP_Explain */
+#endif
/* Generate code to skip over the creation and initialization of the
** transient index on 2nd and subsequent iterations of the loop. */
@@ -156608,6 +157962,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
pIdx->azColl[n] = sqlite3StrBINARY;
/* Create the automatic index */
+ explainAutomaticIndex(pParse, pIdx, pPartial!=0, &addrExp);
assert( pLevel->iIdxCur>=0 );
pLevel->iIdxCur = pParse->nTab++;
sqlite3VdbeAddOp2(v, OP_OpenAutoindex, pLevel->iIdxCur, nKeyCol+1);
@@ -156643,6 +157998,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0,
regBase, pLoop->u.btree.nEq);
}
+ sqlite3VdbeScanStatusCounters(v, addrExp, addrExp, sqlite3VdbeCurrentAddr(v));
sqlite3VdbeAddOp2(v, OP_IdxInsert, pLevel->iIdxCur, regRecord);
sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
if( pPartial ) sqlite3VdbeResolveLabel(v, iContinue);
@@ -156663,6 +158019,7 @@ static SQLITE_NOINLINE void constructAutomaticIndex(
/* Jump here when skipping the initialization */
sqlite3VdbeJumpHere(v, addrInit);
+ sqlite3VdbeScanStatusRange(v, addrExp, addrExp, -1);
end_auto_index_create:
sqlite3ExprDelete(pParse->db, pPartial);
@@ -156704,6 +158061,10 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
Vdbe *v = pParse->pVdbe; /* VDBE under construction */
WhereLoop *pLoop = pLevel->pWLoop; /* The loop being coded */
int iCur; /* Cursor for table getting the filter */
+ IndexedExpr *saved_pIdxEpr; /* saved copy of Parse.pIdxEpr */
+
+ saved_pIdxEpr = pParse->pIdxEpr;
+ pParse->pIdxEpr = 0;
assert( pLoop!=0 );
assert( v!=0 );
@@ -156760,9 +158121,8 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
int r1 = sqlite3GetTempRange(pParse, n);
int jj;
for(jj=0; jj<n; jj++){
- int iCol = pIdx->aiColumn[jj];
assert( pIdx->pTable==pItem->pTab );
- sqlite3ExprCodeGetColumnOfTable(v, pIdx->pTable, iCur, iCol,r1+jj);
+ sqlite3ExprCodeLoadIndexColumn(pParse, pIdx, iCur, jj, r1+jj);
}
sqlite3VdbeAddOp4Int(v, OP_FilterAdd, pLevel->regFilter, 0, r1, n);
sqlite3ReleaseTempRange(pParse, r1, n);
@@ -156793,6 +158153,7 @@ static SQLITE_NOINLINE void sqlite3ConstructBloomFilter(
}
}while( iLevel < pWInfo->nLevel );
sqlite3VdbeJumpHere(v, addrOnce);
+ pParse->pIdxEpr = saved_pIdxEpr;
}
@@ -157092,6 +158453,7 @@ static int whereKeyStats(
assert( pIdx->nSample>0 );
assert( pRec->nField>0 );
+
/* Do a binary search to find the first sample greater than or equal
** to pRec. If pRec contains a single field, the set of samples to search
** is simply the aSample[] array. If the samples in aSample[] contain more
@@ -157136,7 +158498,12 @@ static int whereKeyStats(
** it is extended to two fields. The duplicates that this creates do not
** cause any problems.
*/
- nField = MIN(pRec->nField, pIdx->nSample);
+ if( !HasRowid(pIdx->pTable) && IsPrimaryKeyIndex(pIdx) ){
+ nField = pIdx->nKeyCol;
+ }else{
+ nField = pIdx->nColumn;
+ }
+ nField = MIN(pRec->nField, nField);
iCol = 0;
iSample = pIdx->nSample * nField;
do{
@@ -157202,12 +158569,12 @@ static int whereKeyStats(
if( iCol>0 ){
pRec->nField = iCol;
assert( sqlite3VdbeRecordCompare(aSample[i].n, aSample[i].p, pRec)<=0
- || pParse->db->mallocFailed );
+ || pParse->db->mallocFailed || CORRUPT_DB );
}
if( i>0 ){
pRec->nField = nField;
assert( sqlite3VdbeRecordCompare(aSample[i-1].n, aSample[i-1].p, pRec)<0
- || pParse->db->mallocFailed );
+ || pParse->db->mallocFailed || CORRUPT_DB );
}
}
}
@@ -157380,7 +158747,7 @@ static int whereRangeSkipScanEst(
int nAdjust = (sqlite3LogEst(p->nSample) - sqlite3LogEst(nDiff));
pLoop->nOut -= nAdjust;
*pbDone = 1;
- WHERETRACE(0x10, ("range skip-scan regions: %u..%u adjust=%d est=%d\n",
+ WHERETRACE(0x20, ("range skip-scan regions: %u..%u adjust=%d est=%d\n",
nLower, nUpper, nAdjust*-1, pLoop->nOut));
}
@@ -157558,7 +158925,7 @@ static int whereRangeScanEst(
if( nNew<nOut ){
nOut = nNew;
}
- WHERETRACE(0x10, ("STAT4 range scan: %u..%u est=%d\n",
+ WHERETRACE(0x20, ("STAT4 range scan: %u..%u est=%d\n",
(u32)iLower, (u32)iUpper, nOut));
}
}else{
@@ -157591,7 +158958,7 @@ static int whereRangeScanEst(
if( nNew<nOut ) nOut = nNew;
#if defined(WHERETRACE_ENABLED)
if( pLoop->nOut>nOut ){
- WHERETRACE(0x10,("Range scan lowers nOut from %d to %d\n",
+ WHERETRACE(0x20,("Range scan lowers nOut from %d to %d\n",
pLoop->nOut, nOut));
}
#endif
@@ -157656,7 +159023,7 @@ static int whereEqualScanEst(
pBuilder->nRecValid = nEq;
whereKeyStats(pParse, p, pRec, 0, a);
- WHERETRACE(0x10,("equality scan regions %s(%d): %d\n",
+ WHERETRACE(0x20,("equality scan regions %s(%d): %d\n",
p->zName, nEq-1, (int)a[1]));
*pnRow = a[1];
@@ -157704,9 +159071,9 @@ static int whereInScanEst(
}
if( rc==SQLITE_OK ){
- if( nRowEst > nRow0 ) nRowEst = nRow0;
+ if( nRowEst > (tRowcnt)nRow0 ) nRowEst = nRow0;
*pnRow = nRowEst;
- WHERETRACE(0x10,("IN row estimate: est=%d\n", nRowEst));
+ WHERETRACE(0x20,("IN row estimate: est=%d\n", nRowEst));
}
assert( pBuilder->nRecValid==nRecValid );
return rc;
@@ -157815,7 +159182,7 @@ SQLITE_PRIVATE void sqlite3WhereLoopPrint(WhereLoop *p, WhereClause *pWC){
sqlite3DebugPrintf(" f %06x N %d", p->wsFlags, p->nLTerm);
}
sqlite3DebugPrintf(" cost %d,%d,%d\n", p->rSetup, p->rRun, p->nOut);
- if( p->nLTerm && (sqlite3WhereTrace & 0x100)!=0 ){
+ if( p->nLTerm && (sqlite3WhereTrace & 0x4000)!=0 ){
int i;
for(i=0; i<p->nLTerm; i++){
sqlite3WhereTermPrint(p->aLTerm[i], i);
@@ -158279,6 +159646,7 @@ static void whereLoopOutputAdjust(
if( pX->iParent>=0 && (&pWC->a[pX->iParent])==pTerm ) break;
}
if( j<0 ){
+ sqlite3ProgressCheck(pWC->pWInfo->pParse);
if( pLoop->maskSelf==pTerm->prereqAll ){
/* If there are extra terms in the WHERE clause not used by an index
** that depend only on the table being scanned, and that will tend to
@@ -158446,7 +159814,10 @@ static int whereLoopAddBtreeIndex(
WhereTerm *pTop = 0, *pBtm = 0; /* Top and bottom range constraints */
pNew = pBuilder->pNew;
- if( db->mallocFailed ) return SQLITE_NOMEM_BKPT;
+ assert( db->mallocFailed==0 || pParse->nErr>0 );
+ if( pParse->nErr ){
+ return pParse->rc;
+ }
WHERETRACE(0x800, ("BEGIN %s.addBtreeIdx(%s), nEq=%d, nSkip=%d, rRun=%d\n",
pProbe->pTable->zName,pProbe->zName,
pNew->u.btree.nEq, pNew->nSkip, pNew->rRun));
@@ -158693,7 +160064,7 @@ static int whereLoopAddBtreeIndex(
&& pNew->nOut+10 > pProbe->aiRowLogEst[0]
){
#if WHERETRACE_ENABLED /* 0x01 */
- if( sqlite3WhereTrace & 0x01 ){
+ if( sqlite3WhereTrace & 0x20 ){
sqlite3DebugPrintf(
"STAT4 determines term has low selectivity:\n");
sqlite3WhereTermPrint(pTerm, 999);
@@ -158730,9 +160101,17 @@ static int whereLoopAddBtreeIndex(
** seek only. Then, if this is a non-covering index, add the cost of
** visiting the rows in the main table. */
assert( pSrc->pTab->szTabRow>0 );
- rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow;
+ if( pProbe->idxType==SQLITE_IDXTYPE_IPK ){
+ /* The pProbe->szIdxRow is low for an IPK table since the interior
+      ** pages are small.  Thus szIdxRow gives a good estimate of seek cost.
+ ** But the leaf pages are full-size, so pProbe->szIdxRow would badly
+ ** under-estimate the scanning cost. */
+ rCostIdx = pNew->nOut + 16;
+ }else{
+ rCostIdx = pNew->nOut + 1 + (15*pProbe->szIdxRow)/pSrc->pTab->szTabRow;
+ }
pNew->rRun = sqlite3LogEstAdd(rLogSize, rCostIdx);
- if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK))==0 ){
+ if( (pNew->wsFlags & (WHERE_IDX_ONLY|WHERE_IPK|WHERE_EXPRIDX))==0 ){
pNew->rRun = sqlite3LogEstAdd(pNew->rRun, pNew->nOut + 16);
}
ApplyCostMultiplier(pNew->rRun, pProbe->pTable->costMult);
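The new branch replaces the usual size-ratio seek estimate with a flat +16 surcharge for IPK b-trees. A worked comparison with illustrative numbers (LogEst units are roughly 10*log2 of the value, so +16 is about a 3x factor):

```c
/* Worked sketch of the two cost branches; all numbers here are
** assumed, not measured. */
#include <stdio.h>

int main(void){
  int nOut     = 50;                 /* LogEst: ~32 matching rows */
  int szIdxRow = 30, szTabRow = 90;  /* index row ~1/3 of the table row */
  int rRegular = nOut + 1 + (15*szIdxRow)/szTabRow;  /* = 56 */
  int rIpk     = nOut + 16;          /* flat surcharge: full-size leaves */
  printf("regular=%d ipk=%d\n", rRegular, rIpk);
  return 0;
}
```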
@@ -158754,6 +160133,9 @@ static int whereLoopAddBtreeIndex(
&& (pNew->u.btree.nEq<pProbe->nKeyCol ||
pProbe->idxType!=SQLITE_IDXTYPE_PRIMARYKEY)
){
+ if( pNew->u.btree.nEq>3 ){
+ sqlite3ProgressCheck(pParse);
+ }
whereLoopAddBtreeIndex(pBuilder, pSrc, pProbe, nInMul+nIn);
}
pNew->nOut = saved_nOut;
@@ -158886,15 +160268,38 @@ static int whereUsablePartialIndex(
}
/*
+** pIdx is an index containing expressions.  Check to see if any of the
+** expressions in the index match the pExpr expression.
+*/
+static int exprIsCoveredByIndex(
+ const Expr *pExpr,
+ const Index *pIdx,
+ int iTabCur
+){
+ int i;
+ for(i=0; i<pIdx->nColumn; i++){
+ if( pIdx->aiColumn[i]==XN_EXPR
+ && sqlite3ExprCompare(0, pExpr, pIdx->aColExpr->a[i].pExpr, iTabCur)==0
+ ){
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/*
** Structure passed to the whereIsCoveringIndex Walker callback.
*/
+typedef struct CoveringIndexCheck CoveringIndexCheck;
struct CoveringIndexCheck {
Index *pIdx; /* The index */
int iTabCur; /* Cursor number for the corresponding table */
+ u8 bExpr; /* Uses an indexed expression */
+ u8 bUnidx; /* Uses an unindexed column not within an indexed expr */
};
/*
-** Information passed in is pWalk->u.pCovIdxCk. Call is pCk.
+** Information passed in is pWalk->u.pCovIdxCk. Call it pCk.
**
** If the Expr node references the table with cursor pCk->iTabCur, then
** make sure that column is covered by the index pCk->pIdx. We know that
@@ -158906,71 +160311,103 @@ struct CoveringIndexCheck {
**
** If this node does not disprove that the index can be a covering index,
** then just return WRC_Continue, to continue the search.
+**
+** If pCk->pIdx contains indexed expressions and one of those expressions
+** matches pExpr, then prune the search.
*/
static int whereIsCoveringIndexWalkCallback(Walker *pWalk, Expr *pExpr){
- int i; /* Loop counter */
- const Index *pIdx; /* The index of interest */
- const i16 *aiColumn; /* Columns contained in the index */
- u16 nColumn; /* Number of columns in the index */
- if( pExpr->op!=TK_COLUMN && pExpr->op!=TK_AGG_COLUMN ) return WRC_Continue;
- if( pExpr->iColumn<(BMS-1) ) return WRC_Continue;
- if( pExpr->iTable!=pWalk->u.pCovIdxCk->iTabCur ) return WRC_Continue;
- pIdx = pWalk->u.pCovIdxCk->pIdx;
- aiColumn = pIdx->aiColumn;
- nColumn = pIdx->nColumn;
- for(i=0; i<nColumn; i++){
- if( aiColumn[i]==pExpr->iColumn ) return WRC_Continue;
- }
- pWalk->eCode = 1;
- return WRC_Abort;
+ int i; /* Loop counter */
+ const Index *pIdx; /* The index of interest */
+ const i16 *aiColumn; /* Columns contained in the index */
+ u16 nColumn; /* Number of columns in the index */
+ CoveringIndexCheck *pCk; /* Info about this search */
+
+ pCk = pWalk->u.pCovIdxCk;
+ pIdx = pCk->pIdx;
+ if( (pExpr->op==TK_COLUMN || pExpr->op==TK_AGG_COLUMN) ){
+ /* if( pExpr->iColumn<(BMS-1) && pIdx->bHasExpr==0 ) return WRC_Continue;*/
+ if( pExpr->iTable!=pCk->iTabCur ) return WRC_Continue;
+ pIdx = pWalk->u.pCovIdxCk->pIdx;
+ aiColumn = pIdx->aiColumn;
+ nColumn = pIdx->nColumn;
+ for(i=0; i<nColumn; i++){
+ if( aiColumn[i]==pExpr->iColumn ) return WRC_Continue;
+ }
+ pCk->bUnidx = 1;
+ return WRC_Abort;
+ }else if( pIdx->bHasExpr
+ && exprIsCoveredByIndex(pExpr, pIdx, pWalk->u.pCovIdxCk->iTabCur) ){
+ pCk->bExpr = 1;
+ return WRC_Prune;
+ }
+ return WRC_Continue;
}
/*
** pIdx is an index that covers all of the low-number columns used by
-** pWInfo->pSelect (columns from 0 through 62). But there are columns
-** in pWInfo->pSelect beyond 62. This routine tries to answer the question
-** of whether pIdx covers *all* columns in the query.
+** pWInfo->pSelect (columns from 0 through 62) or an index that has
+** expression terms.  Hence, we cannot determine whether or not it is
+** a covering index by using the colUsed bitmasks. We have to do a search
+** to see if the index is covering. This routine does that search.
**
-** Return 0 if pIdx is a covering index. Return non-zero if pIdx is
-** not a covering index or if we are unable to determine if pIdx is a
-** covering index.
+** The return value is one of these:
**
-** This routine is an optimization. It is always safe to return non-zero.
-** But returning zero when non-zero should have been returned can lead to
-** incorrect bytecode and assertion faults.
+** 0 The index is definitely not a covering index
+**
+** WHERE_IDX_ONLY The index is definitely a covering index
+**
+** WHERE_EXPRIDX The index is likely a covering index, but it is
+** difficult to determine precisely because of the
+** expressions that are indexed. Score it as a
+** covering index, but still keep the main table open
+** just in case we need it.
+**
+** This routine is an optimization. It is always safe to return zero.
+** But returning one of the other two values when zero should have been
+** returned can lead to incorrect bytecode and assertion faults.
*/
static SQLITE_NOINLINE u32 whereIsCoveringIndex(
WhereInfo *pWInfo, /* The WHERE clause context */
Index *pIdx, /* Index that is being tested */
int iTabCur /* Cursor for the table being indexed */
){
- int i;
+ int i, rc;
struct CoveringIndexCheck ck;
Walker w;
if( pWInfo->pSelect==0 ){
/* We don't have access to the full query, so we cannot check to see
** if pIdx is covering. Assume it is not. */
- return 1;
- }
- for(i=0; i<pIdx->nColumn; i++){
- if( pIdx->aiColumn[i]>=BMS-1 ) break;
+ return 0;
}
- if( i>=pIdx->nColumn ){
- /* pIdx does not index any columns greater than 62, but we know from
- ** colMask that columns greater than 62 are used, so this is not a
- ** covering index */
- return 1;
+ if( pIdx->bHasExpr==0 ){
+ for(i=0; i<pIdx->nColumn; i++){
+ if( pIdx->aiColumn[i]>=BMS-1 ) break;
+ }
+ if( i>=pIdx->nColumn ){
+ /* pIdx does not index any columns greater than 62, but we know from
+ ** colMask that columns greater than 62 are used, so this is not a
+ ** covering index */
+ return 0;
+ }
}
ck.pIdx = pIdx;
ck.iTabCur = iTabCur;
+ ck.bExpr = 0;
+ ck.bUnidx = 0;
memset(&w, 0, sizeof(w));
w.xExprCallback = whereIsCoveringIndexWalkCallback;
w.xSelectCallback = sqlite3SelectWalkNoop;
w.u.pCovIdxCk = &ck;
- w.eCode = 0;
sqlite3WalkSelect(&w, pWInfo->pSelect);
- return w.eCode;
+ if( ck.bUnidx ){
+ rc = 0;
+ }else if( ck.bExpr ){
+ rc = WHERE_EXPRIDX;
+ }else{
+ rc = WHERE_IDX_ONLY;
+ }
+ return rc;
}
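At the call site (a later hunk in this file) the three return values map directly onto wsFlags bits. A sketch of the kind of query the WHERE_EXPRIDX case targets, written against the public API with a hypothetical schema:

```c
#include "sqlite3.h"

int demoExprIdx(sqlite3 *db){
  int rc = sqlite3_exec(db,
      "CREATE TABLE t1(a INTEGER, b INTEGER);"
      "CREATE INDEX t1ab ON t1(a, a+b);", 0, 0, 0);
  if( rc==SQLITE_OK ){
    /* Every value this query needs is either column a or the indexed
    ** expression a+b, so t1ab can be scored as likely covering. */
    rc = sqlite3_exec(db, "SELECT a, a+b FROM t1 WHERE a=5;", 0, 0, 0);
  }
  return rc;
}
```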
/*
@@ -159055,7 +160492,7 @@ static int whereLoopAddBtree(
sPk.aiRowLogEst = aiRowEstPk;
sPk.onError = OE_Replace;
sPk.pTable = pTab;
- sPk.szIdxRow = pTab->szTabRow;
+ sPk.szIdxRow = 3; /* TUNING: Interior rows of IPK table are very small */
sPk.idxType = SQLITE_IDXTYPE_IPK;
aiRowEstPk[0] = pTab->nRowLogEst;
aiRowEstPk[1] = 0;
@@ -159106,7 +160543,8 @@ static int whereLoopAddBtree(
if( !IsView(pTab) && (pTab->tabFlags & TF_Ephemeral)==0 ){
pNew->rSetup += 28;
}else{
- pNew->rSetup -= 10;
+ pNew->rSetup -= 25; /* Greatly reduced setup cost for auto indexes
+ ** on ephemeral materializations of views */
}
ApplyCostMultiplier(pNew->rSetup, pTab->costMult);
if( pNew->rSetup<0 ) pNew->rSetup = 0;
@@ -159186,14 +160624,38 @@ static int whereLoopAddBtree(
}else{
Bitmask m;
if( pProbe->isCovering ){
- pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
m = 0;
+ pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
}else{
m = pSrc->colUsed & pProbe->colNotIdxed;
- if( m==TOPBIT ){
- m = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor);
+ pNew->wsFlags = WHERE_INDEXED;
+ if( m==TOPBIT || (pProbe->bHasExpr && !pProbe->bHasVCol && m!=0) ){
+ u32 isCov = whereIsCoveringIndex(pWInfo, pProbe, pSrc->iCursor);
+ if( isCov==0 ){
+ WHERETRACE(0x200,
+ ("-> %s is not a covering index"
+ " according to whereIsCoveringIndex()\n", pProbe->zName));
+ assert( m!=0 );
+ }else{
+ m = 0;
+ pNew->wsFlags |= isCov;
+ if( isCov & WHERE_IDX_ONLY ){
+ WHERETRACE(0x200,
+ ("-> %s is a covering expression index"
+ " according to whereIsCoveringIndex()\n", pProbe->zName));
+ }else{
+ assert( isCov==WHERE_EXPRIDX );
+ WHERETRACE(0x200,
+ ("-> %s might be a covering expression index"
+ " according to whereIsCoveringIndex()\n", pProbe->zName));
+ }
+ }
+ }else if( m==0 ){
+ WHERETRACE(0x200,
+ ("-> %s a covering index according to bitmasks\n",
+ pProbe->zName, m==0 ? "is" : "is not"));
+ pNew->wsFlags = WHERE_IDX_ONLY | WHERE_INDEXED;
}
- pNew->wsFlags = (m==0) ? (WHERE_IDX_ONLY|WHERE_INDEXED) : WHERE_INDEXED;
}
/* Full scan via index */
@@ -159366,7 +160828,7 @@ static int whereLoopAddVirtualOne(
** that the particular combination of parameters provided is unusable.
** Make no entries in the loop table.
*/
- WHERETRACE(0xffff, (" ^^^^--- non-viable plan rejected!\n"));
+ WHERETRACE(0xffffffff, (" ^^^^--- non-viable plan rejected!\n"));
return SQLITE_OK;
}
return rc;
@@ -159477,7 +160939,7 @@ static int whereLoopAddVirtualOne(
sqlite3_free(pNew->u.vtab.idxStr);
pNew->u.vtab.needFree = 0;
}
- WHERETRACE(0xffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n",
+ WHERETRACE(0xffffffff, (" bIn=%d prereqIn=%04llx prereqOut=%04llx\n",
*pbIn, (sqlite3_uint64)mPrereq,
(sqlite3_uint64)(pNew->prereq & ~mPrereq)));
@@ -159582,7 +161044,7 @@ SQLITE_API int sqlite3_vtab_distinct(sqlite3_index_info *pIdxInfo){
&& !defined(SQLITE_OMIT_VIRTUALTABLE)
/*
** Cause the prepared statement that is associated with a call to
-** xBestIndex to potentiall use all schemas. If the statement being
+** xBestIndex to potentially use all schemas. If the statement being
** prepared is read-only, then just start read transactions on all
** schemas. But if this is a write operation, start writes on all
** schemas.
@@ -159597,7 +161059,7 @@ SQLITE_PRIVATE void sqlite3VtabUsesAllSchemas(sqlite3_index_info *pIdxInfo){
for(i=0; i<nDb; i++){
sqlite3CodeVerifySchema(pParse, i);
}
- if( pParse->writeMask ){
+ if( DbMaskNonZero(pParse->writeMask) ){
for(i=0; i<nDb; i++){
sqlite3BeginWriteOperation(pParse, 0, i);
}
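The DbMaskNonZero() fix matters because yDbMask is not always a scalar: when SQLITE_MAX_ATTACHED exceeds 30 it becomes a byte array, and the old `if( pParse->writeMask )` would then test an array address rather than the mask contents. A sketch of the two representations, mirroring the amalgamation's usual definitions (treat the exact bounds as an assumption):

```c
#if SQLITE_MAX_ATTACHED > 30
  typedef unsigned char yDbMask[(SQLITE_MAX_ATTACHED+9)/8];
# define DbMaskNonZero(M)  (sqlite3DbMaskAllZero(M)==0)
#else
  typedef unsigned int yDbMask;
# define DbMaskNonZero(M)  ((M)!=0)
#endif
```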
@@ -159669,7 +161131,7 @@ static int whereLoopAddVirtual(
/* First call xBestIndex() with all constraints usable. */
WHERETRACE(0x800, ("BEGIN %s.addVirtual()\n", pSrc->pTab->zName));
- WHERETRACE(0x40, (" VirtualOne: all usable\n"));
+ WHERETRACE(0x800, (" VirtualOne: all usable\n"));
rc = whereLoopAddVirtualOne(
pBuilder, mPrereq, ALLBITS, 0, p, mNoOmit, &bIn, &bRetry
);
@@ -159694,7 +161156,7 @@ static int whereLoopAddVirtual(
/* If the plan produced by the earlier call uses an IN(...) term, call
** xBestIndex again, this time with IN(...) terms disabled. */
if( bIn ){
- WHERETRACE(0x40, (" VirtualOne: all usable w/o IN\n"));
+ WHERETRACE(0x800, (" VirtualOne: all usable w/o IN\n"));
rc = whereLoopAddVirtualOne(
pBuilder, mPrereq, ALLBITS, WO_IN, p, mNoOmit, &bIn, 0);
assert( bIn==0 );
@@ -159720,7 +161182,7 @@ static int whereLoopAddVirtual(
mPrev = mNext;
if( mNext==ALLBITS ) break;
if( mNext==mBest || mNext==mBestNoIn ) continue;
- WHERETRACE(0x40, (" VirtualOne: mPrev=%04llx mNext=%04llx\n",
+ WHERETRACE(0x800, (" VirtualOne: mPrev=%04llx mNext=%04llx\n",
(sqlite3_uint64)mPrev, (sqlite3_uint64)mNext));
rc = whereLoopAddVirtualOne(
pBuilder, mPrereq, mNext|mPrereq, 0, p, mNoOmit, &bIn, 0);
@@ -159734,7 +161196,7 @@ static int whereLoopAddVirtual(
** that requires no source tables at all (i.e. one guaranteed to be
** usable), make a call here with all source tables disabled */
if( rc==SQLITE_OK && seenZero==0 ){
- WHERETRACE(0x40, (" VirtualOne: all disabled\n"));
+ WHERETRACE(0x800, (" VirtualOne: all disabled\n"));
rc = whereLoopAddVirtualOne(
pBuilder, mPrereq, mPrereq, 0, p, mNoOmit, &bIn, 0);
if( bIn==0 ) seenZeroNoIN = 1;
@@ -159744,7 +161206,7 @@ static int whereLoopAddVirtual(
** that requires no source tables at all and does not use an IN(...)
** operator, make a final call to obtain one here. */
if( rc==SQLITE_OK && seenZeroNoIN==0 ){
- WHERETRACE(0x40, (" VirtualOne: all disabled and w/o IN\n"));
+ WHERETRACE(0x800, (" VirtualOne: all disabled and w/o IN\n"));
rc = whereLoopAddVirtualOne(
pBuilder, mPrereq, mPrereq, WO_IN, p, mNoOmit, &bIn, 0);
}
@@ -159800,7 +161262,7 @@ static int whereLoopAddOr(
sSubBuild = *pBuilder;
sSubBuild.pOrSet = &sCur;
- WHERETRACE(0x200, ("Begin processing OR-clause %p\n", pTerm));
+ WHERETRACE(0x400, ("Begin processing OR-clause %p\n", pTerm));
for(pOrTerm=pOrWC->a; pOrTerm<pOrWCEnd; pOrTerm++){
if( (pOrTerm->eOperator & WO_AND)!=0 ){
sSubBuild.pWC = &pOrTerm->u.pAndInfo->wc;
@@ -159817,9 +161279,9 @@ static int whereLoopAddOr(
}
sCur.n = 0;
#ifdef WHERETRACE_ENABLED
- WHERETRACE(0x200, ("OR-term %d of %p has %d subterms:\n",
+ WHERETRACE(0x400, ("OR-term %d of %p has %d subterms:\n",
(int)(pOrTerm-pOrWC->a), pTerm, sSubBuild.pWC->nTerm));
- if( sqlite3WhereTrace & 0x400 ){
+ if( sqlite3WhereTrace & 0x20000 ){
sqlite3WhereClausePrint(sSubBuild.pWC);
}
#endif
@@ -159834,8 +161296,6 @@ static int whereLoopAddOr(
if( rc==SQLITE_OK ){
rc = whereLoopAddOr(&sSubBuild, mPrereq, mUnusable);
}
- assert( rc==SQLITE_OK || rc==SQLITE_DONE || sCur.n==0
- || rc==SQLITE_NOMEM );
testcase( rc==SQLITE_NOMEM && sCur.n>0 );
testcase( rc==SQLITE_DONE );
if( sCur.n==0 ){
@@ -159881,7 +161341,7 @@ static int whereLoopAddOr(
pNew->prereq = sSum.a[i].prereq;
rc = whereLoopInsert(pBuilder, pNew);
}
- WHERETRACE(0x200, ("End processing OR-clause %p\n", pTerm));
+ WHERETRACE(0x400, ("End processing OR-clause %p\n", pTerm));
}
}
return rc;
@@ -160229,8 +161689,8 @@ static i8 wherePathSatisfiesOrderBy(
if( pOBExpr->iTable!=iCur ) continue;
if( pOBExpr->iColumn!=iColumn ) continue;
}else{
- Expr *pIdxExpr = pIndex->aColExpr->a[j].pExpr;
- if( sqlite3ExprCompareSkip(pOBExpr, pIdxExpr, iCur) ){
+ Expr *pIxExpr = pIndex->aColExpr->a[j].pExpr;
+ if( sqlite3ExprCompareSkip(pOBExpr, pIxExpr, iCur) ){
continue;
}
}
@@ -160362,37 +161822,56 @@ static const char *wherePathName(WherePath *pPath, int nLoop, WhereLoop *pLast){
** order.
*/
static LogEst whereSortingCost(
- WhereInfo *pWInfo,
- LogEst nRow,
- int nOrderBy,
- int nSorted
+ WhereInfo *pWInfo, /* Query planning context */
+ LogEst nRow, /* Estimated number of rows to sort */
+ int nOrderBy, /* Number of ORDER BY clause terms */
+ int nSorted /* Number of initial ORDER BY terms naturally in order */
){
- /* TUNING: Estimated cost of a full external sort, where N is
+ /* Estimated cost of a full external sort, where N is
** the number of rows to sort is:
**
- ** cost = (3.0 * N * log(N)).
+ ** cost = (K * N * log(N)).
**
** Or, if the order-by clause has X terms but only the last Y
** terms are out of order, then block-sorting will reduce the
** sorting cost to:
**
- ** cost = (3.0 * N * log(N)) * (Y/X)
+ ** cost = (K * N * log(N)) * (Y/X)
**
- ** The (Y/X) term is implemented using stack variable rScale
- ** below.
+ ** The constant K is at least 2.0 but will be larger if there are a
+ ** large number of columns to be sorted, as the sorting time is
+ ** proportional to the amount of content to be sorted. The algorithm
+ ** does not currently distinguish between fat columns (BLOBs and TEXTs)
+ ** and skinny columns (INTs). It just uses the number of columns as
+ ** an approximation for the row width.
+ **
+  ** An extra factor of 2.0 or 3.0 is added to the sorting cost if the sort
+ ** is built using OP_IdxInsert and OP_Sort rather than with OP_SorterInsert.
*/
- LogEst rScale, rSortCost;
- assert( nOrderBy>0 && 66==sqlite3LogEst(100) );
- rScale = sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66;
- rSortCost = nRow + rScale + 16;
+ LogEst rSortCost, nCol;
+ assert( pWInfo->pSelect!=0 );
+ assert( pWInfo->pSelect->pEList!=0 );
+ /* TUNING: sorting cost proportional to the number of output columns: */
+ nCol = sqlite3LogEst((pWInfo->pSelect->pEList->nExpr+59)/30);
+ rSortCost = nRow + nCol;
+ if( nSorted>0 ){
+ /* Scale the result by (Y/X) */
+ rSortCost += sqlite3LogEst((nOrderBy-nSorted)*100/nOrderBy) - 66;
+ }
  /* Multiply by log(M) where M is the number of output rows.
** Use the LIMIT for M if it is smaller. Or if this sort is for
** a DISTINCT operator, M will be the number of distinct output
** rows, so fudge it downwards a bit.
*/
- if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 && pWInfo->iLimit<nRow ){
- nRow = pWInfo->iLimit;
+ if( (pWInfo->wctrlFlags & WHERE_USE_LIMIT)!=0 ){
+ rSortCost += 10; /* TUNING: Extra 2.0x if using LIMIT */
+ if( nSorted!=0 ){
+ rSortCost += 6; /* TUNING: Extra 1.5x if also using partial sort */
+ }
+ if( pWInfo->iLimit<nRow ){
+ nRow = pWInfo->iLimit;
+ }
}else if( (pWInfo->wctrlFlags & WHERE_WANT_DISTINCT) ){
/* TUNING: In the sort for a DISTINCT operator, assume that the DISTINCT
** reduces the number of output rows by a factor of 2 */
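The LogEst arithmetic above is compact, so a worked example may help: a LogEst is roughly 10*log2 of the estimated quantity, which means adding LogEst values multiplies the underlying estimates. A minimal sketch assuming 1000 rows and a 10-column result set (logEst() here only approximates sqlite3LogEst()):

```c
#include <math.h>
#include <stdio.h>

static int logEst(double x){ return (int)(10.0*log2(x) + 0.5); }

int main(void){
  int nRow  = logEst(1000.0);         /* ~100: rows to be sorted */
  int nExpr = 10;                     /* output columns */
  int nCol  = logEst((nExpr+59)/30);  /* width factor: logEst(2) == 10 */
  int rSortCost = nRow + nCol;        /* LogEst add == multiply: ~K*N */
  printf("rSortCost = %d\n", rSortCost);  /* prints 110 */
  return 0;
}
```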
@@ -160544,11 +162023,11 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
pWInfo, nRowEst, nOrderBy, isOrdered
);
}
- /* TUNING: Add a small extra penalty (5) to sorting as an
+ /* TUNING: Add a small extra penalty (3) to sorting as an
    ** extra encouragement to the query planner to select a plan
** where the rows emerge in the correct order without any sorting
** required. */
- rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 5;
+ rCost = sqlite3LogEstAdd(rUnsorted, aSortCost[isOrdered]) + 3;
WHERETRACE(0x002,
("---- sort cost=%-3d (%d/%d) increases cost %3d to %-3d\n",
@@ -160752,6 +162231,10 @@ static int wherePathSolver(WhereInfo *pWInfo, LogEst nRowEst){
if( pFrom->isOrdered==pWInfo->pOrderBy->nExpr ){
pWInfo->eDistinct = WHERE_DISTINCT_ORDERED;
}
+ if( pWInfo->pSelect->pOrderBy
+ && pWInfo->nOBSat > pWInfo->pSelect->pOrderBy->nExpr ){
+ pWInfo->nOBSat = pWInfo->pSelect->pOrderBy->nExpr;
+ }
}else{
pWInfo->revMask = pFrom->revLoop;
if( pWInfo->nOBSat<=0 ){
@@ -160896,7 +162379,7 @@ static int whereShortCut(WhereLoopBuilder *pBuilder){
pLoop->cId = '0';
#endif
#ifdef WHERETRACE_ENABLED
- if( sqlite3WhereTrace ){
+ if( sqlite3WhereTrace & 0x02 ){
sqlite3DebugPrintf("whereShortCut() used to compute solution\n");
}
#endif
@@ -161026,7 +162509,7 @@ static SQLITE_NOINLINE Bitmask whereOmitNoopJoin(
}
}
if( pTerm<pEnd ) continue;
- WHERETRACE(0xffff, ("-> drop loop %c not used\n", pLoop->cId));
+ WHERETRACE(0xffffffff, ("-> drop loop %c not used\n", pLoop->cId));
notReady &= ~pLoop->maskSelf;
for(pTerm=pWInfo->sWC.a; pTerm<pEnd; pTerm++){
if( (pTerm->prereqAll & pLoop->maskSelf)!=0 ){
@@ -161065,28 +162548,27 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(
const WhereInfo *pWInfo
){
int i;
- LogEst nSearch;
+ LogEst nSearch = 0;
assert( pWInfo->nLevel>=2 );
assert( OptimizationEnabled(pWInfo->pParse->db, SQLITE_BloomFilter) );
- nSearch = pWInfo->a[0].pWLoop->nOut;
- for(i=1; i<pWInfo->nLevel; i++){
+ for(i=0; i<pWInfo->nLevel; i++){
WhereLoop *pLoop = pWInfo->a[i].pWLoop;
const unsigned int reqFlags = (WHERE_SELFCULL|WHERE_COLUMN_EQ);
- if( (pLoop->wsFlags & reqFlags)==reqFlags
+ SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab];
+ Table *pTab = pItem->pTab;
+ if( (pTab->tabFlags & TF_HasStat1)==0 ) break;
+ pTab->tabFlags |= TF_StatsUsed;
+ if( i>=1
+ && (pLoop->wsFlags & reqFlags)==reqFlags
/* vvvvvv--- Always the case if WHERE_COLUMN_EQ is defined */
&& ALWAYS((pLoop->wsFlags & (WHERE_IPK|WHERE_INDEXED))!=0)
){
- SrcItem *pItem = &pWInfo->pTabList->a[pLoop->iTab];
- Table *pTab = pItem->pTab;
- pTab->tabFlags |= TF_StatsUsed;
- if( nSearch > pTab->nRowLogEst
- && (pTab->tabFlags & TF_HasStat1)!=0
- ){
+ if( nSearch > pTab->nRowLogEst ){
testcase( pItem->fg.jointype & JT_LEFT );
pLoop->wsFlags |= WHERE_BLOOMFILTER;
pLoop->wsFlags &= ~WHERE_IDX_ONLY;
- WHERETRACE(0xffff, (
+ WHERETRACE(0xffffffff, (
"-> use Bloom-filter on loop %c because there are ~%.1e "
"lookups into %s which has only ~%.1e rows\n",
pLoop->cId, (double)sqlite3LogEstToInt(nSearch), pTab->zName,
@@ -161099,13 +162581,13 @@ static SQLITE_NOINLINE void whereCheckIfBloomFilterIsUseful(
/*
** This is an sqlite3ParserAddCleanup() callback that is invoked to
-** free the Parse->pIdxExpr list when the Parse object is destroyed.
+** free the Parse->pIdxEpr list when the Parse object is destroyed.
*/
static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){
Parse *pParse = (Parse*)pObject;
- while( pParse->pIdxExpr!=0 ){
- IndexedExpr *p = pParse->pIdxExpr;
- pParse->pIdxExpr = p->pIENext;
+ while( pParse->pIdxEpr!=0 ){
+ IndexedExpr *p = pParse->pIdxEpr;
+ pParse->pIdxEpr = p->pIENext;
sqlite3ExprDelete(db, p->pExpr);
sqlite3DbFreeNN(db, p);
}
@@ -161117,13 +162599,13 @@ static void whereIndexedExprCleanup(sqlite3 *db, void *pObject){
** number for the index and iDataCur is the cursor number for the corresponding
** table.
**
-** This routine adds IndexedExpr entries to the Parse->pIdxExpr field for
+** This routine adds IndexedExpr entries to the Parse->pIdxEpr field for
** each of the expressions in the index so that the expression code generator
** will know to replace occurrences of the indexed expression with
** references to the corresponding column of the index.
*/
static SQLITE_NOINLINE void whereAddIndexedExpr(
- Parse *pParse, /* Add IndexedExpr entries to pParse->pIdxExpr */
+ Parse *pParse, /* Add IndexedExpr entries to pParse->pIdxEpr */
Index *pIdx, /* The index-on-expression that contains the expressions */
int iIdxCur, /* Cursor number for pIdx */
SrcItem *pTabItem /* The FROM clause entry for the table */
@@ -161152,16 +162634,25 @@ static SQLITE_NOINLINE void whereAddIndexedExpr(
if( sqlite3ExprIsConstant(pExpr) ) continue;
p = sqlite3DbMallocRaw(pParse->db, sizeof(IndexedExpr));
if( p==0 ) break;
- p->pIENext = pParse->pIdxExpr;
+ p->pIENext = pParse->pIdxEpr;
+#ifdef WHERETRACE_ENABLED
+ if( sqlite3WhereTrace & 0x200 ){
+ sqlite3DebugPrintf("New pParse->pIdxEpr term {%d,%d}\n", iIdxCur, i);
+ if( sqlite3WhereTrace & 0x5000 ) sqlite3ShowExpr(pExpr);
+ }
+#endif
p->pExpr = sqlite3ExprDup(pParse->db, pExpr, 0);
p->iDataCur = pTabItem->iCursor;
p->iIdxCur = iIdxCur;
p->iIdxCol = i;
p->bMaybeNullRow = bMaybeNullRow;
+ if( sqlite3IndexAffinityStr(pParse->db, pIdx) ){
+ p->aff = pIdx->zColAff[i];
+ }
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
p->zIdxName = pIdx->zName;
#endif
- pParse->pIdxExpr = p;
+ pParse->pIdxEpr = p;
if( p->pIENext==0 ){
sqlite3ParserAddCleanup(pParse, whereIndexedExprCleanup, pParse);
}
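Each entry on the pIdxEpr list tells the expression code generator to read the value back out of the index instead of recomputing it. A small end-to-end sketch with a hypothetical schema:

```c
#include "sqlite3.h"

int demoIdxEpr(sqlite3 *db){
  int rc = sqlite3_exec(db,
      "CREATE TABLE t2(w INT, x INT, y INT);"
      "CREATE INDEX t2wx ON t2(w+x);", 0, 0, 0);
  if( rc==SQLITE_OK ){
    /* When t2wx is used, occurrences of w+x in the generated code can
    ** become reads of the first index column. */
    rc = sqlite3_exec(db, "SELECT rowid FROM t2 WHERE w+x=10;", 0, 0, 0);
  }
  return rc;
}
```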
@@ -161453,13 +162944,13 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
/* Construct the WhereLoop objects */
#if defined(WHERETRACE_ENABLED)
- if( sqlite3WhereTrace & 0xffff ){
+ if( sqlite3WhereTrace & 0xffffffff ){
sqlite3DebugPrintf("*** Optimizer Start *** (wctrlFlags: 0x%x",wctrlFlags);
if( wctrlFlags & WHERE_USE_LIMIT ){
sqlite3DebugPrintf(", limit: %d", iAuxArg);
}
sqlite3DebugPrintf(")\n");
- if( sqlite3WhereTrace & 0x100 ){
+ if( sqlite3WhereTrace & 0x8000 ){
Select sSelect;
memset(&sSelect, 0, sizeof(sSelect));
sSelect.selFlags = SF_WhereBegin;
@@ -161469,10 +162960,10 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
sSelect.pEList = pResultSet;
sqlite3TreeViewSelect(0, &sSelect, 0);
}
- }
- if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */
- sqlite3DebugPrintf("---- WHERE clause at start of analysis:\n");
- sqlite3WhereClausePrint(sWLB.pWC);
+ if( sqlite3WhereTrace & 0x4000 ){ /* Display all WHERE clause terms */
+ sqlite3DebugPrintf("---- WHERE clause at start of analysis:\n");
+ sqlite3WhereClausePrint(sWLB.pWC);
+ }
}
#endif
@@ -161488,7 +162979,7 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
** loops will be built using the revised truthProb values. */
if( sWLB.bldFlags2 & SQLITE_BLDF2_2NDPASS ){
WHERETRACE_ALL_LOOPS(pWInfo, sWLB.pWC);
- WHERETRACE(0xffff,
+ WHERETRACE(0xffffffff,
("**** Redo all loop computations due to"
" TERM_HIGHTRUTH changes ****\n"));
while( pWInfo->pLoops ){
@@ -161574,11 +163065,11 @@ SQLITE_PRIVATE WhereInfo *sqlite3WhereBegin(
}
#if defined(WHERETRACE_ENABLED)
- if( sqlite3WhereTrace & 0x100 ){ /* Display all terms of the WHERE clause */
+ if( sqlite3WhereTrace & 0x4000 ){ /* Display all terms of the WHERE clause */
sqlite3DebugPrintf("---- WHERE clause at end of analysis:\n");
sqlite3WhereClausePrint(sWLB.pWC);
}
- WHERETRACE(0xffff,("*** Optimizer Finished ***\n"));
+ WHERETRACE(0xffffffff,("*** Optimizer Finished ***\n"));
#endif
pWInfo->pParse->nQueryLoop += pWInfo->nRowOut;
@@ -162112,9 +163603,16 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
last = pWInfo->iEndWhere;
}
if( pIdx->bHasExpr ){
- IndexedExpr *p = pParse->pIdxExpr;
+ IndexedExpr *p = pParse->pIdxEpr;
while( p ){
if( p->iIdxCur==pLevel->iIdxCur ){
+#ifdef WHERETRACE_ENABLED
+ if( sqlite3WhereTrace & 0x200 ){
+ sqlite3DebugPrintf("Disable pParse->pIdxEpr term {%d,%d}\n",
+ p->iIdxCur, p->iIdxCol);
+ if( sqlite3WhereTrace & 0x5000 ) sqlite3ShowExpr(p->pExpr);
+ }
+#endif
p->iDataCur = -1;
p->iIdxCur = -1;
}
@@ -163283,7 +164781,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
pSub = sqlite3SelectNew(
pParse, pSublist, pSrc, pWhere, pGroupBy, pHaving, pSort, 0, 0
);
- SELECTTRACE(1,pParse,pSub,
+ TREETRACE(0x40,pParse,pSub,
("New window-function subquery in FROM clause of (%u/%p)\n",
p->selId, p));
p->pSrc = sqlite3SrcListAppend(pParse, 0, 0, 0);
@@ -163293,6 +164791,7 @@ SQLITE_PRIVATE int sqlite3WindowRewrite(Parse *pParse, Select *p){
if( p->pSrc ){
Table *pTab2;
p->pSrc->a[0].pSelect = pSub;
+ p->pSrc->a[0].fg.isCorrelated = 1;
sqlite3SrcListAssignCursors(pParse, p->pSrc);
pSub->selFlags |= SF_Expanded|SF_OrderByReqd;
pTab2 = sqlite3ResultSetOfSelect(pParse, pSub, SQLITE_AFF_NONE);
@@ -165158,8 +166657,7 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
VdbeCoverageNeverNullIf(v, op==OP_Ge); /* NeverNull because bound <expr> */
VdbeCoverageNeverNullIf(v, op==OP_Le); /* values previously checked */
windowAggFinal(&s, 0);
- sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1);
- VdbeCoverageNeverTaken(v);
+ sqlite3VdbeAddOp1(v, OP_Rewind, s.current.csr);
windowReturnOneRow(&s);
sqlite3VdbeAddOp1(v, OP_ResetSorter, s.current.csr);
sqlite3VdbeAddOp2(v, OP_Goto, 0, lblWhereEnd);
@@ -165171,13 +166669,10 @@ SQLITE_PRIVATE void sqlite3WindowCodeStep(
}
if( pMWin->eStart!=TK_UNBOUNDED ){
- sqlite3VdbeAddOp2(v, OP_Rewind, s.start.csr, 1);
- VdbeCoverageNeverTaken(v);
+ sqlite3VdbeAddOp1(v, OP_Rewind, s.start.csr);
}
- sqlite3VdbeAddOp2(v, OP_Rewind, s.current.csr, 1);
- VdbeCoverageNeverTaken(v);
- sqlite3VdbeAddOp2(v, OP_Rewind, s.end.csr, 1);
- VdbeCoverageNeverTaken(v);
+ sqlite3VdbeAddOp1(v, OP_Rewind, s.current.csr);
+ sqlite3VdbeAddOp1(v, OP_Rewind, s.end.csr);
if( regPeer && pOrderBy ){
sqlite3VdbeAddOp3(v, OP_Copy, regNewPeer, regPeer, pOrderBy->nExpr-1);
sqlite3VdbeAddOp3(v, OP_Copy, regPeer, s.start.reg, pOrderBy->nExpr-1);
@@ -169875,6 +171370,11 @@ static YYACTIONTYPE yy_reduce(
sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322);
pRHS = sqlite3PExpr(pParse, TK_UPLUS, pRHS, 0);
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_EQ, yymsp[-4].minor.yy528, pRHS);
+ }else if( yymsp[-1].minor.yy322->nExpr==1 && pRHS->op==TK_SELECT ){
+ yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
+ sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy528, pRHS->x.pSelect);
+ pRHS->x.pSelect = 0;
+ sqlite3ExprListDelete(pParse->db, yymsp[-1].minor.yy322);
}else{
yymsp[-4].minor.yy528 = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy528, 0);
if( yymsp[-4].minor.yy528==0 ){
@@ -174077,6 +175577,7 @@ SQLITE_PRIVATE const char *sqlite3ErrName(int rc){
case SQLITE_NOTICE_RECOVER_WAL: zName = "SQLITE_NOTICE_RECOVER_WAL";break;
case SQLITE_NOTICE_RECOVER_ROLLBACK:
zName = "SQLITE_NOTICE_RECOVER_ROLLBACK"; break;
+ case SQLITE_NOTICE_RBU: zName = "SQLITE_NOTICE_RBU"; break;
case SQLITE_WARNING: zName = "SQLITE_WARNING"; break;
case SQLITE_WARNING_AUTOINDEX: zName = "SQLITE_WARNING_AUTOINDEX"; break;
case SQLITE_DONE: zName = "SQLITE_DONE"; break;
@@ -174306,7 +175807,9 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){
*/
SQLITE_API void sqlite3_interrupt(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE) ){
+ if( !sqlite3SafetyCheckOk(db)
+ && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE)
+ ){
(void)SQLITE_MISUSE_BKPT;
return;
}
@@ -174314,6 +175817,21 @@ SQLITE_API void sqlite3_interrupt(sqlite3 *db){
AtomicStore(&db->u1.isInterrupted, 1);
}
+/*
+** Return true or false depending on whether or not an interrupt is
+** pending on connection db.
+*/
+SQLITE_API int sqlite3_is_interrupted(sqlite3 *db){
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db)
+ && (db==0 || db->eOpenState!=SQLITE_STATE_ZOMBIE)
+ ){
+ (void)SQLITE_MISUSE_BKPT;
+ return 0;
+ }
+#endif
+ return AtomicLoad(&db->u1.isInterrupted)!=0;
+}
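A minimal usage sketch for the new API: a long-running worker polls for an sqlite3_interrupt() issued from another thread. doWorkChunk() is a hypothetical unit of work, not part of SQLite:

```c
#include "sqlite3.h"

extern int doWorkChunk(void);  /* hypothetical: returns 0 when finished */

void workUntilInterrupted(sqlite3 *db){
  while( doWorkChunk() && !sqlite3_is_interrupted(db) ){
    /* keep going until the work is done or another thread has
    ** called sqlite3_interrupt(db) */
  }
}
```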
/*
** This function is exactly the same as sqlite3_create_function(), except
@@ -174358,7 +175876,7 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
/* The SQLITE_INNOCUOUS flag is the same bit as SQLITE_FUNC_UNSAFE. But
** the meaning is inverted. So flip the bit. */
assert( SQLITE_FUNC_UNSAFE==SQLITE_INNOCUOUS );
- extraFlags ^= SQLITE_FUNC_UNSAFE;
+ extraFlags ^= SQLITE_FUNC_UNSAFE; /* tag-20230109-1 */
#ifndef SQLITE_OMIT_UTF16
@@ -174376,11 +175894,11 @@ SQLITE_PRIVATE int sqlite3CreateFunc(
case SQLITE_ANY: {
int rc;
rc = sqlite3CreateFunc(db, zFunctionName, nArg,
- (SQLITE_UTF8|extraFlags)^SQLITE_FUNC_UNSAFE,
+ (SQLITE_UTF8|extraFlags)^SQLITE_FUNC_UNSAFE, /* tag-20230109-1 */
pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor);
if( rc==SQLITE_OK ){
rc = sqlite3CreateFunc(db, zFunctionName, nArg,
- (SQLITE_UTF16LE|extraFlags)^SQLITE_FUNC_UNSAFE,
+ (SQLITE_UTF16LE|extraFlags)^SQLITE_FUNC_UNSAFE, /* tag-20230109-1*/
pUserData, xSFunc, xStep, xFinal, xValue, xInverse, pDestructor);
}
if( rc!=SQLITE_OK ){
@@ -178962,6 +180480,8 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeIsalnum(int);
SQLITE_PRIVATE int sqlite3FtsUnicodeIsdiacritic(int);
#endif
+SQLITE_PRIVATE int sqlite3Fts3ExprIterate(Fts3Expr*, int (*x)(Fts3Expr*,int,void*), void*);
+
#endif /* !SQLITE_CORE || SQLITE_ENABLE_FTS3 */
#endif /* _FTSINT_H */
@@ -183965,9 +185485,8 @@ static void fts3EvalNextRow(
Fts3Expr *pExpr, /* Expr. to advance to next matching row */
int *pRc /* IN/OUT: Error code */
){
- if( *pRc==SQLITE_OK ){
+ if( *pRc==SQLITE_OK && pExpr->bEof==0 ){
int bDescDoclist = pCsr->bDesc; /* Used by DOCID_CMP() macro */
- assert( pExpr->bEof==0 );
pExpr->bStart = 1;
switch( pExpr->eType ){
@@ -184444,6 +185963,22 @@ static void fts3EvalUpdateCounts(Fts3Expr *pExpr, int nCol){
}
/*
+** This is an sqlite3Fts3ExprIterate() callback. If the Fts3Expr.aMI[] array
+** has not yet been allocated, allocate and zero it. Otherwise, just zero
+** it.
+*/
+static int fts3AllocateMSI(Fts3Expr *pExpr, int iPhrase, void *pCtx){
+ Fts3Table *pTab = (Fts3Table*)pCtx;
+ UNUSED_PARAMETER(iPhrase);
+ if( pExpr->aMI==0 ){
+ pExpr->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32));
+ if( pExpr->aMI==0 ) return SQLITE_NOMEM;
+ }
+ memset(pExpr->aMI, 0, pTab->nColumn * 3 * sizeof(u32));
+ return SQLITE_OK;
+}
+
+/*
** Expression pExpr must be of type FTSQUERY_PHRASE.
**
** If it is not already allocated and populated, this function allocates and
@@ -184464,7 +185999,6 @@ static int fts3EvalGatherStats(
if( pExpr->aMI==0 ){
Fts3Table *pTab = (Fts3Table *)pCsr->base.pVtab;
Fts3Expr *pRoot; /* Root of NEAR expression */
- Fts3Expr *p; /* Iterator used for several purposes */
sqlite3_int64 iPrevId = pCsr->iPrevId;
sqlite3_int64 iDocid;
@@ -184472,7 +186006,9 @@ static int fts3EvalGatherStats(
/* Find the root of the NEAR expression */
pRoot = pExpr;
- while( pRoot->pParent && pRoot->pParent->eType==FTSQUERY_NEAR ){
+ while( pRoot->pParent
+ && (pRoot->pParent->eType==FTSQUERY_NEAR || pRoot->bDeferred)
+ ){
pRoot = pRoot->pParent;
}
iDocid = pRoot->iDocid;
@@ -184480,14 +186016,8 @@ static int fts3EvalGatherStats(
assert( pRoot->bStart );
/* Allocate space for the aMSI[] array of each FTSQUERY_PHRASE node */
- for(p=pRoot; p; p=p->pLeft){
- Fts3Expr *pE = (p->eType==FTSQUERY_PHRASE?p:p->pRight);
- assert( pE->aMI==0 );
- pE->aMI = (u32 *)sqlite3_malloc64(pTab->nColumn * 3 * sizeof(u32));
- if( !pE->aMI ) return SQLITE_NOMEM;
- memset(pE->aMI, 0, pTab->nColumn * 3 * sizeof(u32));
- }
-
+ rc = sqlite3Fts3ExprIterate(pRoot, fts3AllocateMSI, (void*)pTab);
+ if( rc!=SQLITE_OK ) return rc;
fts3EvalRestart(pCsr, pRoot, &rc);
while( pCsr->isEof==0 && rc==SQLITE_OK ){
@@ -184643,6 +186173,7 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
u8 bTreeEof = 0;
Fts3Expr *p; /* Used to iterate from pExpr to root */
Fts3Expr *pNear; /* Most senior NEAR ancestor (or pExpr) */
+ Fts3Expr *pRun; /* Closest non-deferred ancestor of pNear */
int bMatch;
/* Check if this phrase descends from an OR expression node. If not,
@@ -184657,25 +186188,30 @@ SQLITE_PRIVATE int sqlite3Fts3EvalPhrasePoslist(
if( p->bEof ) bTreeEof = 1;
}
if( bOr==0 ) return SQLITE_OK;
+ pRun = pNear;
+ while( pRun->bDeferred ){
+ assert( pRun->pParent );
+ pRun = pRun->pParent;
+ }
  /* This is the descendant of an OR node.  In this case we cannot use
** an incremental phrase. Load the entire doclist for the phrase
** into memory in this case. */
if( pPhrase->bIncr ){
- int bEofSave = pNear->bEof;
- fts3EvalRestart(pCsr, pNear, &rc);
- while( rc==SQLITE_OK && !pNear->bEof ){
- fts3EvalNextRow(pCsr, pNear, &rc);
- if( bEofSave==0 && pNear->iDocid==iDocid ) break;
+ int bEofSave = pRun->bEof;
+ fts3EvalRestart(pCsr, pRun, &rc);
+ while( rc==SQLITE_OK && !pRun->bEof ){
+ fts3EvalNextRow(pCsr, pRun, &rc);
+ if( bEofSave==0 && pRun->iDocid==iDocid ) break;
}
assert( rc!=SQLITE_OK || pPhrase->bIncr==0 );
- if( rc==SQLITE_OK && pNear->bEof!=bEofSave ){
+ if( rc==SQLITE_OK && pRun->bEof!=bEofSave ){
rc = FTS_CORRUPT_VTAB;
}
}
if( bTreeEof ){
- while( rc==SQLITE_OK && !pNear->bEof ){
- fts3EvalNextRow(pCsr, pNear, &rc);
+ while( rc==SQLITE_OK && !pRun->bEof ){
+ fts3EvalNextRow(pCsr, pRun, &rc);
}
}
if( rc!=SQLITE_OK ) return rc;
@@ -191591,16 +193127,18 @@ static int fts3MsrBufferData(
char *pList,
i64 nList
){
- if( nList>pMsr->nBuffer ){
+ if( (nList+FTS3_NODE_PADDING)>pMsr->nBuffer ){
char *pNew;
- pMsr->nBuffer = nList*2;
- pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, pMsr->nBuffer);
+ int nNew = nList*2 + FTS3_NODE_PADDING;
+ pNew = (char *)sqlite3_realloc64(pMsr->aBuffer, nNew);
if( !pNew ) return SQLITE_NOMEM;
pMsr->aBuffer = pNew;
+ pMsr->nBuffer = nNew;
}
assert( nList>0 );
memcpy(pMsr->aBuffer, pList, nList);
+ memset(&pMsr->aBuffer[nList], 0, FTS3_NODE_PADDING);
return SQLITE_OK;
}
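Two things change in this hunk: the capacity test now accounts for FTS3_NODE_PADDING, and the padding bytes are zeroed after each copy, presumably so that readers may safely look a few bytes past the payload. A generic sketch of the corrected idiom (Buf, PAD, and bufStore are stand-ins, not FTS3 names):

```c
#include <stdlib.h>
#include <string.h>

#define PAD 4  /* stand-in for FTS3_NODE_PADDING */

typedef struct Buf { char *a; int n; } Buf;

static int bufStore(Buf *p, const char *pData, int nData){
  if( nData+PAD > p->n ){
    int nNew = nData*2 + PAD;                 /* double to amortize */
    char *aNew = realloc(p->a, (size_t)nNew);
    if( aNew==0 ) return -1;                  /* SQLITE_NOMEM analogue */
    p->a = aNew;
    p->n = nNew;                              /* record new capacity */
  }
  memcpy(p->a, pData, (size_t)nData);
  memset(&p->a[nData], 0, PAD);               /* zero the padding */
  return 0;
}
```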
@@ -194782,7 +196320,7 @@ typedef sqlite3_int64 i64;
/*
-** Used as an fts3ExprIterate() context when loading phrase doclists to
+** Used as an sqlite3Fts3ExprIterate() context when loading phrase doclists to
** Fts3Expr.aDoclist[]/nDoclist.
*/
typedef struct LoadDoclistCtx LoadDoclistCtx;
@@ -194826,7 +196364,7 @@ struct SnippetFragment {
};
/*
-** This type is used as an fts3ExprIterate() context object while
+** This type is used as an sqlite3Fts3ExprIterate() context object while
** accumulating the data returned by the matchinfo() function.
*/
typedef struct MatchInfo MatchInfo;
@@ -194985,7 +196523,7 @@ static void fts3GetDeltaPosition(char **pp, i64 *piPos){
}
/*
-** Helper function for fts3ExprIterate() (see below).
+** Helper function for sqlite3Fts3ExprIterate() (see below).
*/
static int fts3ExprIterate2(
Fts3Expr *pExpr, /* Expression to iterate phrases of */
@@ -195019,7 +196557,7 @@ static int fts3ExprIterate2(
** Otherwise, SQLITE_OK is returned after a callback has been made for
** all eligible phrase nodes.
*/
-static int fts3ExprIterate(
+SQLITE_PRIVATE int sqlite3Fts3ExprIterate(
Fts3Expr *pExpr, /* Expression to iterate phrases of */
int (*x)(Fts3Expr*,int,void*), /* Callback function to invoke for phrases */
void *pCtx /* Second argument to pass to callback */
@@ -195028,10 +196566,9 @@ static int fts3ExprIterate(
return fts3ExprIterate2(pExpr, &iPhrase, x, pCtx);
}
-
/*
-** This is an fts3ExprIterate() callback used while loading the doclists
-** for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also
+** This is an sqlite3Fts3ExprIterate() callback used while loading the
+** doclists for each phrase into Fts3Expr.aDoclist[]/nDoclist. See also
** fts3ExprLoadDoclists().
*/
static int fts3ExprLoadDoclistsCb(Fts3Expr *pExpr, int iPhrase, void *ctx){
@@ -195063,9 +196600,9 @@ static int fts3ExprLoadDoclists(
int *pnToken /* OUT: Number of tokens in query */
){
int rc; /* Return Code */
- LoadDoclistCtx sCtx = {0,0,0}; /* Context for fts3ExprIterate() */
+ LoadDoclistCtx sCtx = {0,0,0}; /* Context for sqlite3Fts3ExprIterate() */
sCtx.pCsr = pCsr;
- rc = fts3ExprIterate(pCsr->pExpr, fts3ExprLoadDoclistsCb, (void *)&sCtx);
+ rc = sqlite3Fts3ExprIterate(pCsr->pExpr,fts3ExprLoadDoclistsCb,(void*)&sCtx);
if( pnPhrase ) *pnPhrase = sCtx.nPhrase;
if( pnToken ) *pnToken = sCtx.nToken;
return rc;
@@ -195078,7 +196615,7 @@ static int fts3ExprPhraseCountCb(Fts3Expr *pExpr, int iPhrase, void *ctx){
}
static int fts3ExprPhraseCount(Fts3Expr *pExpr){
int nPhrase = 0;
- (void)fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase);
+ (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprPhraseCountCb, (void *)&nPhrase);
return nPhrase;
}
@@ -195206,8 +196743,9 @@ static void fts3SnippetDetails(
}
/*
-** This function is an fts3ExprIterate() callback used by fts3BestSnippet().
-** Each invocation populates an element of the SnippetIter.aPhrase[] array.
+** This function is an sqlite3Fts3ExprIterate() callback used by
+** fts3BestSnippet(). Each invocation populates an element of the
+** SnippetIter.aPhrase[] array.
*/
static int fts3SnippetFindPositions(Fts3Expr *pExpr, int iPhrase, void *ctx){
SnippetIter *p = (SnippetIter *)ctx;
@@ -195297,7 +196835,9 @@ static int fts3BestSnippet(
sIter.nSnippet = nSnippet;
sIter.nPhrase = nList;
sIter.iCurrent = -1;
- rc = fts3ExprIterate(pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter);
+ rc = sqlite3Fts3ExprIterate(
+ pCsr->pExpr, fts3SnippetFindPositions, (void*)&sIter
+ );
if( rc==SQLITE_OK ){
/* Set the *pmSeen output variable. */
@@ -195658,10 +197198,10 @@ static int fts3ExprLHitGather(
}
/*
-** fts3ExprIterate() callback used to collect the "global" matchinfo stats
-** for a single query.
+** sqlite3Fts3ExprIterate() callback used to collect the "global" matchinfo
+** stats for a single query.
**
-** fts3ExprIterate() callback to load the 'global' elements of a
+** sqlite3Fts3ExprIterate() callback to load the 'global' elements of a
** FTS3_MATCHINFO_HITS matchinfo array. The global stats are those elements
** of the matchinfo array that are constant for all rows returned by the
** current query.
@@ -195696,7 +197236,7 @@ static int fts3ExprGlobalHitsCb(
}
/*
-** fts3ExprIterate() callback used to collect the "local" part of the
+** sqlite3Fts3ExprIterate() callback used to collect the "local" part of the
** FTS3_MATCHINFO_HITS array. The local stats are those elements of the
** array that are different for each row returned by the query.
*/
@@ -195892,7 +197432,7 @@ static int fts3MatchinfoLcs(Fts3Cursor *pCsr, MatchInfo *pInfo){
**/
aIter = sqlite3Fts3MallocZero(sizeof(LcsIterator) * pCsr->nPhrase);
if( !aIter ) return SQLITE_NOMEM;
- (void)fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter);
+ (void)sqlite3Fts3ExprIterate(pCsr->pExpr, fts3MatchinfoLcsCb, (void*)aIter);
for(i=0; i<pInfo->nPhrase; i++){
LcsIterator *pIter = &aIter[i];
@@ -196069,11 +197609,11 @@ static int fts3MatchinfoValues(
rc = fts3MatchinfoSelectDoctotal(pTab, &pSelect, &pInfo->nDoc,0,0);
if( rc!=SQLITE_OK ) break;
}
- rc = fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo);
+ rc = sqlite3Fts3ExprIterate(pExpr, fts3ExprGlobalHitsCb,(void*)pInfo);
sqlite3Fts3EvalTestDeferred(pCsr, &rc);
if( rc!=SQLITE_OK ) break;
}
- (void)fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo);
+ (void)sqlite3Fts3ExprIterate(pExpr, fts3ExprLocalHitsCb,(void*)pInfo);
break;
}
}
@@ -196296,7 +197836,7 @@ struct TermOffsetCtx {
};
/*
-** This function is an fts3ExprIterate() callback used by sqlite3Fts3Offsets().
+** This function is an sqlite3Fts3ExprIterate() callback used by sqlite3Fts3Offsets().
*/
static int fts3ExprTermOffsetInit(Fts3Expr *pExpr, int iPhrase, void *ctx){
TermOffsetCtx *p = (TermOffsetCtx *)ctx;
@@ -196378,7 +197918,9 @@ SQLITE_PRIVATE void sqlite3Fts3Offsets(
*/
sCtx.iCol = iCol;
sCtx.iTerm = 0;
- rc = fts3ExprIterate(pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx);
+ rc = sqlite3Fts3ExprIterate(
+ pCsr->pExpr, fts3ExprTermOffsetInit, (void*)&sCtx
+ );
if( rc!=SQLITE_OK ) goto offsets_out;
  /* Retrieve the text stored in column iCol.  If an SQL NULL is stored
@@ -199754,6 +201296,13 @@ static int jsonEachBestIndex(
idxMask |= iMask;
}
}
+ if( pIdxInfo->nOrderBy>0
+ && pIdxInfo->aOrderBy[0].iColumn<0
+ && pIdxInfo->aOrderBy[0].desc==0
+ ){
+ pIdxInfo->orderByConsumed = 1;
+ }
+
if( (unusableMask & ~idxMask)!=0 ){
/* If there are any unusable constraints on JSON or ROOT, then reject
** this entire plan */
@@ -199949,10 +201498,10 @@ SQLITE_PRIVATE void sqlite3RegisterJsonFunctions(void){
#endif
WAGGREGATE(json_group_array, 1, 0, 0,
jsonArrayStep, jsonArrayFinal, jsonArrayValue, jsonGroupInverse,
- SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS),
+ SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC),
WAGGREGATE(json_group_object, 2, 0, 0,
jsonObjectStep, jsonObjectFinal, jsonObjectValue, jsonGroupInverse,
- SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS)
+ SQLITE_SUBTYPE|SQLITE_UTF8|SQLITE_DETERMINISTIC)
};
sqlite3InsertBuiltinFuncs(aJsonFunc, ArraySize(aJsonFunc));
#endif
@@ -200484,7 +202033,7 @@ static int readInt16(u8 *p){
return (p[0]<<8) + p[1];
}
static void readCoord(u8 *p, RtreeCoord *pCoord){
- assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */
+ assert( (((sqlite3_uint64)p)&3)==0 ); /* p is always 4-byte aligned */
#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
pCoord->u = _byteswap_ulong(*(u32*)p);
#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
@@ -200538,7 +202087,7 @@ static void writeInt16(u8 *p, int i){
}
static int writeCoord(u8 *p, RtreeCoord *pCoord){
u32 i;
- assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */
+ assert( (((sqlite3_uint64)p)&3)==0 ); /* p is always 4-byte aligned */
assert( sizeof(RtreeCoord)==4 );
assert( sizeof(u32)==4 );
#if SQLITE_BYTEORDER==1234 && GCC_VERSION>=4003000
@@ -201266,7 +202815,7 @@ static void rtreeNonleafConstraint(
assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
|| p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
|| p->op==RTREE_FALSE );
- assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */
+ assert( (((sqlite3_uint64)pCellData)&3)==0 ); /* 4-byte aligned */
switch( p->op ){
case RTREE_TRUE: return; /* Always satisfied */
case RTREE_FALSE: break; /* Never satisfied */
@@ -201319,7 +202868,7 @@ static void rtreeLeafConstraint(
|| p->op==RTREE_GT || p->op==RTREE_EQ || p->op==RTREE_TRUE
|| p->op==RTREE_FALSE );
pCellData += 8 + p->iCoord*4;
- assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */
+ assert( (((sqlite3_uint64)pCellData)&3)==0 ); /* 4-byte aligned */
RTREE_DECODE_COORD(eInt, pCellData, xN);
switch( p->op ){
case RTREE_TRUE: return; /* Always satisfied */
@@ -204691,7 +206240,7 @@ static GeoPoly *geopolyFuncParam(
int nByte;
testcase( pCtx==0 );
if( sqlite3_value_type(pVal)==SQLITE_BLOB
- && (nByte = sqlite3_value_bytes(pVal))>=(4+6*sizeof(GeoCoord))
+ && (nByte = sqlite3_value_bytes(pVal))>=(int)(4+6*sizeof(GeoCoord))
){
const unsigned char *a = sqlite3_value_blob(pVal);
int nVertex;
@@ -204749,6 +206298,7 @@ static void geopolyBlobFunc(
sqlite3_value **argv
){
GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ (void)argc;
if( p ){
sqlite3_result_blob(context, p->hdr,
4+8*p->nVertex, SQLITE_TRANSIENT);
@@ -204768,6 +206318,7 @@ static void geopolyJsonFunc(
sqlite3_value **argv
){
GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ (void)argc;
if( p ){
sqlite3 *db = sqlite3_context_db_handle(context);
sqlite3_str *x = sqlite3_str_new(db);
@@ -204849,6 +206400,7 @@ static void geopolyXformFunc(
double F = sqlite3_value_double(argv[6]);
GeoCoord x1, y1, x0, y0;
int ii;
+ (void)argc;
if( p ){
for(ii=0; ii<p->nVertex; ii++){
x0 = GeoX(p,ii);
@@ -204899,6 +206451,7 @@ static void geopolyAreaFunc(
sqlite3_value **argv
){
GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ (void)argc;
if( p ){
sqlite3_result_double(context, geopolyArea(p));
sqlite3_free(p);
@@ -204924,6 +206477,7 @@ static void geopolyCcwFunc(
sqlite3_value **argv
){
GeoPoly *p = geopolyFuncParam(context, argv[0], 0);
+ (void)argc;
if( p ){
if( geopolyArea(p)<0.0 ){
int ii, jj;
@@ -204978,6 +206532,7 @@ static void geopolyRegularFunc(
int n = sqlite3_value_int(argv[3]);
int i;
GeoPoly *p;
+ (void)argc;
if( n<3 || r<=0.0 ) return;
if( n>1000 ) n = 1000;
@@ -205087,6 +206642,7 @@ static void geopolyBBoxFunc(
sqlite3_value **argv
){
GeoPoly *p = geopolyBBox(context, argv[0], 0, 0);
+ (void)argc;
if( p ){
sqlite3_result_blob(context, p->hdr,
4+8*p->nVertex, SQLITE_TRANSIENT);
@@ -205114,6 +206670,7 @@ static void geopolyBBoxStep(
){
RtreeCoord a[4];
int rc = SQLITE_OK;
+ (void)argc;
(void)geopolyBBox(context, argv[0], a, &rc);
if( rc==SQLITE_OK ){
GeoBBox *pBBox;
@@ -205202,6 +206759,8 @@ static void geopolyContainsPointFunc(
int v = 0;
int cnt = 0;
int ii;
+ (void)argc;
+
if( p1==0 ) return;
for(ii=0; ii<p1->nVertex-1; ii++){
v = pointBeneathLine(x0,y0,GeoX(p1,ii), GeoY(p1,ii),
@@ -205241,6 +206800,7 @@ static void geopolyWithinFunc(
){
GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0);
+ (void)argc;
if( p1 && p2 ){
int x = geopolyOverlap(p1, p2);
if( x<0 ){
@@ -205571,6 +207131,7 @@ static void geopolyOverlapFunc(
){
GeoPoly *p1 = geopolyFuncParam(context, argv[0], 0);
GeoPoly *p2 = geopolyFuncParam(context, argv[1], 0);
+ (void)argc;
if( p1 && p2 ){
int x = geopolyOverlap(p1, p2);
if( x<0 ){
@@ -205591,8 +207152,12 @@ static void geopolyDebugFunc(
int argc,
sqlite3_value **argv
){
+ (void)context;
+ (void)argc;
#ifdef GEOPOLY_ENABLE_DEBUG
geo_debug = sqlite3_value_int(argv[0]);
+#else
+ (void)argv;
#endif
}
@@ -205620,6 +207185,7 @@ static int geopolyInit(
sqlite3_str *pSql;
char *zSql;
int ii;
+ (void)pAux;
sqlite3_vtab_config(db, SQLITE_VTAB_CONSTRAINT_SUPPORT, 1);
@@ -205736,6 +207302,7 @@ static int geopolyFilter(
RtreeNode *pRoot = 0;
int rc = SQLITE_OK;
int iCell = 0;
+ (void)idxStr;
rtreeReference(pRtree);
@@ -205862,6 +207429,7 @@ static int geopolyBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
int iRowidTerm = -1;
int iFuncTerm = -1;
int idxNum = 0;
+ (void)tab;
for(ii=0; ii<pIdxInfo->nConstraint; ii++){
struct sqlite3_index_constraint *p = &pIdxInfo->aConstraint[ii];
@@ -206108,6 +207676,8 @@ static int geopolyFindFunction(
void (**pxFunc)(sqlite3_context*,int,sqlite3_value**),
void **ppArg
){
+ (void)pVtab;
+ (void)nArg;
if( sqlite3_stricmp(zName, "geopoly_overlap")==0 ){
*pxFunc = geopolyOverlapFunc;
*ppArg = 0;
@@ -206177,7 +207747,7 @@ static int sqlite3_geopoly_init(sqlite3 *db){
} aAgg[] = {
{ geopolyBBoxStep, geopolyBBoxFinal, "geopoly_group_bbox" },
};
- int i;
+ unsigned int i;
for(i=0; i<sizeof(aFunc)/sizeof(aFunc[0]) && rc==SQLITE_OK; i++){
int enc;
if( aFunc[i].bPure ){
@@ -207398,7 +208968,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(
** The order of the columns in the data_% table does not matter.
**
** Instead of a regular table, the RBU database may also contain virtual
-** tables or view named using the data_<target> naming scheme.
+** tables or views named using the data_<target> naming scheme.
**
** Instead of the plain data_<target> naming scheme, RBU database tables
** may also be named data<integer>_<target>, where <integer> is any sequence
@@ -207411,7 +208981,7 @@ SQLITE_PRIVATE void sqlite3Fts3IcuTokenizerModule(
**
** If the target database table is a virtual table or a table that has no
** PRIMARY KEY declaration, the data_% table must also contain a column
-** named "rbu_rowid". This column is mapped to the tables implicit primary
+** named "rbu_rowid". This column is mapped to the table's implicit primary
** key column - "rowid". Virtual tables for which the "rowid" column does
** not function like a primary key value cannot be updated using RBU. For
** example, if the target db contains either of the following:
@@ -210877,11 +212447,11 @@ static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){
** no-ops. These locks will not be released until the connection
** is closed.
**
- ** * Attempting to xSync() the database file causes an SQLITE_INTERNAL
+ ** * Attempting to xSync() the database file causes an SQLITE_NOTICE
** error.
**
** As a result, unless an error (i.e. OOM or SQLITE_BUSY) occurs, the
- ** checkpoint below fails with SQLITE_INTERNAL, and leaves the aFrame[]
+ ** checkpoint below fails with SQLITE_NOTICE, and leaves the aFrame[]
** array populated with a set of (frame -> page) mappings. Because the
** WRITER, CHECKPOINT and READ0 locks are still held, it is safe to copy
** data from the wal file into the database file according to the
@@ -210891,7 +212461,7 @@ static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){
int rc2;
p->eStage = RBU_STAGE_CAPTURE;
rc2 = sqlite3_exec(p->dbMain, "PRAGMA main.wal_checkpoint=restart", 0, 0,0);
- if( rc2!=SQLITE_INTERNAL ) p->rc = rc2;
+ if( rc2!=SQLITE_NOTICE ) p->rc = rc2;
}
if( p->rc==SQLITE_OK && p->nFrame>0 ){
@@ -210937,7 +212507,7 @@ static int rbuCaptureWalRead(sqlite3rbu *pRbu, i64 iOff, int iAmt){
if( pRbu->mLock!=mReq ){
pRbu->rc = SQLITE_BUSY;
- return SQLITE_INTERNAL;
+ return SQLITE_NOTICE_RBU;
}
pRbu->pgsz = iAmt;
@@ -211676,7 +213246,8 @@ static void rbuSetupOal(sqlite3rbu *p, RbuState *pState){
static void rbuDeleteOalFile(sqlite3rbu *p){
char *zOal = rbuMPrintf(p, "%s-oal", p->zTarget);
if( zOal ){
- sqlite3_vfs *pVfs = sqlite3_vfs_find(0);
+ sqlite3_vfs *pVfs = 0;
+ sqlite3_file_control(p->dbMain, "main", SQLITE_FCNTL_VFS_POINTER, &pVfs);
assert( pVfs && p->rc==SQLITE_OK && p->zErrmsg==0 );
pVfs->xDelete(pVfs, zOal, 0);
sqlite3_free(zOal);
@@ -212324,7 +213895,7 @@ SQLITE_API void sqlite3rbu_rename_handler(
** database file are recorded. xShmLock() calls to unlock the same
** locks are no-ops (so that once obtained, these locks are never
** relinquished). Finally, calls to xSync() on the target database
-** file fail with SQLITE_INTERNAL errors.
+** file fail with SQLITE_NOTICE errors.
*/
static void rbuUnlockShm(rbu_file *p){
@@ -212433,9 +214004,12 @@ static int rbuVfsClose(sqlite3_file *pFile){
sqlite3_free(p->zDel);
if( p->openFlags & SQLITE_OPEN_MAIN_DB ){
+ const sqlite3_io_methods *pMeth = p->pReal->pMethods;
rbuMainlistRemove(p);
rbuUnlockShm(p);
- p->pReal->pMethods->xShmUnmap(p->pReal, 0);
+ if( pMeth->iVersion>1 && pMeth->xShmUnmap ){
+ pMeth->xShmUnmap(p->pReal, 0);
+ }
}
else if( (p->openFlags & SQLITE_OPEN_DELETEONCLOSE) && p->pRbu ){
rbuUpdateTempSize(p, 0);
@@ -212603,7 +214177,7 @@ static int rbuVfsSync(sqlite3_file *pFile, int flags){
rbu_file *p = (rbu_file *)pFile;
if( p->pRbu && p->pRbu->eStage==RBU_STAGE_CAPTURE ){
if( p->openFlags & SQLITE_OPEN_MAIN_DB ){
- return SQLITE_INTERNAL;
+ return SQLITE_NOTICE_RBU;
}
return SQLITE_OK;
}
@@ -212894,6 +214468,25 @@ static int rbuVfsOpen(
rbuVfsShmUnmap, /* xShmUnmap */
0, 0 /* xFetch, xUnfetch */
};
+ static sqlite3_io_methods rbuvfs_io_methods1 = {
+ 1, /* iVersion */
+ rbuVfsClose, /* xClose */
+ rbuVfsRead, /* xRead */
+ rbuVfsWrite, /* xWrite */
+ rbuVfsTruncate, /* xTruncate */
+ rbuVfsSync, /* xSync */
+ rbuVfsFileSize, /* xFileSize */
+ rbuVfsLock, /* xLock */
+ rbuVfsUnlock, /* xUnlock */
+ rbuVfsCheckReservedLock, /* xCheckReservedLock */
+ rbuVfsFileControl, /* xFileControl */
+ rbuVfsSectorSize, /* xSectorSize */
+ rbuVfsDeviceCharacteristics, /* xDeviceCharacteristics */
+ 0, 0, 0, 0, 0, 0
+ };
+
rbu_vfs *pRbuVfs = (rbu_vfs*)pVfs;
sqlite3_vfs *pRealVfs = pRbuVfs->pRealVfs;
rbu_file *pFd = (rbu_file *)pFile;
@@ -212948,10 +214541,15 @@ static int rbuVfsOpen(
rc = pRealVfs->xOpen(pRealVfs, zOpen, pFd->pReal, oflags, pOutFlags);
}
if( pFd->pReal->pMethods ){
+ const sqlite3_io_methods *pMeth = pFd->pReal->pMethods;
/* The xOpen() operation has succeeded. Set the sqlite3_file.pMethods
** pointer and, if the file is a main database file, link it into the
** mutex protected linked list of all such files. */
- pFile->pMethods = &rbuvfs_io_methods;
+ if( pMeth->iVersion<2 || pMeth->xShmLock==0 ){
+ pFile->pMethods = &rbuvfs_io_methods1;
+ }else{
+ pFile->pMethods = &rbuvfs_io_methods;
+ }
if( flags & SQLITE_OPEN_MAIN_DB ){
rbuMainlistAdd(pFd);
}
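The new rbuvfs_io_methods1 table pairs with this selection logic: when the real file has no shared-memory support, RBU now presents an iVersion==1 wrapper so SQLite never routes xShm* calls into it. A hedged sketch of the dispatch choice (helper name is illustrative):

```c
#include "sqlite3.h"

static const sqlite3_io_methods *rbuPickMethods(
  const sqlite3_io_methods *pReal,  /* methods of the underlying file */
  const sqlite3_io_methods *pV1,    /* wrapper with iVersion==1 */
  const sqlite3_io_methods *pV2     /* wrapper with the xShm* hooks */
){
  if( pReal->iVersion<2 || pReal->xShmLock==0 ){
    return pV1;  /* no shared-memory support underneath */
  }
  return pV2;
}
```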
@@ -213376,7 +214974,7 @@ struct StatTable {
*/
static int statConnect(
sqlite3 *db,
- void *pAux __maybe_unused,
+ void *pAux,
int argc, const char *const*argv,
sqlite3_vtab **ppVtab,
char **pzErr
@@ -213384,6 +214982,7 @@ static int statConnect(
StatTable *pTab = 0;
int rc = SQLITE_OK;
int iDb;
+ (void)pAux;
if( argc>=4 ){
Token nm;
@@ -213432,11 +215031,12 @@ static int statDisconnect(sqlite3_vtab *pVtab){
** 0x04 There is an aggregate=? term in the WHERE clause
** 0x08 Output should be ordered by name and path
*/
-static int statBestIndex(sqlite3_vtab *tab __maybe_unused, sqlite3_index_info *pIdxInfo){
+static int statBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
int i;
int iSchema = -1;
int iName = -1;
int iAgg = -1;
+ (void)tab;
/* Look for a valid schema=? constraint. If found, change the idxNum to
** 1 and request the value of that constraint be sent to xFilter. And
@@ -213952,8 +215552,8 @@ static int statEof(sqlite3_vtab_cursor *pCursor){
*/
static int statFilter(
sqlite3_vtab_cursor *pCursor,
- int idxNum, const char *idxStr __maybe_unused,
- int argc __maybe_unused, sqlite3_value **argv
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
){
StatCursor *pCsr = (StatCursor *)pCursor;
StatTable *pTab = (StatTable*)(pCursor->pVtab);
@@ -213962,6 +215562,8 @@ static int statFilter(
int iArg = 0; /* Count of argv[] parameters used so far */
int rc = SQLITE_OK; /* Result of this operation */
const char *zName = 0; /* Only provide analysis of this table */
+ (void)argc;
+ (void)idxStr;
statResetCsr(pCsr);
sqlite3_finalize(pCsr->pStmt);
@@ -214045,16 +215647,16 @@ static int statColumn(
}
break;
case 4: /* ncell */
- sqlite3_result_int(ctx, pCsr->nCell);
+ sqlite3_result_int64(ctx, pCsr->nCell);
break;
case 5: /* payload */
- sqlite3_result_int(ctx, pCsr->nPayload);
+ sqlite3_result_int64(ctx, pCsr->nPayload);
break;
case 6: /* unused */
- sqlite3_result_int(ctx, pCsr->nUnused);
+ sqlite3_result_int64(ctx, pCsr->nUnused);
break;
case 7: /* mx_payload */
- sqlite3_result_int(ctx, pCsr->nMxPayload);
+ sqlite3_result_int64(ctx, pCsr->nMxPayload);
break;
case 8: /* pgoffset */
if( !pCsr->isAgg ){
@@ -214062,7 +215664,7 @@ static int statColumn(
}
break;
case 9: /* pgsize */
- sqlite3_result_int(ctx, pCsr->szPage);
+ sqlite3_result_int64(ctx, pCsr->szPage);
break;
case 10: { /* schema */
sqlite3 *db = sqlite3_context_db_handle(ctx);
@@ -214196,6 +215798,10 @@ static int dbpageConnect(
){
DbpageTable *pTab = 0;
int rc = SQLITE_OK;
+ (void)pAux;
+ (void)argc;
+ (void)argv;
+ (void)pzErr;
sqlite3_vtab_config(db, SQLITE_VTAB_DIRECTONLY);
rc = sqlite3_declare_vtab(db,
@@ -214234,6 +215840,7 @@ static int dbpageDisconnect(sqlite3_vtab *pVtab){
static int dbpageBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
int i;
int iPlan = 0;
+ (void)tab;
/* If there is a schema= constraint, it must be honored. Report a
** ridiculously large estimated cost if the schema= constraint is
@@ -214349,6 +215956,8 @@ static int dbpageFilter(
sqlite3 *db = pTab->db;
Btree *pBt;
+ (void)idxStr;
+
/* Default setting is no rows of result */
pCsr->pgno = 1;
pCsr->mxPgno = 0;
@@ -214363,7 +215972,7 @@ static int dbpageFilter(
pCsr->iDb = 0;
}
pBt = db->aDb[pCsr->iDb].pBt;
- if( pBt==0 ) return SQLITE_OK;
+ if( NEVER(pBt==0) ) return SQLITE_OK;
pCsr->pPager = sqlite3BtreePager(pBt);
pCsr->szPage = sqlite3BtreeGetPageSize(pBt);
pCsr->mxPgno = sqlite3BtreeLastPage(pBt);
@@ -214444,6 +216053,7 @@ static int dbpageUpdate(
Pager *pPager;
int szPage;
+ (void)pRowid;
if( pTab->db->flags & SQLITE_Defensive ){
zErr = "read-only";
goto update_fail;
@@ -214453,18 +216063,20 @@ static int dbpageUpdate(
goto update_fail;
}
pgno = sqlite3_value_int(argv[0]);
- if( (Pgno)sqlite3_value_int(argv[1])!=pgno ){
+ if( sqlite3_value_type(argv[0])==SQLITE_NULL
+ || (Pgno)sqlite3_value_int(argv[1])!=pgno
+ ){
zErr = "cannot insert";
goto update_fail;
}
zSchema = (const char*)sqlite3_value_text(argv[4]);
- iDb = zSchema ? sqlite3FindDbName(pTab->db, zSchema) : -1;
- if( iDb<0 ){
+ iDb = ALWAYS(zSchema) ? sqlite3FindDbName(pTab->db, zSchema) : -1;
+ if( NEVER(iDb<0) ){
zErr = "no such schema";
goto update_fail;
}
pBt = pTab->db->aDb[iDb].pBt;
- if( pgno<1 || pBt==0 || pgno>sqlite3BtreeLastPage(pBt) ){
+ if( NEVER(pgno<1) || NEVER(pBt==0) || NEVER(pgno>sqlite3BtreeLastPage(pBt)) ){
zErr = "bad page number";
goto update_fail;
}
@@ -214503,12 +216115,11 @@ static int dbpageBegin(sqlite3_vtab *pVtab){
DbpageTable *pTab = (DbpageTable *)pVtab;
sqlite3 *db = pTab->db;
int i;
- int rc = SQLITE_OK;
- for(i=0; rc==SQLITE_OK && i<db->nDb; i++){
+ for(i=0; i<db->nDb; i++){
Btree *pBt = db->aDb[i].pBt;
- if( pBt ) rc = sqlite3BtreeBeginTrans(pBt, 1, 0);
+ if( pBt ) (void)sqlite3BtreeBeginTrans(pBt, 1, 0);
}
- return rc;
+ return SQLITE_OK;
}
@@ -216051,6 +217662,8 @@ static void xPreUpdate(
int nDb = sqlite3Strlen30(zDb);
assert( sqlite3_mutex_held(db->mutex) );
+ (void)iKey1;
+ (void)iKey2;
for(pSession=(sqlite3_session *)pCtx; pSession; pSession=pSession->pNext){
SessionTable *pTab;
@@ -216127,6 +217740,7 @@ static int sessionDiffCount(void *pCtx){
return p->nOldOff ? p->nOldOff : sqlite3_column_count(p->pStmt);
}
static int sessionDiffDepth(void *pCtx){
+ (void)pCtx;
return 0;
}
@@ -216200,7 +217814,6 @@ static char *sessionExprCompareOther(
}
static char *sessionSelectFindNew(
- int nCol,
const char *zDb1, /* Pick rows in this db only */
const char *zDb2, /* But not in this one */
const char *zTbl, /* Table name */
@@ -216224,7 +217837,7 @@ static int sessionDiffFindNew(
char *zExpr
){
int rc = SQLITE_OK;
- char *zStmt = sessionSelectFindNew(pTab->nCol, zDb1, zDb2, pTab->zName,zExpr);
+ char *zStmt = sessionSelectFindNew(zDb1, zDb2, pTab->zName,zExpr);
if( zStmt==0 ){
rc = SQLITE_NOMEM;
@@ -218741,7 +220354,6 @@ static int sessionBindRow(
** UPDATE, bind values from the old.* record.
*/
static int sessionSeekToRow(
- sqlite3 *db, /* Database handle */
sqlite3_changeset_iter *pIter, /* Changeset iterator */
u8 *abPK, /* Primary key flags array */
sqlite3_stmt *pSelect /* SELECT statement from sessionSelectRow() */
@@ -218871,7 +220483,7 @@ static int sessionConflictHandler(
/* Bind the new.* PRIMARY KEY values to the SELECT statement. */
if( pbReplace ){
- rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect);
+ rc = sessionSeekToRow(pIter, p->abPK, p->pSelect);
}else{
rc = SQLITE_OK;
}
@@ -219045,7 +220657,7 @@ static int sessionApplyOneOp(
/* Check if there is a conflicting row. For sqlite_stat1, this needs
** to be done using a SELECT, as there is no PRIMARY KEY in the
** database schema to throw an exception if a duplicate is inserted. */
- rc = sessionSeekToRow(p->db, pIter, p->abPK, p->pSelect);
+ rc = sessionSeekToRow(pIter, p->abPK, p->pSelect);
if( rc==SQLITE_ROW ){
rc = SQLITE_CONSTRAINT;
sqlite3_reset(p->pSelect);
@@ -225695,6 +227307,19 @@ static int sqlite3Fts5ExprNew(
}
/*
+** Assuming that buffer z is at least nByte bytes in size and contains a
+** valid utf-8 string, return the number of characters in the string.
+*/
+static int fts5ExprCountChar(const char *z, int nByte){
+ int nRet = 0;
+ int ii;
+ for(ii=0; ii<nByte; ii++){
+ if( (z[ii] & 0xC0)!=0x80 ) nRet++;
+ }
+ return nRet;
+}
+
+/*
** This function is only called when using the special 'trigram' tokenizer.
** Argument zText contains the text of a LIKE or GLOB pattern matched
** against column iCol. This function creates and compiles an FTS5 MATCH
@@ -225731,7 +227356,8 @@ static int sqlite3Fts5ExprPattern(
if( i==nText
|| zText[i]==aSpec[0] || zText[i]==aSpec[1] || zText[i]==aSpec[2]
){
- if( i-iFirst>=3 ){
+
+ if( fts5ExprCountChar(&zText[iFirst], i-iFirst)>=3 ){
int jj;
zExpr[iOut++] = '"';
for(jj=iFirst; jj<i; jj++){
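
fts5ExprCountChar() works because every UTF-8 continuation byte has the bit pattern 10xxxxxx, so counting the bytes that do not match it counts characters; the hunk above then compares characters, not bytes, against the trigram minimum of three. A self-contained sketch with an illustrative multi-byte input:

```c
#include <stdio.h>

/* Count UTF-8 characters in the first n bytes of z: every byte except a
** 10xxxxxx continuation byte starts a new character. */
static int count_chars(const char *z, int n){
  int nChar = 0;
  for(int i=0; i<n; i++){
    if( (z[i] & 0xC0)!=0x80 ) nChar++;
  }
  return nChar;
}

int main(void){
  const char *s = "f\xC3\xBCnf";        /* "fünf": 5 bytes, 4 characters */
  printf("%d\n", count_chars(s, 5));    /* prints 4 */
  return 0;
}
```
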
@@ -229092,6 +230718,8 @@ static void sqlite3Fts5HashScanEntry(
# error "FTS5_MAX_PREFIX_INDEXES is too large"
#endif
+#define FTS5_MAX_LEVEL 64
+
/*
** Details:
**
@@ -233806,10 +235434,10 @@ static Fts5Structure *fts5IndexOptimizeStruct(
if( pNew ){
Fts5StructureLevel *pLvl;
nByte = nSeg * sizeof(Fts5StructureSegment);
- pNew->nLevel = pStruct->nLevel+1;
+ pNew->nLevel = MIN(pStruct->nLevel+1, FTS5_MAX_LEVEL);
pNew->nRef = 1;
pNew->nWriteCounter = pStruct->nWriteCounter;
- pLvl = &pNew->aLevel[pStruct->nLevel];
+ pLvl = &pNew->aLevel[pNew->nLevel-1];
pLvl->aSeg = (Fts5StructureSegment*)sqlite3Fts5MallocZero(&p->rc, nByte);
if( pLvl->aSeg ){
int iLvl, iSeg;
@@ -234111,7 +235739,7 @@ static void fts5MergePrefixLists(
/* Initialize a doclist-iterator for each input buffer. Arrange them in
** a linked-list starting at pHead in ascending order of rowid. Avoid
** linking any iterators already at EOF into the linked list at all. */
- assert( nBuf+1<=sizeof(aMerger)/sizeof(aMerger[0]) );
+ assert( nBuf+1<=(int)(sizeof(aMerger)/sizeof(aMerger[0])) );
memset(aMerger, 0, sizeof(PrefixMerger)*(nBuf+1));
pHead = &aMerger[nBuf];
fts5DoclistIterInit(p1, &pHead->iter);
@@ -238654,7 +240282,7 @@ static void fts5SourceIdFunc(
){
assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused);
- sqlite3_result_text(pCtx, "fts5: 2022-12-28 14:03:47 df5c253c0b3dd24916e4ec7cf77d3db5294cc9fd45ae7b9c5e82ad8197f38a24", -1, SQLITE_TRANSIENT);
+ sqlite3_result_text(pCtx, "fts5: 2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da", -1, SQLITE_TRANSIENT);
}
/*
@@ -238727,7 +240355,9 @@ static int fts5Init(sqlite3 *db){
}
if( rc==SQLITE_OK ){
rc = sqlite3_create_function(
- db, "fts5_source_id", 0, SQLITE_UTF8, p, fts5SourceIdFunc, 0, 0
+ db, "fts5_source_id", 0,
+ SQLITE_UTF8|SQLITE_DETERMINISTIC|SQLITE_INNOCUOUS,
+ p, fts5SourceIdFunc, 0, 0
);
}
}
@@ -243392,6 +245022,10 @@ static int stmtConnect(
#define STMT_COLUMN_MEM 10 /* SQLITE_STMTSTATUS_MEMUSED */
+ (void)pAux;
+ (void)argc;
+ (void)argv;
+ (void)pzErr;
rc = sqlite3_declare_vtab(db,
"CREATE TABLE x(sql,ncol,ro,busy,nscan,nsort,naidx,nstep,"
"reprep,run,mem)");
@@ -243511,6 +245145,10 @@ static int stmtFilter(
sqlite3_int64 iRowid = 1;
StmtRow **ppRow = 0;
+ (void)idxNum;
+ (void)idxStr;
+ (void)argc;
+ (void)argv;
stmtCsrReset(pCur);
ppRow = &pCur->pRow;
for(p=sqlite3_next_stmt(pCur->db, 0); p; p=sqlite3_next_stmt(pCur->db, p)){
@@ -243566,6 +245204,7 @@ static int stmtBestIndex(
sqlite3_vtab *tab,
sqlite3_index_info *pIdxInfo
){
+ (void)tab;
pIdxInfo->estimatedCost = (double)500;
pIdxInfo->estimatedRows = 500;
return SQLITE_OK;
diff --git a/database/sqlite/sqlite3.h b/database/sqlite/sqlite3.h
index 24b916750..7e43e1f1b 100644
--- a/database/sqlite/sqlite3.h
+++ b/database/sqlite/sqlite3.h
@@ -146,9 +146,9 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.40.1"
-#define SQLITE_VERSION_NUMBER 3040001
-#define SQLITE_SOURCE_ID "2022-12-28 14:03:47 df5c253c0b3dd24916e4ec7cf77d3db5294cc9fd45ae7b9c5e82ad8197f38a24"
+#define SQLITE_VERSION "3.41.2"
+#define SQLITE_VERSION_NUMBER 3041002
+#define SQLITE_SOURCE_ID "2023-03-22 11:56:21 0d1fc92f94cb6b76bffe3ec34d69cffde2924203304e8ffc4155597af0c191da"
/*
** CAPI3REF: Run-Time Library Version Numbers
@@ -563,6 +563,7 @@ SQLITE_API int sqlite3_exec(
#define SQLITE_CONSTRAINT_DATATYPE (SQLITE_CONSTRAINT |(12<<8))
#define SQLITE_NOTICE_RECOVER_WAL (SQLITE_NOTICE | (1<<8))
#define SQLITE_NOTICE_RECOVER_ROLLBACK (SQLITE_NOTICE | (2<<8))
+#define SQLITE_NOTICE_RBU (SQLITE_NOTICE | (3<<8))
#define SQLITE_WARNING_AUTOINDEX (SQLITE_WARNING | (1<<8))
#define SQLITE_AUTH_USER (SQLITE_AUTH | (1<<8))
#define SQLITE_OK_LOAD_PERMANENTLY (SQLITE_OK | (1<<8))
@@ -1175,7 +1176,6 @@ struct sqlite3_io_methods {
** in wal mode after the client has finished copying pages from the wal
** file to the database file, but before the *-shm file is updated to
** record the fact that the pages have been checkpointed.
-** </ul>
**
** <li>[[SQLITE_FCNTL_EXTERNAL_READER]]
** The EXPERIMENTAL [SQLITE_FCNTL_EXTERNAL_READER] opcode is used to detect
@@ -1188,16 +1188,16 @@ struct sqlite3_io_methods {
** the database is not a wal-mode db, or if there is no such connection in any
** other process. This opcode cannot be used to detect transactions opened
** by clients within the current process, only within other processes.
-** </ul>
**
** <li>[[SQLITE_FCNTL_CKSM_FILE]]
-** Used by the cksmvfs VFS module only.
+** The [SQLITE_FCNTL_CKSM_FILE] opcode is for use internally by the
+** [checksum VFS shim] only.
**
** <li>[[SQLITE_FCNTL_RESET_CACHE]]
** If there is currently no transaction open on the database, and the
-** database is not a temp db, then this file-control purges the contents
-** of the in-memory page cache. If there is an open transaction, or if
-** the db is a temp-db, it is a no-op, not an error.
+** database is not a temp db, then the [SQLITE_FCNTL_RESET_CACHE] file-control
+** purges the contents of the in-memory page cache. If there is an open
+** transaction, or if the db is a temp-db, this opcode is a no-op, not an error.
** </ul>
*/
#define SQLITE_FCNTL_LOCKSTATE 1
@@ -2184,7 +2184,7 @@ struct sqlite3_mem_methods {
** configuration for a database connection can only be changed when that
** connection is not currently using lookaside memory, or in other words
** when the "current value" returned by
-** [sqlite3_db_status](D,[SQLITE_CONFIG_LOOKASIDE],...) is zero.
+** [sqlite3_db_status](D,[SQLITE_DBSTATUS_LOOKASIDE_USED],...) is zero.
** Any attempt to change the lookaside memory configuration when lookaside
** memory is in use leaves the configuration unchanged and returns
** [SQLITE_BUSY].)^</dd>
@@ -2334,8 +2334,12 @@ struct sqlite3_mem_methods {
** <li> sqlite3_db_config(db, SQLITE_DBCONFIG_RESET_DATABASE, 0, 0);
** </ol>
** Because resetting a database is destructive and irreversible, the
-** process requires the use of this obscure API and multiple steps to help
-** ensure that it does not happen by accident.
+** process requires the use of this obscure API and multiple steps to
+** help ensure that it does not happen by accident. Because this
+** feature must be capable of resetting corrupt databases, and
+** shutting down virtual tables may require access to that corrupt
+** storage, the library must abandon any installed virtual tables
+** without calling their xDestroy() methods.
**
** [[SQLITE_DBCONFIG_DEFENSIVE]] <dt>SQLITE_DBCONFIG_DEFENSIVE</dt>
** <dd>The SQLITE_DBCONFIG_DEFENSIVE option activates or deactivates the
@@ -2674,8 +2678,12 @@ SQLITE_API sqlite3_int64 sqlite3_total_changes64(sqlite3*);
** ^A call to sqlite3_interrupt(D) that occurs when there are no running
** SQL statements is a no-op and has no effect on SQL statements
** that are started after the sqlite3_interrupt() call returns.
+**
+** ^The [sqlite3_is_interrupted(D)] interface can be used to determine whether
+** or not an interrupt is currently in effect for [database connection] D.
*/
SQLITE_API void sqlite3_interrupt(sqlite3*);
+SQLITE_API int sqlite3_is_interrupted(sqlite3*);
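
A minimal sketch of how the new sqlite3_is_interrupted() might be used: a long-running helper polls it so work stops soon after another thread calls sqlite3_interrupt(). The slow_step() name and its work loop are illustrative only:

```c
#include <sqlite3.h>

/* Illustrative long-running helper: give up promptly once another
** thread has called sqlite3_interrupt() on this connection. */
static int slow_step(sqlite3 *db, int nIter){
  for(int i=0; i<nIter; i++){
    if( sqlite3_is_interrupted(db) ) return SQLITE_INTERRUPT;
    /* ... one unit of expensive work ... */
  }
  return SQLITE_OK;
}
```
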
/*
** CAPI3REF: Determine If An SQL Statement Is Complete
@@ -3293,8 +3301,8 @@ SQLITE_API SQLITE_DEPRECATED void *sqlite3_profile(sqlite3*,
** <dd>^An SQLITE_TRACE_PROFILE callback provides approximately the same
** information as is provided by the [sqlite3_profile()] callback.
** ^The P argument is a pointer to the [prepared statement] and the
-** X argument points to a 64-bit integer which is the estimated of
-** the number of nanosecond that the prepared statement took to run.
+** X argument points to a 64-bit integer which is approximately
+** the number of nanoseconds that the prepared statement took to run.
** ^The SQLITE_TRACE_PROFILE callback is invoked when the statement finishes.
**
** [[SQLITE_TRACE_ROW]] <dt>SQLITE_TRACE_ROW</dt>
@@ -3357,7 +3365,7 @@ SQLITE_API int sqlite3_trace_v2(
**
** ^The sqlite3_progress_handler(D,N,X,P) interface causes the callback
** function X to be invoked periodically during long running calls to
-** [sqlite3_exec()], [sqlite3_step()] and [sqlite3_get_table()] for
+** [sqlite3_step()] and [sqlite3_prepare()] and similar routines for
** database connection D. An example use for this
** interface is to keep a GUI updated during a large query.
**
@@ -3382,6 +3390,13 @@ SQLITE_API int sqlite3_trace_v2(
** Note that [sqlite3_prepare_v2()] and [sqlite3_step()] both modify their
** database connections for the meaning of "modify" in this paragraph.
**
+** The progress handler callback was originally invoked only from the
+** bytecode engine. It still might be invoked during [sqlite3_prepare()]
+** and similar because those routines might force a reparse of the schema
+** which involves running the bytecode engine. However, beginning with
+** SQLite version 3.41.0, the progress handler callback might also be
+** invoked directly from [sqlite3_prepare()] while analyzing and generating
+** code for complex queries.
*/
SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
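
For contrast with the interrupt API above, a sketch of registering the progress handler itself; given the new note that it can also fire from sqlite3_prepare(), the callback is kept trivially cheap. cancel_requested is an assumed application-side flag:

```c
#include <sqlite3.h>

static volatile int cancel_requested = 0;   /* assumed: set by a UI thread */

/* Returning non-zero aborts the current operation with SQLITE_INTERRUPT. */
static int on_progress(void *pArg){
  (void)pArg;
  return cancel_requested;
}

static void install_progress(sqlite3 *db){
  /* Invoke the handler roughly every 1000 virtual-machine instructions. */
  sqlite3_progress_handler(db, 1000, on_progress, 0);
}
```
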
@@ -3418,13 +3433,18 @@ SQLITE_API void sqlite3_progress_handler(sqlite3*, int, int(*)(void*), void*);
**
** <dl>
** ^(<dt>[SQLITE_OPEN_READONLY]</dt>
-** <dd>The database is opened in read-only mode. If the database does not
-** already exist, an error is returned.</dd>)^
+** <dd>The database is opened in read-only mode. If the database does
+** not already exist, an error is returned.</dd>)^
**
** ^(<dt>[SQLITE_OPEN_READWRITE]</dt>
-** <dd>The database is opened for reading and writing if possible, or reading
-** only if the file is write protected by the operating system. In either
-** case the database must already exist, otherwise an error is returned.</dd>)^
+** <dd>The database is opened for reading and writing if possible, or
+** reading only if the file is write protected by the operating
+** system. In either case the database must already exist, otherwise
+** an error is returned. For historical reasons, if opening in
+** read-write mode fails due to OS-level permissions, an attempt is
+** made to open it in read-only mode. [sqlite3_db_readonly()] can be
+** used to determine whether the database is actually
+** read-write.</dd>)^
**
** ^(<dt>[SQLITE_OPEN_READWRITE] | [SQLITE_OPEN_CREATE]</dt>
** <dd>The database is opened for reading and writing, and is created if
@@ -5405,10 +5425,21 @@ SQLITE_API int sqlite3_create_window_function(
** from top-level SQL, and cannot be used in VIEWs or TRIGGERs nor in
** schema structures such as [CHECK constraints], [DEFAULT clauses],
** [expression indexes], [partial indexes], or [generated columns].
-** The SQLITE_DIRECTONLY flags is a security feature which is recommended
-** for all [application-defined SQL functions], and especially for functions
-** that have side-effects or that could potentially leak sensitive
-** information.
+** <p>
+** The SQLITE_DIRECTONLY flag is recommended for any
+** [application-defined SQL function]
+** that has side-effects or that could potentially leak sensitive information.
+** This will prevent attacks in which an application is tricked
+** into using a database file that has had its schema surreptitiously
+** modified to invoke the application-defined function in ways that are
+** harmful.
+** <p>
+** Some people say it is good practice to set SQLITE_DIRECTONLY on all
+** [application-defined SQL functions], regardless of whether or not they
+** are security sensitive, as doing so prevents those functions from being used
+** inside of the database schema, and thus ensures that the database
+** can be inspected and modified using generic tools (such as the [CLI])
+** that do not have access to the application-defined functions.
** </dd>
**
** [[SQLITE_INNOCUOUS]] <dt>SQLITE_INNOCUOUS</dt><dd>
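
A sketch of what the SQLITE_DIRECTONLY recommendation above looks like at registration time; send_mail_func and its side effect are hypothetical:

```c
#include <sqlite3.h>

/* Hypothetical side-effecting function: the body is a stand-in. */
static void send_mail_func(sqlite3_context *ctx, int argc, sqlite3_value **argv){
  (void)argc; (void)argv;
  /* ... perform the side effect ... */
  sqlite3_result_int(ctx, 1);
}

static int register_send_mail(sqlite3 *db){
  /* SQLITE_DIRECTONLY: callable from top-level SQL only, never from
  ** triggers, views, or other schema structures. */
  return sqlite3_create_function(db, "send_mail", 1,
             SQLITE_UTF8|SQLITE_DIRECTONLY, 0, send_mail_func, 0, 0);
}
```
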
@@ -5549,16 +5580,6 @@ SQLITE_API SQLITE_DEPRECATED int sqlite3_memory_alarm(void(*)(void*,sqlite3_int6
** then the conversion is performed. Otherwise no conversion occurs.
** The [SQLITE_INTEGER | datatype] after conversion is returned.)^
**
-** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8],
-** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current encoding
-** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X)
-** returns something other than SQLITE_TEXT, then the return value from
-** sqlite3_value_encoding(X) is meaningless. ^Calls to
-** sqlite3_value_text(X), sqlite3_value_text16(X), sqlite3_value_text16be(X),
-** sqlite3_value_text16le(X), sqlite3_value_bytes(X), or
-** sqlite3_value_bytes16(X) might change the encoding of the value X and
-** thus change the return from subsequent calls to sqlite3_value_encoding(X).
-**
** ^Within the [xUpdate] method of a [virtual table], the
** sqlite3_value_nochange(X) interface returns true if and only if
** the column corresponding to X is unchanged by the UPDATE operation
@@ -5623,6 +5644,27 @@ SQLITE_API int sqlite3_value_type(sqlite3_value*);
SQLITE_API int sqlite3_value_numeric_type(sqlite3_value*);
SQLITE_API int sqlite3_value_nochange(sqlite3_value*);
SQLITE_API int sqlite3_value_frombind(sqlite3_value*);
+
+/*
+** CAPI3REF: Report the internal text encoding state of an sqlite3_value object
+** METHOD: sqlite3_value
+**
+** ^(The sqlite3_value_encoding(X) interface returns one of [SQLITE_UTF8],
+** [SQLITE_UTF16BE], or [SQLITE_UTF16LE] according to the current text encoding
+** of the value X, assuming that X has type TEXT.)^ If sqlite3_value_type(X)
+** returns something other than SQLITE_TEXT, then the return value from
+** sqlite3_value_encoding(X) is meaningless. ^Calls to
+** [sqlite3_value_text(X)], [sqlite3_value_text16(X)], [sqlite3_value_text16be(X)],
+** [sqlite3_value_text16le(X)], [sqlite3_value_bytes(X)], or
+** [sqlite3_value_bytes16(X)] might change the encoding of the value X and
+** thus change the return from subsequent calls to sqlite3_value_encoding(X).
+**
+** This routine is intended for use by applications that test and validate
+** the SQLite implementation. This routine inquires about the opaque
+** internal state of an [sqlite3_value] object. Ordinary applications should
+** not need to know what the internal state of an sqlite3_value object is and
+** hence should not need to use this interface.
+*/
SQLITE_API int sqlite3_value_encoding(sqlite3_value*);
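
In the spirit of the relocated note, which reserves this interface for validating SQLite itself, a tiny test-harness style helper with an illustrative name:

```c
#include <sqlite3.h>

/* Test-harness helper: label the internal encoding of a TEXT value. */
static const char *enc_name(sqlite3_value *pVal){
  if( sqlite3_value_type(pVal)!=SQLITE_TEXT ) return "n/a";
  switch( sqlite3_value_encoding(pVal) ){
    case SQLITE_UTF8:    return "utf8";
    case SQLITE_UTF16BE: return "utf16be";
    case SQLITE_UTF16LE: return "utf16le";
    default:             return "?";
  }
}
```
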
/*
@@ -7004,15 +7046,6 @@ SQLITE_API int sqlite3_cancel_auto_extension(void(*xEntryPoint)(void));
SQLITE_API void sqlite3_reset_auto_extension(void);
/*
-** The interface to the virtual-table mechanism is currently considered
-** to be experimental. The interface might change in incompatible ways.
-** If this is a problem for you, do not use the interface at this time.
-**
-** When the virtual-table mechanism stabilizes, we will declare the
-** interface fixed, support it indefinitely, and remove this comment.
-*/
-
-/*
** Structures used by the virtual table interface
*/
typedef struct sqlite3_vtab sqlite3_vtab;
@@ -7130,10 +7163,10 @@ struct sqlite3_module {
** when the omit flag is true there is no guarantee that the constraint will
** not be checked again using byte code.)^
**
-** ^The idxNum and idxPtr values are recorded and passed into the
+** ^The idxNum and idxStr values are recorded and passed into the
** [xFilter] method.
-** ^[sqlite3_free()] is used to free idxPtr if and only if
-** needToFreeIdxPtr is true.
+** ^[sqlite3_free()] is used to free idxStr if and only if
+** needToFreeIdxStr is true.
**
** ^The orderByConsumed means that output from [xFilter]/[xNext] will occur in
** the correct order to satisfy the ORDER BY clause so that no separate
@@ -7253,7 +7286,7 @@ struct sqlite3_index_info {
** the [sqlite3_vtab_collation()] interface. For most real-world virtual
** tables, the collating sequence of constraints does not matter (for example
** because the constraints are numeric) and so the sqlite3_vtab_collation()
-** interface is no commonly needed.
+** interface is not commonly needed.
*/
#define SQLITE_INDEX_CONSTRAINT_EQ 2
#define SQLITE_INDEX_CONSTRAINT_GT 4
@@ -7413,16 +7446,6 @@ SQLITE_API int sqlite3_declare_vtab(sqlite3*, const char *zSQL);
SQLITE_API int sqlite3_overload_function(sqlite3*, const char *zFuncName, int nArg);
/*
-** The interface to the virtual-table mechanism defined above (back up
-** to a comment remarkably similar to this one) is currently considered
-** to be experimental. The interface might change in incompatible ways.
-** If this is a problem for you, do not use the interface at this time.
-**
-** When the virtual-table mechanism stabilizes, we will declare the
-** interface fixed, support it indefinitely, and remove this comment.
-*/
-
-/*
** CAPI3REF: A Handle To An Open BLOB
** KEYWORDS: {BLOB handle} {BLOB handles}
**
@@ -9625,7 +9648,7 @@ SQLITE_API int sqlite3_vtab_nochange(sqlite3_context*);
** <li><p> Otherwise, "BINARY" is returned.
** </ol>
*/
-SQLITE_API SQLITE_EXPERIMENTAL const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
+SQLITE_API const char *sqlite3_vtab_collation(sqlite3_index_info*,int);
/*
** CAPI3REF: Determine if a virtual table query is DISTINCT
@@ -9782,21 +9805,20 @@ SQLITE_API int sqlite3_vtab_in(sqlite3_index_info*, int iCons, int bHandle);
** is undefined and probably harmful.
**
** The X parameter in a call to sqlite3_vtab_in_first(X,P) or
-** sqlite3_vtab_in_next(X,P) must be one of the parameters to the
+** sqlite3_vtab_in_next(X,P) should be one of the parameters to the
** xFilter method which invokes these routines, and specifically
** a parameter that was previously selected for all-at-once IN constraint
** processing using the [sqlite3_vtab_in()] interface in the
** [xBestIndex|xBestIndex method]. ^(If the X parameter is not
** an xFilter argument that was selected for all-at-once IN constraint
-** processing, then these routines return [SQLITE_MISUSE])^ or perhaps
-** exhibit some other undefined or harmful behavior.
+** processing, then these routines return [SQLITE_ERROR].)^
**
** ^(Use these routines to access all values on the right-hand side
** of the IN constraint using code like the following:
**
** <blockquote><pre>
** &nbsp; for(rc=sqlite3_vtab_in_first(pList, &pVal);
-** &nbsp; rc==SQLITE_OK && pVal
+** &nbsp; rc==SQLITE_OK && pVal;
** &nbsp; rc=sqlite3_vtab_in_next(pList, &pVal)
** &nbsp; ){
** &nbsp; // do something with pVal
@@ -9894,6 +9916,10 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
** managed by the prepared statement S and will be automatically freed when
** S is finalized.
**
+** Not all values are available for all query elements. When a value is
+** not available, the output variable is set to -1 if the value is numeric,
+** or to NULL if it is a string (SQLITE_SCANSTAT_NAME).
+**
** <dl>
** [[SQLITE_SCANSTAT_NLOOP]] <dt>SQLITE_SCANSTAT_NLOOP</dt>
** <dd>^The [sqlite3_int64] variable pointed to by the V parameter will be
@@ -9921,12 +9947,24 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
** to a zero-terminated UTF-8 string containing the [EXPLAIN QUERY PLAN]
** description for the X-th loop.
**
-** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECT</dt>
+** [[SQLITE_SCANSTAT_SELECTID]] <dt>SQLITE_SCANSTAT_SELECTID</dt>
** <dd>^The "int" variable pointed to by the V parameter will be set to the
-** "select-id" for the X-th loop. The select-id identifies which query or
-** subquery the loop is part of. The main query has a select-id of zero.
-** The select-id is the same value as is output in the first column
-** of an [EXPLAIN QUERY PLAN] query.
+** id for the X-th query plan element. The id value is unique within the
+** statement. The select-id is the same value as is output in the first
+** column of an [EXPLAIN QUERY PLAN] query.
+**
+** [[SQLITE_SCANSTAT_PARENTID]] <dt>SQLITE_SCANSTAT_PARENTID</dt>
+** <dd>The "int" variable pointed to by the V parameter will be set to the
+** the id of the parent of the current query element, if applicable, or
+** to zero if the query element has no parent. This is the same value as
+** returned in the second column of an [EXPLAIN QUERY PLAN] query.
+**
+** [[SQLITE_SCANSTAT_NCYCLE]] <dt>SQLITE_SCANSTAT_NCYCLE</dt>
+** <dd>The sqlite3_int64 output value is set to the number of cycles,
+** according to the processor time-stamp counter, that elapsed while the
+** query element was being processed. This value is not available for
+** all query elements - if it is unavailable the output variable is
+** set to -1.
** </dl>
*/
#define SQLITE_SCANSTAT_NLOOP 0
@@ -9935,12 +9973,14 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
#define SQLITE_SCANSTAT_NAME 3
#define SQLITE_SCANSTAT_EXPLAIN 4
#define SQLITE_SCANSTAT_SELECTID 5
+#define SQLITE_SCANSTAT_PARENTID 6
+#define SQLITE_SCANSTAT_NCYCLE 7
/*
** CAPI3REF: Prepared Statement Scan Status
** METHOD: sqlite3_stmt
**
-** This interface returns information about the predicted and measured
+** These interfaces return information about the predicted and measured
** performance for pStmt. Advanced applications can use this
** interface to compare the predicted and the measured performance and
** issue warnings and/or rerun [ANALYZE] if discrepancies are found.
@@ -9951,19 +9991,25 @@ SQLITE_API int sqlite3_vtab_rhs_value(sqlite3_index_info*, int, sqlite3_value **
**
** The "iScanStatusOp" parameter determines which status information to return.
** The "iScanStatusOp" must be one of the [scanstatus options] or the behavior
-** of this interface is undefined.
-** ^The requested measurement is written into a variable pointed to by
-** the "pOut" parameter.
-** Parameter "idx" identifies the specific loop to retrieve statistics for.
-** Loops are numbered starting from zero. ^If idx is out of range - less than
-** zero or greater than or equal to the total number of loops used to implement
-** the statement - a non-zero value is returned and the variable that pOut
-** points to is unchanged.
-**
-** ^Statistics might not be available for all loops in all statements. ^In cases
-** where there exist loops with no available statistics, this function behaves
-** as if the loop did not exist - it returns non-zero and leave the variable
-** that pOut points to unchanged.
+** of this interface is undefined. ^The requested measurement is written into
+** a variable pointed to by the "pOut" parameter.
+**
+** The "flags" parameter must be passed a mask of flags. At present only
+** one flag is defined - SQLITE_SCANSTAT_COMPLEX. If SQLITE_SCANSTAT_COMPLEX
+** is specified, then status information is available for all elements
+** of a query plan that are reported by "EXPLAIN QUERY PLAN" output. If
+** SQLITE_SCANSTAT_COMPLEX is not specified, then only query plan elements
+** that correspond to query loops (the "SCAN..." and "SEARCH..." elements of
+** the EXPLAIN QUERY PLAN output) are available. Invoking API
+** sqlite3_stmt_scanstatus() is equivalent to calling
+** sqlite3_stmt_scanstatus_v2() with a zeroed flags parameter.
+**
+** Parameter "idx" identifies the specific query element to retrieve statistics
+** for. Query elements are numbered starting from zero. A value of -1 may be
+** passed to query for statistics regarding the entire query. ^If idx is out of range
+** - less than -1 or greater than or equal to the total number of query
+** elements used to implement the statement - a non-zero value is returned and
+** the variable that pOut points to is unchanged.
**
** See also: [sqlite3_stmt_scanstatus_reset()]
*/
@@ -9973,6 +10019,19 @@ SQLITE_API int sqlite3_stmt_scanstatus(
int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
void *pOut /* Result written here */
);
+SQLITE_API int sqlite3_stmt_scanstatus_v2(
+ sqlite3_stmt *pStmt, /* Prepared statement for which info desired */
+ int idx, /* Index of loop to report on */
+ int iScanStatusOp, /* Information desired. SQLITE_SCANSTAT_* */
+ int flags, /* Mask of flags defined below */
+ void *pOut /* Result written here */
+);
+
+/*
+** CAPI3REF: Prepared Statement Scan Status
+** KEYWORDS: {scan status flags}
+*/
+#define SQLITE_SCANSTAT_COMPLEX 0x0001
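
A hedged sketch of iterating every query-plan element via the new interface, relying only on the semantics documented above (non-zero return for out-of-range idx; unavailable values reported as -1 or NULL). A build with SQLITE_ENABLE_STMT_SCANSTATUS is assumed:

```c
#include <sqlite3.h>
#include <stdio.h>

/* Walk every query-plan element of pStmt and print its name and cycle
** count. Stops when idx runs past the last element (non-zero return). */
static void dump_scanstats(sqlite3_stmt *pStmt){
  for(int idx=0; ; idx++){
    const char *zName = 0;
    sqlite3_int64 nCycle = -1;
    if( sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NAME,
                                   SQLITE_SCANSTAT_COMPLEX, &zName) ) break;
    sqlite3_stmt_scanstatus_v2(pStmt, idx, SQLITE_SCANSTAT_NCYCLE,
                               SQLITE_SCANSTAT_COMPLEX, &nCycle);
    printf("%d: %s ncycle=%lld\n", idx, zName ? zName : "(unnamed)",
           (long long)nCycle);
  }
}
```
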
/*
** CAPI3REF: Zero Scan-Status Counters
@@ -10063,6 +10122,10 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** function is not defined for operations on WITHOUT ROWID tables, or for
** DELETE operations on rowid tables.
**
+** ^The sqlite3_preupdate_hook(D,C,P) function returns the P argument from
+** the previous call on the same [database connection] D, or NULL for
+** the first call on D.
+**
** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
** provide additional information about a preupdate event. These routines
@@ -10468,6 +10531,19 @@ SQLITE_API int sqlite3_deserialize(
# undef double
#endif
+#if defined(__wasi__)
+# undef SQLITE_WASI
+# define SQLITE_WASI 1
+# undef SQLITE_OMIT_WAL
+# define SQLITE_OMIT_WAL 1 /* because it requires shared memory APIs */
+# ifndef SQLITE_OMIT_LOAD_EXTENSION
+# define SQLITE_OMIT_LOAD_EXTENSION
+# endif
+# ifndef SQLITE_THREADSAFE
+# define SQLITE_THREADSAFE 0
+# endif
+#endif
+
#ifdef __cplusplus
} /* End of the 'extern "C"' block */
#endif
diff --git a/database/sqlite/sqlite_aclk_alert.c b/database/sqlite/sqlite_aclk_alert.c
index 1e5bd0b74..52d343acb 100644
--- a/database/sqlite/sqlite_aclk_alert.c
+++ b/database/sqlite/sqlite_aclk_alert.c
@@ -75,7 +75,7 @@ static inline bool is_event_from_alert_variable_config(uint32_t unique_id, char
return ret;
}
-#define MAX_REMOVED_PERIOD 86400
+#define MAX_REMOVED_PERIOD 604800 // one week, in seconds
//decide if some events should be sent or not
#define SQL_SELECT_ALERT_BY_ID "SELECT hl.new_status, hl.config_hash_id, hl.unique_id FROM health_log_%s hl, aclk_alert_%s aa " \
@@ -255,6 +255,29 @@ int rrdcalc_status_to_proto_enum(RRDCALC_STATUS status)
#endif
}
+/* Return a strdupz'd lower-case UUID string from blob column iCol ("" if NULL). */
+static inline char *sqlite3_uuid_unparse_strdupz(sqlite3_stmt *res, int iCol) {
+ char uuid_str[UUID_STR_LEN];
+
+ if(sqlite3_column_type(res, iCol) == SQLITE_NULL)
+ uuid_str[0] = '\0';
+ else
+ uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, iCol)), uuid_str);
+
+ return strdupz(uuid_str);
+}
+
+/* Return a strdupz'd copy of text column iCol, mapping NULL to "". */
+static inline char *sqlite3_text_strdupz_empty(sqlite3_stmt *res, int iCol) {
+ char *ret;
+
+ if(sqlite3_column_type(res, iCol) == SQLITE_NULL)
+ ret = "";
+ else
+ ret = (char *)sqlite3_column_text(res, iCol);
+
+ return strdupz(ret);
+}
+
+
void aclk_push_alert_event(struct aclk_sync_host_config *wc)
{
#ifndef ENABLE_ACLK
@@ -285,7 +308,7 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc)
buffer_sprintf(sql, "select aa.sequence_id, hl.unique_id, hl.alarm_id, hl.config_hash_id, hl.updated_by_id, hl.when_key, " \
" hl.duration, hl.non_clear_duration, hl.flags, hl.exec_run_timestamp, hl.delay_up_to_timestamp, hl.name, " \
" hl.chart, hl.family, hl.exec, hl.recipient, hl.source, hl.units, hl.info, hl.exec_code, hl.new_status, " \
- " hl.old_status, hl.delay, hl.new_value, hl.old_value, hl.last_repeat, hl.chart_context " \
+ " hl.old_status, hl.delay, hl.new_value, hl.old_value, hl.last_repeat, hl.chart_context, hl.transition_id, hl.alarm_event_id " \
" from health_log_%s hl, aclk_alert_%s aa " \
" where hl.unique_id = aa.alert_unique_id and aa.date_submitted is null " \
" order by aa.sequence_id asc limit %d;", wc->uuid_str, wc->uuid_str, limit);
@@ -321,7 +344,6 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc)
}
}
- char uuid_str[GUID_LEN + 1];
uint64_t first_sequence_id = 0;
uint64_t last_sequence_id = 0;
static __thread uint64_t log_first_sequence_id = 0;
@@ -343,8 +365,7 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc)
//alarm_log.sequence_id = (uint64_t) sqlite3_column_int64(res, 0);
alarm_log.when = (time_t) sqlite3_column_int64(res, 5);
- uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, 3)), uuid_str);
- alarm_log.config_hash = strdupz((char *)uuid_str);
+ alarm_log.config_hash = sqlite3_uuid_unparse_strdupz(res, 3);
alarm_log.utc_offset = wc->host->utc_offset;
alarm_log.timezone = strdupz(rrdhost_abbrev_timezone(wc->host));
@@ -387,13 +408,12 @@ void aclk_push_alert_event(struct aclk_sync_host_config *wc)
alarm_log.old_value = (NETDATA_DOUBLE) sqlite3_column_double(res, 24);
alarm_log.updated = (sqlite3_column_int64(res, 8) & HEALTH_ENTRY_FLAG_UPDATED) ? 1 : 0;
- alarm_log.rendered_info = sqlite3_column_type(res, 18) == SQLITE_NULL ?
- strdupz((char *)"") :
- strdupz((char *)sqlite3_column_text(res, 18));
+ alarm_log.rendered_info = sqlite3_text_strdupz_empty(res, 18);
- alarm_log.chart_context = sqlite3_column_type(res, 26) == SQLITE_NULL ?
- strdupz((char *)"") :
- strdupz((char *)sqlite3_column_text(res, 26));
+ alarm_log.chart_context = sqlite3_text_strdupz_empty(res, 26);
+ alarm_log.transition_id = sqlite3_uuid_unparse_strdupz(res, 27);
+
+ alarm_log.event_id = (uint64_t) sqlite3_column_int64(res, 28);
aclk_send_alarm_log_entry(&alarm_log);
@@ -463,7 +483,7 @@ void aclk_push_alert_events_for_all_hosts(void)
void sql_queue_existing_alerts_to_aclk(RRDHOST *host)
{
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
BUFFER *sql = buffer_create(1024, &netdata_buffers_statistics.buffers_sqlite);
@@ -747,8 +767,10 @@ void aclk_process_send_alarm_snapshot(char *node_id, char *claim_id __maybe_unus
void health_alarm_entry2proto_nolock(struct alarm_log_entry *alarm_log, ALARM_ENTRY *ae, RRDHOST *host)
{
char *edit_command = ae->source ? health_edit_command_from_source(ae_source(ae)) : strdupz("UNKNOWN=0=UNKNOWN");
- char config_hash_id[GUID_LEN + 1];
+ char config_hash_id[UUID_STR_LEN];
uuid_unparse_lower(ae->config_hash_id, config_hash_id);
+ char transition_id[UUID_STR_LEN];
+ uuid_unparse_lower(ae->transition_id, transition_id);
alarm_log->chart = strdupz(ae_chart_name(ae));
alarm_log->name = strdupz(ae_name(ae));
@@ -790,6 +812,9 @@ void health_alarm_entry2proto_nolock(struct alarm_log_entry *alarm_log, ALARM_EN
alarm_log->rendered_info = strdupz(ae_info(ae));
alarm_log->chart_context = strdupz(ae_chart_context(ae));
+ alarm_log->transition_id = strdupz((char *)transition_id);
+ alarm_log->event_id = (uint64_t) ae->alarm_event_id;
+
freez(edit_command);
}
#endif
@@ -939,18 +964,14 @@ void aclk_push_alert_snapshot_event(char *node_id __maybe_unused)
#endif
}
-#define SQL_DELETE_ALERT_ENTRIES "DELETE FROM aclk_alert_%s WHERE filtered_alert_unique_id NOT IN (SELECT unique_id FROM health_log_%s);"
-
+#define SQL_DELETE_ALERT_ENTRIES "DELETE FROM aclk_alert_%s WHERE filtered_alert_unique_id + %d < UNIXEPOCH();"
void sql_aclk_alert_clean_dead_entries(RRDHOST *host)
{
- if (!claimed())
- return;
-
char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
- char sql[512];
- snprintfz(sql,511,SQL_DELETE_ALERT_ENTRIES, uuid_str, uuid_str);
+ char sql[ACLK_SYNC_QUERY_SIZE];
+ snprintfz(sql, ACLK_SYNC_QUERY_SIZE - 1, SQL_DELETE_ALERT_ENTRIES, uuid_str, MAX_REMOVED_PERIOD);
char *err_msg = NULL;
int rc = sqlite3_exec_monitored(db_meta, sql, NULL, NULL, &err_msg);
@@ -1038,6 +1059,7 @@ static inline int compare_active_alerts(const void * a, const void * b) {
return strcmp(active_alerts_a->name, active_alerts_b->name);
}
+#define BATCH_ALLOCATED 10
void aclk_push_alarm_checkpoint(RRDHOST *host __maybe_unused)
{
#ifdef ENABLE_ACLK
@@ -1047,21 +1069,18 @@ void aclk_push_alarm_checkpoint(RRDHOST *host __maybe_unused)
return;
}
- //TODO: make sure all pending events are sent.
if (rrdhost_flag_check(host, RRDHOST_FLAG_ACLK_STREAM_ALERTS)) {
//postpone checkpoint send
- wc->alert_checkpoint_req++;
+ wc->alert_checkpoint_req+=3;
log_access("ACLK REQ [%s (N/A)]: ALERTS CHECKPOINT POSTPONED", rrdhost_hostname(host));
return;
}
- //TODO: lock rc here, or make sure it's called when health decides
- //count them
RRDCALC *rc;
uint32_t cnt = 0;
size_t len = 0;
- active_alerts_t *active_alerts = NULL;
+ active_alerts_t *active_alerts = callocz(BATCH_ALLOCATED, sizeof(active_alerts_t));
foreach_rrdcalc_in_rrdhost_read(host, rc) {
if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
continue;
@@ -1069,33 +1088,21 @@ void aclk_push_alarm_checkpoint(RRDHOST *host __maybe_unused)
if (rc->status == RRDCALC_STATUS_WARNING ||
rc->status == RRDCALC_STATUS_CRITICAL) {
+ if (cnt && !(cnt % BATCH_ALLOCATED)) {
+ active_alerts = reallocz(active_alerts, (BATCH_ALLOCATED * ((cnt / BATCH_ALLOCATED) + 1)) * sizeof(active_alerts_t));
+ }
+
+ active_alerts[cnt].name = (char *)rrdcalc_name(rc);
+ len += string_strlen(rc->name);
+ active_alerts[cnt].chart = (char *)rrdcalc_chart_name(rc);
+ len += string_strlen(rc->chart);
+ active_alerts[cnt].status = rc->status;
+ len++;
cnt++;
}
}
foreach_rrdcalc_in_rrdhost_done(rc);
- if (cnt) {
- active_alerts = callocz(cnt, sizeof(active_alerts_t));
- cnt = 0;
- foreach_rrdcalc_in_rrdhost_read(host, rc) {
- if(unlikely(!rc->rrdset || !rc->rrdset->last_collected_time.tv_sec))
- continue;
-
- if (rc->status == RRDCALC_STATUS_WARNING ||
- rc->status == RRDCALC_STATUS_CRITICAL) {
-
- active_alerts[cnt].name = (char *)rrdcalc_name(rc);
- len += string_strlen(rc->name);
- active_alerts[cnt].chart = (char *)rrdcalc_chart_name(rc);
- len += string_strlen(rc->chart);
- active_alerts[cnt].status = rc->status;
- len++;
- cnt++;
- }
- }
- foreach_rrdcalc_in_rrdhost_done(rc);
- }
-
BUFFER *alarms_to_hash;
if (cnt) {
qsort (active_alerts, cnt, sizeof(active_alerts_t), compare_active_alerts);
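
The checkpoint hunk above replaces the old count-then-fill double scan with a single pass that grows the array in BATCH_ALLOCATED-sized steps. A standalone sketch of that growth rule, with plain calloc/realloc standing in for Netdata's callocz/reallocz:

```c
#include <stdlib.h>

#define BATCH 10

/* Append v to *pArr (current count *pCnt), growing the array a batch at a
** time - the same rule the new single-pass loop applies to active_alerts.
** Allocation failures are ignored here; netdata's reallocz() aborts instead. */
static void append_batched(int **pArr, size_t *pCnt, int v){
  if( *pArr==NULL )
    *pArr = calloc(BATCH, sizeof(int));
  else if( *pCnt && (*pCnt % BATCH)==0 )
    *pArr = realloc(*pArr, (BATCH * ((*pCnt / BATCH) + 1)) * sizeof(int));
  (*pArr)[(*pCnt)++] = v;
}
```
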
diff --git a/database/sqlite/sqlite_db_migration.c b/database/sqlite/sqlite_db_migration.c
index 3132ae2d0..9c7235fdb 100644
--- a/database/sqlite/sqlite_db_migration.c
+++ b/database/sqlite/sqlite_db_migration.c
@@ -12,7 +12,7 @@ static int return_int_cb(void *data, int argc, char **argv, char **column)
}
-static int table_exists_in_database(const char *table)
+int table_exists_in_database(const char *table)
{
char *err_msg = NULL;
char sql[128];
@@ -182,6 +182,38 @@ static int do_migration_v6_v7(sqlite3 *database, const char *name)
return 0;
}
+static int do_migration_v7_v8(sqlite3 *database, const char *name)
+{
+ info("Running database migration %s", name);
+
+ char sql[256];
+
+ int rc;
+ sqlite3_stmt *res = NULL;
+ snprintfz(sql, 255, "SELECT name FROM sqlite_schema WHERE type ='table' AND name LIKE 'health_log_%%';");
+ rc = sqlite3_prepare_v2(database, sql, -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to prepare statement to alter health_log tables");
+ return 1;
+ }
+
+ while (sqlite3_step_monitored(res) == SQLITE_ROW) {
+ char *table = strdupz((char *) sqlite3_column_text(res, 0));
+ if (!column_exists_in_table(table, "transition_id")) {
+ snprintfz(sql, 255, "ALTER TABLE %s ADD transition_id blob", table);
+ sqlite3_exec_monitored(database, sql, 0, 0, NULL);
+ }
+ freez(table);
+ }
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize statement when altering health_log tables, rc = %d", rc);
+
+ return 0;
+}
+
static int do_migration_noop(sqlite3 *database, const char *name)
{
@@ -233,6 +265,7 @@ DATABASE_FUNC_MIGRATION_LIST migration_action[] = {
{.name = "v4 to v5", .func = do_migration_v4_v5},
{.name = "v5 to v6", .func = do_migration_v5_v6},
{.name = "v6 to v7", .func = do_migration_v6_v7},
+ {.name = "v7 to v8", .func = do_migration_v7_v8},
// the terminator of this array
{.name = NULL, .func = NULL}
};
diff --git a/database/sqlite/sqlite_db_migration.h b/database/sqlite/sqlite_db_migration.h
index 138643a49..edaac5269 100644
--- a/database/sqlite/sqlite_db_migration.h
+++ b/database/sqlite/sqlite_db_migration.h
@@ -8,5 +8,6 @@
int perform_database_migration(sqlite3 *database, int target_version);
int perform_context_database_migration(sqlite3 *database, int target_version);
+int table_exists_in_database(const char *table);
#endif //NETDATA_SQLITE_DB_MIGRATION_H
diff --git a/database/sqlite/sqlite_functions.c b/database/sqlite/sqlite_functions.c
index 2fca2dfc8..555db1011 100644
--- a/database/sqlite/sqlite_functions.c
+++ b/database/sqlite/sqlite_functions.c
@@ -3,7 +3,7 @@
#include "sqlite_functions.h"
#include "sqlite_db_migration.h"
-#define DB_METADATA_VERSION 7
+#define DB_METADATA_VERSION 8
const char *database_config[] = {
"CREATE TABLE IF NOT EXISTS host(host_id BLOB PRIMARY KEY, hostname TEXT NOT NULL, "
diff --git a/database/sqlite/sqlite_health.c b/database/sqlite/sqlite_health.c
index dd08f63ec..aedbf1108 100644
--- a/database/sqlite/sqlite_health.c
+++ b/database/sqlite/sqlite_health.c
@@ -2,6 +2,8 @@
#include "sqlite_health.h"
#include "sqlite_functions.h"
+#include "sqlite_db_migration.h"
+#include "uuid.h"
#define MAX_HEALTH_SQL_SIZE 2048
#define sqlite3_bind_string_or_null(res,key,param) ((key) ? sqlite3_bind_text(res, param, string2str(key), -1, SQLITE_STATIC) : sqlite3_bind_null(res, param))
@@ -9,7 +11,7 @@
/* Health related SQL queries
Creates a health log table in sqlite, one per host guid
*/
-#define SQL_CREATE_HEALTH_LOG_TABLE(guid) "CREATE TABLE IF NOT EXISTS health_log_%s(hostname text, unique_id int, alarm_id int, alarm_event_id int, config_hash_id blob, updated_by_id int, updates_id int, when_key int, duration int, non_clear_duration int, flags int, exec_run_timestamp int, delay_up_to_timestamp int, name text, chart text, family text, exec text, recipient text, source text, units text, info text, exec_code int, new_status real, old_status real, delay int, new_value double, old_value double, last_repeat int, class text, component text, type text, chart_context text);", guid
+#define SQL_CREATE_HEALTH_LOG_TABLE(guid) "CREATE TABLE IF NOT EXISTS health_log_%s(hostname text, unique_id int, alarm_id int, alarm_event_id int, config_hash_id blob, updated_by_id int, updates_id int, when_key int, duration int, non_clear_duration int, flags int, exec_run_timestamp int, delay_up_to_timestamp int, name text, chart text, family text, exec text, recipient text, source text, units text, info text, exec_code int, new_status real, old_status real, delay int, new_value double, old_value double, last_repeat int, class text, component text, type text, chart_context text, transition_id blob);", guid
int sql_create_health_log_table(RRDHOST *host) {
int rc;
char command[MAX_HEALTH_SQL_SIZE + 1];
@@ -20,7 +22,7 @@ int sql_create_health_log_table(RRDHOST *host) {
return 1;
}
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CREATE_HEALTH_LOG_TABLE(uuid_str));
@@ -53,7 +55,7 @@ void sql_health_alarm_log_update(RRDHOST *host, ALARM_ENTRY *ae) {
return;
}
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_UPDATE_HEALTH_LOG(uuid_str));
@@ -115,7 +117,7 @@ failed:
"config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, " \
"exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, " \
"units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, " \
- "class, component, type, chart_context) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);", guid
+ "class, component, type, chart_context, transition_id) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);", guid
void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) {
sqlite3_stmt *res = NULL;
@@ -128,7 +130,7 @@ void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) {
return;
}
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_INSERT_HEALTH_LOG(uuid_str));
@@ -335,6 +337,12 @@ void sql_health_alarm_log_insert(RRDHOST *host, ALARM_ENTRY *ae) {
goto failed;
}
+ rc = sqlite3_bind_blob(res, 33, &ae->transition_id, sizeof(ae->transition_id), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind transition_id parameter for SQL_INSERT_HEALTH_LOG");
+ goto failed;
+ }
+
rc = execute_insert(res);
if (unlikely(rc != SQLITE_DONE)) {
error_report("HEALTH [%s]: Failed to execute SQL_INSERT_HEALTH_LOG, rc = %d", rrdhost_hostname(host), rc);
@@ -353,39 +361,72 @@ void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae)
{
if (ae->flags & HEALTH_ENTRY_FLAG_SAVED)
sql_health_alarm_log_update(host, ae);
- else
+ else {
sql_health_alarm_log_insert(host, ae);
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting) {
+ sql_queue_alarm_to_aclk(host, ae, 0);
+ }
+#endif
+ }
}
/* Health related SQL queries
- Cleans up the health_log table.
+ Get a count of rows from health log table
*/
-#define SQL_CLEANUP_HEALTH_LOG(guid,guid2,limit) "DELETE from health_log_%s where unique_id in (SELECT unique_id from health_log_%s order by unique_id asc LIMIT %lu);", guid, guid2, limit
-void sql_health_alarm_log_cleanup(RRDHOST *host) {
+#define SQL_COUNT_HEALTH_LOG(guid) "SELECT count(1) FROM health_log_%s;", guid
+void sql_health_alarm_log_count(RRDHOST *host) {
sqlite3_stmt *res = NULL;
- static size_t rotate_every = 0;
int rc;
char command[MAX_HEALTH_SQL_SIZE + 1];
- if(unlikely(rotate_every == 0)) {
- rotate_every = (size_t)config_get_number(CONFIG_SECTION_HEALTH, "rotate log every lines", 2000);
- if(rotate_every < 100) rotate_every = 100;
+ if (unlikely(!db_meta)) {
+ if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
+ error_report("Database has not been initialized");
+ return;
}
- if(likely(host->health.health_log_entries_written < rotate_every)) {
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
+
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_COUNT_HEALTH_LOG(uuid_str));
+
+ rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement to count health log entries from db");
return;
}
+ rc = sqlite3_step_monitored(res);
+ if (likely(rc == SQLITE_ROW))
+ host->health.health_log_entries_written = (size_t) sqlite3_column_int64(res, 0);
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize the prepared statement to count health log entries from db");
+
+ info("HEALTH [%s]: Table health_log_%s, contains %lu entries.", rrdhost_hostname(host), uuid_str, (unsigned long int) host->health.health_log_entries_written);
+}
+
+/* Health related SQL queries
+ Cleans up the health_log table on a non-claimed host
+*/
+#define SQL_CLEANUP_HEALTH_LOG_NOT_CLAIMED(guid,limit) "DELETE FROM health_log_%s ORDER BY unique_id ASC LIMIT %lu;", guid, limit
+void sql_health_alarm_log_cleanup_not_claimed(RRDHOST *host, size_t rotate_every) {
+ sqlite3_stmt *res = NULL;
+ int rc;
+ char command[MAX_HEALTH_SQL_SIZE + 1];
+
if (unlikely(!db_meta)) {
if (default_rrd_memory_mode == RRD_MEMORY_MODE_DBENGINE)
error_report("Database has not been initialized");
return;
}
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
- snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CLEANUP_HEALTH_LOG(uuid_str, uuid_str, (unsigned long int) (host->health.health_log_entries_written - rotate_every)));
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CLEANUP_HEALTH_LOG_NOT_CLAIMED(uuid_str, (unsigned long int) (host->health.health_log_entries_written - rotate_every)));
rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
if (unlikely(rc != SQLITE_OK)) {
@@ -403,14 +444,17 @@ void sql_health_alarm_log_cleanup(RRDHOST *host) {
host->health.health_log_entries_written = rotate_every;
- sql_aclk_alert_clean_dead_entries(host);
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, "aclk_alert_%s", uuid_str);
+ if (unlikely(table_exists_in_database(command))) {
+ sql_aclk_alert_clean_dead_entries(host);
+ }
}
/* Health related SQL queries
- Get a count of rows from health log table
+ Cleans up the health_log table on a claimed host
*/
-#define SQL_COUNT_HEALTH_LOG(guid) "SELECT count(1) FROM health_log_%s;", guid
-void sql_health_alarm_log_count(RRDHOST *host) {
+#define SQL_CLEANUP_HEALTH_LOG_CLAIMED(guid, guid2, guid3, limit) "DELETE from health_log_%s WHERE unique_id NOT IN (SELECT filtered_alert_unique_id FROM aclk_alert_%s) AND unique_id IN (SELECT unique_id FROM health_log_%s ORDER BY unique_id asc LIMIT %lu);", guid, guid2, guid3, limit
+void sql_health_alarm_log_cleanup_claimed(RRDHOST *host, size_t rotate_every) {
sqlite3_stmt *res = NULL;
int rc;
char command[MAX_HEALTH_SQL_SIZE + 1];
@@ -421,33 +465,62 @@ void sql_health_alarm_log_count(RRDHOST *host) {
return;
}
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, "aclk_alert_%s", uuid_str);
- snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_COUNT_HEALTH_LOG(uuid_str));
+ if (!table_exists_in_database(command)) {
+ sql_health_alarm_log_cleanup_not_claimed(host, rotate_every);
+ return;
+ }
+
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_CLEANUP_HEALTH_LOG_CLAIMED(uuid_str, uuid_str, uuid_str, (unsigned long int) (host->health.health_log_entries_written - rotate_every)));
rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
if (unlikely(rc != SQLITE_OK)) {
- error_report("Failed to prepare statement to count health log entries from db");
+ error_report("Failed to prepare statement to cleanup health log table");
return;
}
rc = sqlite3_step_monitored(res);
- if (likely(rc == SQLITE_ROW))
- host->health.health_log_entries_written = (size_t) sqlite3_column_int64(res, 0);
+ if (unlikely(rc != SQLITE_DONE))
+ error_report("Failed to cleanup health log table, rc = %d", rc);
rc = sqlite3_finalize(res);
if (unlikely(rc != SQLITE_OK))
- error_report("Failed to finalize the prepared statement to count health log entries from db");
+ error_report("Failed to finalize the prepared statement to cleanup health log table");
- info("HEALTH [%s]: Table health_log_%s, contains %lu entries.", rrdhost_hostname(host), uuid_str, (unsigned long int) host->health.health_log_entries_written);
+ sql_health_alarm_log_count(host);
+
+ sql_aclk_alert_clean_dead_entries(host);
+}
+
+/* Health related SQL queries
+ Cleans up the health_log table.
+*/
+void sql_health_alarm_log_cleanup(RRDHOST *host) {
+ static size_t rotate_every = 0;
+
+ if(unlikely(rotate_every == 0)) {
+ rotate_every = (size_t)config_get_number(CONFIG_SECTION_HEALTH, "rotate log every lines", 2000);
+ if(rotate_every < 100) rotate_every = 100;
+ }
+
+ if(likely(host->health.health_log_entries_written < rotate_every)) {
+ return;
+ }
+
+ if (!claimed()) {
+ sql_health_alarm_log_cleanup_not_claimed(host, rotate_every);
+ } else
+ sql_health_alarm_log_cleanup_claimed(host, rotate_every);
}
#define SQL_INJECT_REMOVED(guid, guid2) "insert into health_log_%s (hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, " \
-"delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, class, component, type, chart_context) " \
+"delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, class, component, type, chart_context, transition_id) " \
"select hostname, ?1, ?2, ?3, config_hash_id, 0, ?4, unixepoch(), 0, 0, flags, exec_run_timestamp, " \
-"unixepoch(), name, chart, family, exec, recipient, source, units, info, exec_code, -2, new_status, delay, NULL, new_value, 0, class, component, type, chart_context " \
-"from health_log_%s where unique_id = ?5", guid, guid2
+"unixepoch(), name, chart, family, exec, recipient, source, units, info, exec_code, -2, new_status, delay, NULL, new_value, 0, class, component, type, chart_context, ?5 " \
+"from health_log_%s where unique_id = ?6", guid, guid2
#define SQL_INJECT_REMOVED_UPDATE(guid) "update health_log_%s set flags = flags | ?1, updated_by_id = ?2 where unique_id = ?3; ", guid
void sql_inject_removed_status(char *uuid_str, uint32_t alarm_id, uint32_t alarm_event_id, uint32_t unique_id, uint32_t max_unique_id)
{
@@ -490,7 +563,15 @@ void sql_inject_removed_status(char *uuid_str, uint32_t alarm_id, uint32_t alarm
goto failed;
}
- rc = sqlite3_bind_int64(res, 5, (sqlite3_int64) unique_id);
+ uuid_t transition_id;
+ uuid_generate_random(transition_id);
+ rc = sqlite3_bind_blob(res, 5, &transition_id, sizeof(transition_id), SQLITE_STATIC);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to bind config_hash_id parameter for SQL_INSERT_HEALTH_LOG");
+ goto failed;
+ }
+
+ rc = sqlite3_bind_int64(res, 6, (sqlite3_int64) unique_id);
if (unlikely(rc != SQLITE_OK)) {
error_report("Failed to bind unique_id parameter for SQL_INJECT_REMOVED");
goto failed;
@@ -608,7 +689,7 @@ void sql_check_removed_alerts_state(char *uuid_str)
/* Health related SQL queries
Load from the health log table
*/
-#define SQL_LOAD_HEALTH_LOG(guid,limit) "SELECT hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, class, component, type, chart_context FROM (SELECT hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, class, component, type, chart_context FROM health_log_%s order by unique_id desc limit %u) order by unique_id asc;", guid, limit
+#define SQL_LOAD_HEALTH_LOG(guid) "SELECT hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, class, component, type, chart_context, transition_id FROM health_log_%s group by alarm_id having max(alarm_event_id);", guid
void sql_health_alarm_log_load(RRDHOST *host) {
sqlite3_stmt *res = NULL;
int ret;
@@ -623,12 +704,12 @@ void sql_health_alarm_log_load(RRDHOST *host) {
return;
}
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
sql_check_removed_alerts_state(uuid_str);
- snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_LOAD_HEALTH_LOG(uuid_str, host->health_log.max));
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_LOAD_HEALTH_LOG(uuid_str));
ret = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
if (unlikely(ret != SQLITE_OK)) {
@@ -778,6 +859,9 @@ void sql_health_alarm_log_load(RRDHOST *host) {
else
ae->chart_context = NULL;
+ if (sqlite3_column_type(res, 32) != SQLITE_NULL)
+ uuid_copy(ae->transition_id, *((uuid_t *) sqlite3_column_blob(res, 32)));
+
char value_string[100 + 1];
string_freez(ae->old_value_string);
string_freez(ae->new_value_string);
@@ -1070,12 +1154,13 @@ int alert_hash_and_store_config(
DIGEST_ALERT_CONFIG_VAL(cfg->options);
DIGEST_ALERT_CONFIG_VAL(cfg->repeat);
DIGEST_ALERT_CONFIG_VAL(cfg->host_labels);
+ DIGEST_ALERT_CONFIG_VAL(cfg->chart_labels);
EVP_DigestFinal_ex(evpctx, hash_value, &hash_len);
EVP_MD_CTX_destroy(evpctx);
fatal_assert(hash_len > sizeof(uuid_t));
- char uuid_str[GUID_LEN + 1];
+ char uuid_str[UUID_STR_LEN];
uuid_unparse_lower(*((uuid_t *)&hash_value), uuid_str);
uuid_copy(hash_id, *((uuid_t *)&hash_value));
@@ -1090,3 +1175,210 @@ int alert_hash_and_store_config(
return 1;
}
+
+#define SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT "SELECT new_status FROM health_log_%s WHERE alarm_id = %u AND unique_id != %u AND flags & %d ORDER BY unique_id DESC LIMIT 1"
+int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_STATUS *last_executed_status)
+{
+ int rc = 0, ret = -1;
+ char command[MAX_HEALTH_SQL_SIZE + 1];
+
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
+
+ sqlite3_stmt *res = NULL;
+
+ snprintfz(command, MAX_HEALTH_SQL_SIZE, SQL_SELECT_HEALTH_LAST_EXECUTED_EVENT, uuid_str, ae->alarm_id, ae->unique_id, HEALTH_ENTRY_FLAG_EXEC_RUN);
+
+ rc = sqlite3_prepare_v2(db_meta, command, -1, &res, 0);
+ if (rc != SQLITE_OK) {
+ error_report("Failed to prepare statement when trying to get last executed status");
+ return ret;
+ }
+
+ ret = 0;
+ while (sqlite3_step_monitored(res) == SQLITE_ROW) {
+ *last_executed_status = (RRDCALC_STATUS) sqlite3_column_int(res, 0);
+ ret = 1;
+ }
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize the statement.");
+
+ return ret;
+}
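+
+/*
+ * Usage sketch (hypothetical caller, not part of this change): the function
+ * returns 1 when a previously executed event was found (with its status in
+ * *last_executed_status), 0 when no such event exists, and -1 when the lookup
+ * itself failed.
+ *
+ *   RRDCALC_STATUS last_executed_status;
+ *   if (sql_health_get_last_executed_event(host, ae, &last_executed_status) == 1 &&
+ *       last_executed_status == ae->new_status) {
+ *       // the last notification already covered this status; a caller could
+ *       // choose to skip re-running the exec script here
+ *   }
+ */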
+
+#define SQL_SELECT_HEALTH_LOG(guid) "SELECT hostname, unique_id, alarm_id, alarm_event_id, config_hash_id, updated_by_id, updates_id, when_key, duration, non_clear_duration, flags, exec_run_timestamp, delay_up_to_timestamp, name, chart, family, exec, recipient, source, units, info, exec_code, new_status, old_status, delay, new_value, old_value, last_repeat, class, component, type, chart_context, transition_id FROM health_log_%s WHERE 1=1 ", guid
+void sql_health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after, char *chart) {
+
+ buffer_strcat(wb, "[");
+
+ unsigned int max = host->health_log.max;
+ unsigned int count = 0;
+
+ sqlite3_stmt *res = NULL;
+ int rc;
+
+ BUFFER *command = buffer_create(MAX_HEALTH_SQL_SIZE, NULL);
+ char uuid_str[UUID_STR_LEN];
+ uuid_unparse_lower_fix(&host->host_uuid, uuid_str);
+
+ buffer_sprintf(command, SQL_SELECT_HEALTH_LOG(uuid_str));
+
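+ /* note: chart is spliced into the SQL text rather than bound as a parameter, so it must come from a trusted caller */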
+ if (chart) {
+ char chart_sql[MAX_HEALTH_SQL_SIZE + 1];
+ snprintfz(chart_sql, MAX_HEALTH_SQL_SIZE, "AND chart = '%s' ", chart);
+ buffer_strcat(command, chart_sql);
+ }
+
+ if (after) {
+ char after_sql[MAX_HEALTH_SQL_SIZE + 1];
+ snprintfz(after_sql, MAX_HEALTH_SQL_SIZE, "AND unique_id > %u ", after);
+ buffer_strcat(command, after_sql);
+ }
+
+ {
+ char limit_sql[MAX_HEALTH_SQL_SIZE + 1];
+ snprintfz(limit_sql, MAX_HEALTH_SQL_SIZE, "ORDER BY unique_id DESC LIMIT %u ", max);
+ buffer_strcat(command, limit_sql);
+ }
+
+ rc = sqlite3_prepare_v2(db_meta, buffer_tostring(command), -1, &res, 0);
+ if (unlikely(rc != SQLITE_OK)) {
+ error_report("Failed to prepare statement SQL_SELECT_HEALTH_LOG");
+ buffer_free(command);
+ return;
+ }
+
+ while (sqlite3_step(res) == SQLITE_ROW) {
+
+ char old_value_string[100 + 1];
+ char new_value_string[100 + 1];
+
+ char config_hash_id[UUID_STR_LEN];
+ uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, 4)), config_hash_id);
+
+ char transition_id[UUID_STR_LEN] = {0};
+ if (sqlite3_column_type(res, 32) != SQLITE_NULL)
+ uuid_unparse_lower(*((uuid_t *) sqlite3_column_blob(res, 32)), transition_id);
+
+ char *edit_command = health_edit_command_from_source((char *)sqlite3_column_text(res, 18));
+
+ if (count)
+ buffer_sprintf(wb, ",");
+
+ count++;
+
+ buffer_sprintf(
+ wb,
+ "\n\t{\n"
+ "\t\t\"hostname\": \"%s\",\n"
+ "\t\t\"utc_offset\": %d,\n"
+ "\t\t\"timezone\": \"%s\",\n"
+ "\t\t\"unique_id\": %u,\n"
+ "\t\t\"alarm_id\": %u,\n"
+ "\t\t\"alarm_event_id\": %u,\n"
+ "\t\t\"config_hash_id\": \"%s\",\n"
+ "\t\t\"transition_id\": \"%s\",\n"
+ "\t\t\"name\": \"%s\",\n"
+ "\t\t\"chart\": \"%s\",\n"
+ "\t\t\"context\": \"%s\",\n"
+ "\t\t\"family\": \"%s\",\n"
+ "\t\t\"class\": \"%s\",\n"
+ "\t\t\"component\": \"%s\",\n"
+ "\t\t\"type\": \"%s\",\n"
+ "\t\t\"processed\": %s,\n"
+ "\t\t\"updated\": %s,\n"
+ "\t\t\"exec_run\": %lu,\n"
+ "\t\t\"exec_failed\": %s,\n"
+ "\t\t\"exec\": \"%s\",\n"
+ "\t\t\"recipient\": \"%s\",\n"
+ "\t\t\"exec_code\": %d,\n"
+ "\t\t\"source\": \"%s\",\n"
+ "\t\t\"command\": \"%s\",\n"
+ "\t\t\"units\": \"%s\",\n"
+ "\t\t\"when\": %lu,\n"
+ "\t\t\"duration\": %lu,\n"
+ "\t\t\"non_clear_duration\": %lu,\n"
+ "\t\t\"status\": \"%s\",\n"
+ "\t\t\"old_status\": \"%s\",\n"
+ "\t\t\"delay\": %d,\n"
+ "\t\t\"delay_up_to_timestamp\": %lu,\n"
+ "\t\t\"updated_by_id\": %u,\n"
+ "\t\t\"updates_id\": %u,\n"
+ "\t\t\"value_string\": \"%s\",\n"
+ "\t\t\"old_value_string\": \"%s\",\n"
+ "\t\t\"last_repeat\": \"%lu\",\n"
+ "\t\t\"silenced\": \"%s\",\n",
+ sqlite3_column_text(res, 0),
+ host->utc_offset,
+ rrdhost_abbrev_timezone(host),
+ (unsigned int) sqlite3_column_int64(res, 1),
+ (unsigned int) sqlite3_column_int64(res, 2),
+ (unsigned int) sqlite3_column_int64(res, 3),
+ config_hash_id,
+ transition_id,
+ sqlite3_column_text(res, 13),
+ sqlite3_column_text(res, 14),
+ sqlite3_column_text(res, 31),
+ sqlite3_column_text(res, 15),
+ sqlite3_column_text(res, 28) ? (const char *) sqlite3_column_text(res, 28) : (char *) "Unknown",
+ sqlite3_column_text(res, 29) ? (const char *) sqlite3_column_text(res, 29) : (char *) "Unknown",
+ sqlite3_column_text(res, 30) ? (const char *) sqlite3_column_text(res, 30) : (char *) "Unknown",
+ (sqlite3_column_int64(res, 10) & HEALTH_ENTRY_FLAG_PROCESSED)?"true":"false",
+ (sqlite3_column_int64(res, 10) & HEALTH_ENTRY_FLAG_UPDATED)?"true":"false",
+ (long unsigned int)sqlite3_column_int64(res, 11),
+ (sqlite3_column_int64(res, 10) & HEALTH_ENTRY_FLAG_EXEC_FAILED)?"true":"false",
+ sqlite3_column_text(res, 16) ? (const char *) sqlite3_column_text(res, 16) : string2str(host->health.health_default_exec),
+ sqlite3_column_text(res, 17) ? (const char *) sqlite3_column_text(res, 17) : string2str(host->health.health_default_recipient),
+ sqlite3_column_int(res, 21),
+ sqlite3_column_text(res, 18),
+ edit_command,
+ sqlite3_column_text(res, 19),
+ (long unsigned int)sqlite3_column_int64(res, 7),
+ (long unsigned int)sqlite3_column_int64(res, 8),
+ (long unsigned int)sqlite3_column_int64(res, 9),
+ rrdcalc_status2string(sqlite3_column_int(res, 22)),
+ rrdcalc_status2string(sqlite3_column_int(res, 23)),
+ sqlite3_column_int(res, 24),
+ (long unsigned int)sqlite3_column_int64(res, 12),
+ (unsigned int)sqlite3_column_int64(res, 5),
+ (unsigned int)sqlite3_column_int64(res, 6),
+ sqlite3_column_type(res, 25) == SQLITE_NULL ? "-" : format_value_and_unit(new_value_string, 100, sqlite3_column_double(res, 25), (char *) sqlite3_column_text(res, 19), -1),
+ sqlite3_column_type(res, 26) == SQLITE_NULL ? "-" : format_value_and_unit(old_value_string, 100, sqlite3_column_double(res, 26), (char *) sqlite3_column_text(res, 19), -1),
+ (long unsigned int)sqlite3_column_int64(res, 27),
+ (sqlite3_column_int64(res, 10) & HEALTH_ENTRY_FLAG_SILENCED)?"true":"false");
+
+ health_string2json(wb, "\t\t", "info", (char *) sqlite3_column_text(res, 20), ",\n");
+
+ if(unlikely(sqlite3_column_int64(res, 10) & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)) {
+ buffer_strcat(wb, "\t\t\"no_clear_notification\": true,\n");
+ }
+
+ buffer_strcat(wb, "\t\t\"value\":");
+ if (sqlite3_column_type(res, 25) == SQLITE_NULL)
+ buffer_strcat(wb, "null");
+ else
+ buffer_print_netdata_double(wb, sqlite3_column_double(res, 25));
+ buffer_strcat(wb, ",\n");
+
+ buffer_strcat(wb, "\t\t\"old_value\":");
+ if (sqlite3_column_type(res, 26) == SQLITE_NULL)
+ buffer_strcat(wb, "null");
+ else
+ buffer_print_netdata_double(wb, sqlite3_column_double(res, 26));
+ buffer_strcat(wb, "\n");
+
+ buffer_strcat(wb, "\t}");
+
+ freez(edit_command);
+ }
+
+ buffer_strcat(wb, "\n]");
+
+ rc = sqlite3_finalize(res);
+ if (unlikely(rc != SQLITE_OK))
+ error_report("Failed to finalize statement for SQL_SELECT_HEALTH_LOG");
+
+ buffer_free(command);
+}
diff --git a/database/sqlite/sqlite_health.h b/database/sqlite/sqlite_health.h
index 87060dacc..96d090b54 100644
--- a/database/sqlite/sqlite_health.h
+++ b/database/sqlite/sqlite_health.h
@@ -14,4 +14,6 @@ void sql_health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae);
void sql_health_alarm_log_cleanup(RRDHOST *host);
int alert_hash_and_store_config(uuid_t hash_id, struct alert_config *cfg, int store_hash);
void sql_aclk_alert_clean_dead_entries(RRDHOST *host);
+int sql_health_get_last_executed_event(RRDHOST *host, ALARM_ENTRY *ae, RRDCALC_STATUS *last_executed_status);
+void sql_health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after, char *chart);
#endif //NETDATA_SQLITE_HEALTH_H
diff --git a/docs/Demo-Sites.md b/docs/Demo-Sites.md
index 1fd0d4192..177a37d16 100644
--- a/docs/Demo-Sites.md
+++ b/docs/Demo-Sites.md
@@ -11,10 +11,27 @@ sidebar_position: "90"
# Live demos
-See the live Netdata Cloud demo with rooms for specific use cases at **https://app.netdata.cloud/spaces/netdata-demo**
+See the live Netdata Cloud demo with rooms (listed below) for specific use cases at **https://app.netdata.cloud/spaces/netdata-demo**
-| Location | Netdata demo URL | 60 mins reqs | VM donated by |
+| Location | Netdata Demo URL | 60 mins reqs | VM donated by |
| :------------------ | :-------------------------------------------------------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| :------------------------------------------------- |
+| Netdata Cloud | **[Netdata Demo - All nodes](https://app.netdata.cloud/spaces/netdata-demo/rooms/all-nodes/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Active Directory](https://app.netdata.cloud/spaces/netdata-demo/rooms/active-directory/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Apache](https://app.netdata.cloud/spaces/netdata-demo/rooms/apache/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Cassandra](https://app.netdata.cloud/spaces/netdata-demo/rooms/cassandra/overview)** |||
+| Netdata Cloud | **[Netdata Demo - CoreDNS](https://app.netdata.cloud/spaces/netdata-demo/rooms/coredns/overview)** |||
+| Netdata Cloud | **[Netdata Demo - DNS Query](https://app.netdata.cloud/spaces/netdata-demo/rooms/dns-query/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Docker](https://app.netdata.cloud/spaces/netdata-demo/rooms/docker/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Host Reachability](https://app.netdata.cloud/spaces/netdata-demo/rooms/host-reachability/overview)** |||
+| Netdata Cloud | **[Netdata Demo - HTTP Endpoints](https://app.netdata.cloud/spaces/netdata-demo/rooms/http-endpoints/overview)** |||
+| Netdata Cloud | **[Netdata Demo - IIS](https://app.netdata.cloud/spaces/netdata-demo/rooms/iis/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Kubernetes](https://app.netdata.cloud/spaces/netdata-demo/rooms/kubernetes/kubernetes)** |||
+| Netdata Cloud | **[Netdata Demo - Machine Learning](https://app.netdata.cloud/spaces/netdata-demo/rooms/machine-learning/overview)** |||
+| Netdata Cloud | **[Netdata Demo - MS Exchange](https://app.netdata.cloud/spaces/netdata-demo/rooms/ms-exchange/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Nginx](https://app.netdata.cloud/spaces/netdata-demo/rooms/nginx/overview)** |||
+| Netdata Cloud | **[Netdata Demo - PostgreSQL](https://app.netdata.cloud/spaces/netdata-demo/rooms/postgresql/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Redis](https://app.netdata.cloud/spaces/netdata-demo/rooms/redis/overview)** |||
+| Netdata Cloud | **[Netdata Demo - Windows](https://app.netdata.cloud/spaces/netdata-demo/rooms/windows/overview)** |||
| London (UK) | **[london3.my-netdata.io](https://london3.my-netdata.io)**<br/>(this is the global Netdata **registry** and has **named** and **mysql** charts) | [![Requests Per Second](https://london3.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://london3.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
| Atlanta (USA) | **[cdn77.my-netdata.io](https://cdn77.my-netdata.io)**<br/>(with **named** and **mysql** charts) | [![Requests Per Second](https://cdn77.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://cdn77.my-netdata.io) | [CDN77.com](https://www.cdn77.com/) |
| Bangalore (India) | **[bangalore.my-netdata.io](https://bangalore.my-netdata.io)** | [![Requests Per Second](https://bangalore.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://bangalore.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
@@ -25,5 +42,3 @@ See the live Netdata Cloud demo with rooms for specific use cases at **https://a
| Toronto (Canada) | **[toronto.my-netdata.io](https://toronto.my-netdata.io)** | [![Requests Per Second](https://toronto.my-netdata.io/api/v1/badge.svg?chart=netdata.requests&dimensions=requests&after=-3600&options=unaligned&group=sum&label=reqs&units=empty&value_color=blue&precision=0&v42)](https://toronto.my-netdata.io) | [DigitalOcean.com](https://m.do.co/c/83dc9f941745) |
Netdata dashboards are mobile- and touch-friendly.
-
-
diff --git a/docs/anonymous-statistics.md b/docs/anonymous-statistics.md
index 512cd02d3..d8cc99689 100644
--- a/docs/anonymous-statistics.md
+++ b/docs/anonymous-statistics.md
@@ -8,8 +8,8 @@ learn_rel_path: "Configuration"
# Anonymous telemetry events
-By default, Netdata collects anonymous usage information from the open-source monitoring agent using the open-source
-product analytics platform [PostHog](https://github.com/PostHog/posthog). We use their [cloud enterprise platform](https://posthog.com/product).
+By default, Netdata collects anonymous usage information from the open-source monitoring agent. For agent events such as start, stop, and crash, we use our own cloud function in GCP. For frontend telemetry (pageviews, etc.) on the agent dashboard itself, we use the open-source
+product analytics platform [PostHog](https://github.com/PostHog/posthog).
We are strongly committed to your [data privacy](https://netdata.cloud/privacy/).
@@ -52,7 +52,7 @@ variable is controlled via the [opt-out mechanism](#opt-out).
## Agent Backend - Anonymous Statistics Script
Every time the daemon is started or stopped and every time a fatal condition is encountered, Netdata uses the anonymous
-statistics script to collect system information and send it to the Netdata PostHog via an http call. The information collected for all
+statistics script to collect system information and send it to the Netdata telemetry cloud function via an http call. The information collected for all
events is:
- Netdata version
diff --git a/docs/category-overview-pages/accessing-netdata-dashboards.md b/docs/category-overview-pages/accessing-netdata-dashboards.md
new file mode 100644
index 000000000..46c0bcff1
--- /dev/null
+++ b/docs/category-overview-pages/accessing-netdata-dashboards.md
@@ -0,0 +1,3 @@
+# Accessing Netdata Dashboards
+
+This section contains documentation on how you can access the Netdata Agent's dashboards, and the Netdata Cloud's dashboards. \ No newline at end of file
diff --git a/docs/category-overview-pages/build-the-netdata-agent-yourself.md b/docs/category-overview-pages/build-the-netdata-agent-yourself.md
new file mode 100644
index 000000000..99166ad95
--- /dev/null
+++ b/docs/category-overview-pages/build-the-netdata-agent-yourself.md
@@ -0,0 +1,3 @@
+# Build the Netdata Agent yourself
+
+This section contains documentation on all the ways that you can build the Netdata Agent. \ No newline at end of file
diff --git a/docs/category-overview-pages/install-netdata-on-embedded-systems.md b/docs/category-overview-pages/install-netdata-on-embedded-systems.md
new file mode 100644
index 000000000..dfaa4482c
--- /dev/null
+++ b/docs/category-overview-pages/install-netdata-on-embedded-systems.md
@@ -0,0 +1,3 @@
+# Install Netdata on Embedded Systems Overview
+
+This section contains documentation for installation methods when it comes to Embedded Systems. \ No newline at end of file
diff --git a/docs/category-overview-pages/install-with-a-cicd-provisioning-system.md b/docs/category-overview-pages/install-with-a-cicd-provisioning-system.md
new file mode 100644
index 000000000..30a5a706c
--- /dev/null
+++ b/docs/category-overview-pages/install-with-a-cicd-provisioning-system.md
@@ -0,0 +1,3 @@
+# Install with a CI/CD Provisioning System Overview
+
+This section contains documentation on all the installation methods through a CI/CD system. \ No newline at end of file
diff --git a/docs/category-overview-pages/machine-learning-and-assisted-troubleshooting.md b/docs/category-overview-pages/machine-learning-and-assisted-troubleshooting.md
new file mode 100644
index 000000000..074051e3e
--- /dev/null
+++ b/docs/category-overview-pages/machine-learning-and-assisted-troubleshooting.md
@@ -0,0 +1,3 @@
+# Machine Learning and Assisted Troubleshooting Overview
+
+This section contains documentation regarding Netdata's troubleshooting and machine learning features. \ No newline at end of file
diff --git a/docs/category-overview-pages/maintenance-operations-on-netdata-agents.md b/docs/category-overview-pages/maintenance-operations-on-netdata-agents.md
new file mode 100644
index 000000000..207a0bd32
--- /dev/null
+++ b/docs/category-overview-pages/maintenance-operations-on-netdata-agents.md
@@ -0,0 +1,3 @@
+# Maintenance operations on Netdata Agents Overview
+
+This section provides information on various actions you can take when maintaining a Netdata Agent. \ No newline at end of file
diff --git a/docs/category-overview-pages/metrics-streaming-and-replication.md b/docs/category-overview-pages/metrics-streaming-and-replication.md
new file mode 100644
index 000000000..37b040e9e
--- /dev/null
+++ b/docs/category-overview-pages/metrics-streaming-and-replication.md
@@ -0,0 +1,3 @@
+# Metrics Streaming and Replication Overview
+
+This section contains documentation to help you understand and configure streaming and replication with Netdata. \ No newline at end of file
diff --git a/docs/category-overview-pages/misc-overview.md b/docs/category-overview-pages/misc-overview.md
index e0c1cc0d1..dbb11e9bc 100644
--- a/docs/category-overview-pages/misc-overview.md
+++ b/docs/category-overview-pages/misc-overview.md
@@ -1,19 +1,3 @@
-<!--
-title: "Miscellaneous material"
-sidebar_label: "Miscellaneous"
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/docs/category-overview-pages/misc-overview.md"
-description: "Available integrations in Netdata"
-learn_status: "Published"
-learn_rel_path: "Miscellaneous"
-sidebar_position: 110
--->
-
# Miscellaneous material
-This section contains temporary material that no longer belongs in our official documentation, and will
-be moved to other locations. We keep it here to make it accessible while we create the new articles.
-
-
-
-
-
+This section contains material that will be moved to new locations as we see fit. We keep it here to make it accessible while we make these changes. \ No newline at end of file
diff --git a/docs/category-overview-pages/monitor-your-infrastructure.md b/docs/category-overview-pages/monitor-your-infrastructure.md
new file mode 100644
index 000000000..3582e88a6
--- /dev/null
+++ b/docs/category-overview-pages/monitor-your-infrastructure.md
@@ -0,0 +1,3 @@
+# Monitor your Infrastructure Overview
+
+This section contains documentation on how you can use Netdata Cloud and its features to monitor your entire infrastructure. \ No newline at end of file
diff --git a/docs/category-overview-pages/netdata-apis.md b/docs/category-overview-pages/netdata-apis.md
new file mode 100644
index 000000000..82d1c1752
--- /dev/null
+++ b/docs/category-overview-pages/netdata-apis.md
@@ -0,0 +1,5 @@
+# Netdata APIs Overview
+
+This section contains information about Netdata's APIs.
+
+You can access the Netdata Agent's API through swagger UI [here](/api). \ No newline at end of file
diff --git a/docs/category-overview-pages/netdata-architecture.md b/docs/category-overview-pages/netdata-architecture.md
new file mode 100644
index 000000000..70f126597
--- /dev/null
+++ b/docs/category-overview-pages/netdata-architecture.md
@@ -0,0 +1,3 @@
+# Netdata Architecture Overview
+
+This section's purpose is to explain the architecture of Netdata, the role of the Agent and the Cloud, and more. \ No newline at end of file
diff --git a/docs/category-overview-pages/netdata-dashboards-and-visualizations.md b/docs/category-overview-pages/netdata-dashboards-and-visualizations.md
new file mode 100644
index 000000000..cc9304365
--- /dev/null
+++ b/docs/category-overview-pages/netdata-dashboards-and-visualizations.md
@@ -0,0 +1,3 @@
+# Netdata Dashboards and Visualizations Overview
+
+This section provides documentation about all the visualization operations, features and insights that Netdata provides. \ No newline at end of file
diff --git a/docs/category-overview-pages/optimizing-metrics-database.md b/docs/category-overview-pages/optimizing-metrics-database.md
new file mode 100644
index 000000000..fdbd3b690
--- /dev/null
+++ b/docs/category-overview-pages/optimizing-metrics-database.md
@@ -0,0 +1,3 @@
+# Optimizing Metrics Database Overview
+
+This section contains documentation to help you understand how the metrics DB works, understand the key features and configure them to suit your needs. \ No newline at end of file
diff --git a/docs/cloud/alerts-notifications/add-discord-notification.md b/docs/cloud/alerts-notifications/add-discord-notification.md
index d1769f0e2..3edf5002b 100644
--- a/docs/cloud/alerts-notifications/add-discord-notification.md
+++ b/docs/cloud/alerts-notifications/add-discord-notification.md
@@ -8,7 +8,7 @@ To enable Discord notifications you need:
- A Netdata Cloud account
- Access to the space as an **administrator**
-- Have a Discord server able to receive webhook integrations. For mode details check [how to configure this on Discord](#settings-on-discord)
+- Have a Discord server able to receive webhook integrations. For more details check [how to configure this on Discord](#settings-on-discord)
## Steps
diff --git a/docs/cloud/alerts-notifications/add-mattermost-notification-configuration.md b/docs/cloud/alerts-notifications/add-mattermost-notification-configuration.md
new file mode 100644
index 000000000..79bc98619
--- /dev/null
+++ b/docs/cloud/alerts-notifications/add-mattermost-notification-configuration.md
@@ -0,0 +1,51 @@
+# Add Mattermost notification configuration
+
+From the Cloud interface, you can manage your space's notification settings and from these you can add a specific configuration to get notifications delivered on Mattermost.
+
+## Prerequisites
+
+To add Mattermost notification configurations you need:
+
+- A Netdata Cloud account
+- Access to the space as an **administrator**
+- Space needs to be on **Business** plan or higher
+- Have a Mattermost app on your workspace to receive the webhooks, for more details check [how to configure this on Mattermost](#settings-on-mattermost)
+
+## Steps
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Notification** tab
+1. Click on the **+ Add configuration** button (near the top-right corner of your screen)
+1. On the **Mattermost** card click on **+ Add**
+1. A modal will be presented to you to enter the required details to enable the configuration:
+ 1. **Notification settings** are Netdata specific settings
+ - Configuration name - you can optionally provide a name for your configuration so you can easily refer to it
+ - Rooms - by specifying a list of Rooms, you select the nodes or areas of your infrastructure for which you want to be notified using this configuration
+ - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only
+ 1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For Mattermost:
+ - Webhook URL - URL provided on Mattermost for the channel you want to receive your notifications. For more details check [how to configure this on Mattermost](#settings-on-mattermost)
+
+## Settings on Mattermost
+
+To enable the webhook integrations on Mattermost you need:
+1. In Mattermost, go to Product menu > Integrations > Incoming Webhook.
+
+![image](https://user-images.githubusercontent.com/26550862/243394526-6d45f6c2-c3cc-4d5f-a9cb-85d8170fc8ac.png)
+
+ - If you don’t have the Integrations option, incoming webhooks may not be enabled on your Mattermost server or may be disabled for non-admins. They can be enabled by a System Admin from System Console > Integrations > Integration Management. Once incoming webhooks are enabled, continue with the steps below
+
+![image](https://user-images.githubusercontent.com/26550862/243394734-f911ccf7-bb18-41b2-ab52-31195861dd1b.png)
+
+2. Select Add Incoming Webhook and add a name and description for the webhook. The description can be up to 500 characters
+
+3. Select the channel to receive webhook payloads, then select Add to create the webhook
+
+![image](https://user-images.githubusercontent.com/26550862/243394626-363b7cbc-3550-47ef-b2f3-ce929919145f.png)
+
+4. You will end up with a webhook endpoint that looks like so:
+```
+https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx
+```
+ - Treat this endpoint as a secret. Anyone who has it will be able to post messages to your Mattermost instance.
+
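+If you want to sanity-check the endpoint before wiring it into Netdata Cloud, any HTTP client that can POST JSON will do. A minimal sketch in C using libcurl (the URL is the placeholder from step 4 -- substitute the one Mattermost generated for you):
+
+```c
+#include <curl/curl.h>
+
+int main(void) {
+    CURL *curl = curl_easy_init();
+    if (!curl)
+        return 1;
+
+    struct curl_slist *headers = curl_slist_append(NULL, "Content-Type: application/json");
+
+    /* placeholder endpoint -- use the URL Mattermost generated for you */
+    curl_easy_setopt(curl, CURLOPT_URL, "https://your-mattermost-server.com/hooks/xxx-generatedkey-xxx");
+    curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "{\"text\": \"Webhook test from the setup steps above\"}");
+
+    CURLcode rc = curl_easy_perform(curl); /* HTTP 200 means the channel received the message */
+
+    curl_slist_free_all(headers);
+    curl_easy_cleanup(curl);
+    return rc == CURLE_OK ? 0 : 1;
+}
+```
+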
+For more details please check Mattermost's article [Incoming webhooks for Mattermost](https://developers.mattermost.com/integrate/webhooks/incoming/).
diff --git a/docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md b/docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md
index 28e526c90..0a80311ef 100644
--- a/docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md
+++ b/docs/cloud/alerts-notifications/add-opsgenie-notification-configuration.md
@@ -4,7 +4,7 @@ From the Cloud interface, you can manage your space's notification settings and
## Prerequisites
-To add Opsgenie notification configurations you need
+To add Opsgenie notification configurations you need:
- A Netdata Cloud account
- Access to the space as an **administrator**
@@ -34,4 +34,4 @@ To enable the Netdata integration on Opsgenie you need:
1. Pick **API** from available integrations. Copy your API Key and press **Save Integration**.
-1. Paste copied API key into the corresponding field in **Integration configuration** section of Opsgenie modal window in Netdata. \ No newline at end of file
+1. Paste copied API key into the corresponding field in **Integration configuration** section of Opsgenie modal window in Netdata.
diff --git a/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md b/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md
index 64880ebe3..eec4f94c1 100644
--- a/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md
+++ b/docs/cloud/alerts-notifications/add-pagerduty-notification-configuration.md
@@ -4,12 +4,12 @@ From the Cloud interface, you can manage your space's notification settings and
## Prerequisites
-To add PagerDuty notification configurations you need
+To add PagerDuty notification configurations you need:
- A Cloud account
- Access to the space as an **administrator**
- Space needs to be on **Business** plan or higher
-- Have a PagerDuty service to receive events, for mode details check [how to configure this on PagerDuty](#settings-on-pagerduty)
+- Have a PagerDuty service to receive events, for more details check [how to configure this on PagerDuty](#settings-on-pagerduty)
## Steps
diff --git a/docs/cloud/alerts-notifications/add-slack-notification-configuration.md b/docs/cloud/alerts-notifications/add-slack-notification-configuration.md
index 99bb2d5b5..ed845b4d3 100644
--- a/docs/cloud/alerts-notifications/add-slack-notification-configuration.md
+++ b/docs/cloud/alerts-notifications/add-slack-notification-configuration.md
@@ -4,12 +4,12 @@ From the Cloud interface, you can manage your space's notification settings and
## Prerequisites
-To add discord notification configurations you need
+To add Slack notification configurations you need:
- A Netdata Cloud account
- Access to the space as an **administrator**
- Space needs to be on **Business** plan or higher
-- Have a Slack app on your workspace to receive the webhooks, for mode details check [how to configure this on Slack](#settings-on-slack)
+- Have a Slack app on your workspace to receive the webhooks, for more details check [how to configure this on Slack](#settings-on-slack)
## Steps
@@ -34,7 +34,7 @@ To enable the webhook integrations on Slack you need:
- On your app go to **Incoming Webhooks** and click on **activate incoming webhooks**
![image](https://user-images.githubusercontent.com/2930882/214251948-486229bb-195b-499b-92e4-4be59a567a19.png)
-
+
- At the bottom of **Webhook URLs for Your Workspace** section you have **Add New Webhook to Workspace**
- After pressing that specify the channel where you want your notifications to be delivered
diff --git a/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md b/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md
index 0140c30fd..21d1b6ed8 100644
--- a/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md
+++ b/docs/cloud/alerts-notifications/add-webhook-notification-configuration.md
@@ -4,12 +4,12 @@ From the Cloud interface, you can manage your space's notification settings and
## Prerequisites
-To add discord notification configurations you need
+To add webhook notification configurations you need:
- A Netdata Cloud account
- Access to the space as an **administrator**
- Space needs to be on **Pro** plan or higher
-- Have an app that allows you to receive webhooks following a predefined schema, for mode details check [how to create the webhook service](#webhook-service)
+- Have an app that allows you to receive webhooks following a predefined schema, for more details check [how to create the webhook service](#webhook-service)
## Steps
@@ -24,8 +24,8 @@ To add discord notification configurations you need
 - Notification - you specify which notifications you want to receive using this configuration: All Alerts and unreachable, All Alerts, Critical only
1. **Integration configuration** are the specific notification integration required settings, which vary by notification method. For webhook:
 - Webhook URL - the webhook URL is the URL of the service that Netdata will send notifications to. In order to keep the communication secure, we only accept HTTPS URLs. Check [how to create the webhook service](#webhook-service).
- - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL. For mode details check [Extra headers](#extra-headers)
- - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms. For mode details check [Authentication mechanisms](#authentication-mechanisms):
+ - Extra headers - these are optional key-value pairs that you can set to be included in the HTTP requests sent to the webhook URL. For more details check [Extra headers](#extra-headers)
+ - Authentication Mechanism - Netdata webhook integration supports 3 different authentication mechanisms; the sketch after this list shows the Basic and Bearer header shapes. For more details check [Authentication mechanisms](#authentication-mechanisms):
- Mutual TLS (recommended) - default authentication mechanism used if no other method is selected.
 - Basic - the client sends a request with an Authorization header that includes a base64-encoded string in the format **username:password**. These settings will be required inputs.
- Bearer - the client sends a request with an Authorization header that includes a **bearer token**. This setting will be a required input.
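
For reference, a sketch of how a receiving service might check the Basic and Bearer header shapes described above (the credential values are placeholders, not anything Netdata defines):

```c
#include <stdio.h>
#include <string.h>

/* "dXNlcm5hbWU6cGFzc3dvcmQ=" is base64("username:password") */
static int authorized(const char *authorization_header) {
    if (!strcmp(authorization_header, "Basic dXNlcm5hbWU6cGFzc3dvcmQ="))
        return 1;
    if (!strcmp(authorization_header, "Bearer my-secret-token"))
        return 1;
    return 0;
}

int main(void) {
    printf("%d\n", authorized("Bearer my-secret-token")); /* prints 1 */
    printf("%d\n", authorized("Basic bogus"));            /* prints 0 */
    return 0;
}
```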
@@ -134,7 +134,7 @@ nsjoQAm6OwpTN5362vE9SYu1twz7KdzBlUkDhePEOgQkWfLHBJWwB+PvB1j/cUA3
```bash
server {
listen 443 ssl default_server;
-
+
# ... existing SSL configuration for server authentication ...
ssl_verify_client on;
ssl_client_certificate /path/to/Netdata_CA.pem;
diff --git a/docs/cloud/alerts-notifications/manage-alert-notification-silencing-rules.md b/docs/cloud/alerts-notifications/manage-alert-notification-silencing-rules.md
new file mode 100644
index 000000000..b9806c6fa
--- /dev/null
+++ b/docs/cloud/alerts-notifications/manage-alert-notification-silencing-rules.md
@@ -0,0 +1,58 @@
+# Manage alert notification silencing rules
+
+From the Cloud interface, you can manage your space's alert notification silencing rules settings as well as allow users to define their personal ones.
+
+## Prerequisites
+
+To manage **space's alert notification silencing rule settings**, you will need the following:
+
+- A Netdata Cloud account
+- Access to the space as an **administrator** or **manager** (**troubleshooters** can only view space rules)
+
+
+To manage your **personal alert notification silencing rule settings**, you will need the following:
+
+- A Netdata Cloud account
+- Access to the space with any roles except **billing**
+
+### Steps
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Alert & Notification** tab on the left hand-side
+1. Click on the **Notification Silencing Rules** tab
+1. You will be presented with a table of the configured alert notification silencing rules for:
+ * the space (if you aren't an **observer**)
+ * yourself
+
+ You will be able to:
+ 1. **Add a new** alert notification silencing rule configuration.
+ - Choose if it applies to **All users** or **Myself** (All users is only available for **administrators** and **managers**)
+ - You need to provide a name for the configuration so you can easily refer to it
+ - Define criteria for Nodes: To which Rooms will this apply? What Nodes? Does it apply to host label key-value pairs?
+ - Define criteria for Alerts: Which alert name is being targeted? What alert context? Will it apply to a specific alert role?
+ - Define when it will be applied:
+ - Immediately, from now until it is turned off, or for a specific duration (start and end date automatically set)
+ - Scheduled, you specify the start and end time for when the rule becomes active and then inactive (time is set according to your browser's local timezone)
+ Note: You are only able to add a rule if your space is on a [paid plan](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md).
+ 1. **Edit an existing** alert notification silencing rule configuration. You will be able to change:
+ - The name provided for it
+ - Who it applies to
+ - Selection criteria for Nodes and Alerts
+ - When it will be applied
+ 1. **Enable/Disable** a given alert notification silencing rule configuration.
+ - Use the toggle to enable or disable
+ 1. **Delete an existing** alert notification silencing rule.
+ - Use the trash icon to delete your configuration
+
+## Silencing rules examples
+
+| Rule name | War Rooms | Nodes | Host Label | Alert name | Alert context | Alert role | Description |
+| :-- | :-- | :-- | :-- | :-- | :-- | :-- | :--|
+| Space silencing | All Rooms | * | * | * | * | * | This rule silences the entire space, targets all nodes and for all users. E.g. infrastructure wide maintenance window. |
+| DB Servers Rooms | PostgreSQL Servers | * | * | * | * | * | This rule silences the nodes in the room named PostgreSQL Servers; for example, it doesn't silence the `All Nodes` room. E.g. my team with membership to this room doesn't want to receive notifications for these nodes. |
+| Node child1 | All Rooms | `child1` | * | * | * | * | This rule silences all alert state transitions for node `child1` on all rooms and for all users. E.g. node could be going under maintenance. |
+| Production nodes | All Rooms | * | `environment:production` | * | * | * | This rule silences all alert state transitions for nodes with the host label key-value pair `environment:production`. E.g. Maintenance window on nodes with specific host labels. |
+| Third party maintenance | All Rooms | * | * | `httpcheck_posthog_netdata_cloud.request_status` | * | * | This rule silences this specific alert since a third-party partner will be undergoing maintenance. |
+| Intended stress usage on CPU | All Rooms | * | * | * | `system.cpu` | * | This rule silences specific alerts across all nodes and their CPU cores. |
+| Silence role webmaster | All Rooms | * | * | * | * | `webmaster` | This rule silences all alerts configured with the role `webmaster`. |
+| Silence alert on node | All Rooms | `child1` | * | `httpcheck_posthog_netdata_cloud.request_status` | * | * | This rule silences the specific alert on the `child1` node. |
diff --git a/docs/cloud/alerts-notifications/manage-notification-methods.md b/docs/cloud/alerts-notifications/manage-notification-methods.md
index 17c7f879a..f61b6bf6f 100644
--- a/docs/cloud/alerts-notifications/manage-notification-methods.md
+++ b/docs/cloud/alerts-notifications/manage-notification-methods.md
@@ -27,7 +27,8 @@ Notes:
### Steps
1. Click on the **Space settings** cog (located above your profile icon)
-1. Click on the **Notification** tab
+1. Click on the **Alerts & Notification** tab on the left hand-side
+1. Click on the **Notification Methods** tab
1. You will be presented with a table of the configured notification methods for the space. You will be able to:
1. **Add a new** notification method configuration.
 - Choose the service from the list of the available ones; you may see a list of unavailable options if your plan doesn't allow some of them (you will see on the
@@ -42,7 +43,7 @@ Notes:
- Service specific inputs
1. **Enable/Disable** a given notification method configuration.
- Use the toggle to enable or disable the notification method configuration
- 1. **Delete an existing** notification method configuartion. Netdata provided ones can't be deleted, e.g. Email
+ 1. **Delete an existing** notification method configuration. Netdata provided ones can't be deleted, e.g. Email
- Use the trash icon to delete your configuration
## Manage user notification settings
diff --git a/docs/cloud/alerts-notifications/notifications.md b/docs/cloud/alerts-notifications/notifications.md
index 94cd2dc3f..ad115d43f 100644
--- a/docs/cloud/alerts-notifications/notifications.md
+++ b/docs/cloud/alerts-notifications/notifications.md
@@ -31,7 +31,7 @@ or add new alert that you see in Netdata Cloud, and receive via centralized aler
</Callout>
-### Alert notifications
+## Alert notifications
Netdata Cloud can send centralized alert notifications to your team whenever a node enters a warning, critical, or unreachable state. By enabling notifications,
you ensure no alert, on any node in your infrastructure, goes unnoticed by you or your team.
@@ -51,9 +51,9 @@ All users in a Space can personalize their notifications settings, for Personal
> ⚠️ Netdata Cloud supports different notification methods and their availability will depend on the plan you are at.
> For more details check [Service classification](#service-classification) or [netdata.cloud/pricing](https://www.netdata.cloud/pricing).
-#### Service level
+### Service level
-##### Personal
+#### Personal
The notifications methods classified as **Personal** are what we consider generic, meaning that these can't have specific rules for them set by the administrators.
@@ -63,7 +63,7 @@ manage what specific configurations they want for the Space / Room(s) and the de
One example of such a notification method is the E-mail.
-##### System
+#### System
For **System** notification methods, the destination of the channel will be a target that usually isn't specific to a single user, e.g. slack channel.
@@ -72,23 +72,49 @@ different targets depending on Rooms or Notification level settings.
Some examples of such notification methods are: Webhook, PagerDuty, Slack.
-#### Service classification
+### Service classification
-##### Community
+#### Community
Notification methods classified as Community can be used by everyone independent of the plan your space is on.
These are: Email and Discord
-##### Pro
+#### Pro
Notification methods classified as Pro are only available for **Pro** and **Business** plans
These are: webhook
-##### Business
+#### Business
Notification methods classified as Business are only available for **Business** plans
These are: PagerDuty, Slack, Opsgenie
+## Silencing Alert notifications
+
+Netdata Cloud provides a Silencing Rule engine that allows you to mute alert notifications. This muting action applies only to alert state transition notifications; it doesn't cover node unreachable state transitions.
+
+The Silencing Rule engine is flexible and allows you to enter silencing rules for the two main entities involved in alert notifications, using different attributes. The main entities you can target are **Nodes** and **Alerts**, which can be used in combination or in isolation to match specific needs - see some examples [here](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-alert-notification-silencing-rules.md#silencing-rules-examples).
+
+### Scope definition for Nodes
+* **Space:** silencing the space, selecting `All Rooms`, silences all alert state transitions from any node claimed to the space.
+* **War Room:** silencing a specific room will silence all alert state transitions from any node in that room. Please note that if the node belongs to
+another room that isn't silenced, it can still trigger alert notifications to the users with membership to that other room.
+* **Node:** silencing a specific node can be done for the entire space, selecting `All Rooms`, or for specific war room(s). The main difference is
+if the node should be silenced for the entire space or just for specific rooms (when specific rooms are selected only users with membership to that room won't receive notifications).
+
+### Scope definition for Alerts
+* **Alert name:** silencing a specific alert name silences all alert state transitions for that specific alert.
+* **Alert context:** silencing a specific alert context will silence all alert state transitions for alerts targeting that chart context, for more details check [alert configuration docs](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-on).
+* **Alert role:** silencing a specific alert role will silence all the alert state transitions for alerts that are configured to be specific role recipients, for more details check [alert configuration docs](https://github.com/netdata/netdata/blob/master/health/REFERENCE.md#alarm-line-to).
+
+Besides the two main entities above, there are two more important settings you can define on a silencing rule:
+* Who does the rule affect? **All users** in the space or **Myself**
+* When does it apply? **Immediately** or on a **Schedule** (when set to immediately, you can also set a duration)
+
+For further help on setting alert notification silencing rules go to [Manage Alert Notification Silencing Rules](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-alert-notification-silencing-rules.md).
+
+> ⚠️ This feature is only available for [Netdata paid plans](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md).
+
## Flood protection
If a node has too many state changes like firing too many alerts or going from reachable to unreachable, Netdata Cloud
diff --git a/docs/cloud/insights/events-feed.md b/docs/cloud/insights/events-feed.md
index 0e297ba81..a56877ab1 100644
--- a/docs/cloud/insights/events-feed.md
+++ b/docs/cloud/insights/events-feed.md
@@ -21,10 +21,30 @@ At a high-level view, these are the domains from which the Events feed will prov
| **Domains of events** | **Community** | **Pro** | **Business** |
| :-- | :-- | :-- | :-- |
-| **Auditing events** - COMING SOON<br/>Events related to actions done on your Space, e.g. invite user, change user role or change plan.| 4 hours | 7 days | 90 days |
+| **[Auditing events](#auditing-events)**<br/>Events related to actions done on your Space, e.g. invite user, change user role or change plan.| 4 hours | 7 days | 90 days |
| **[Topology events](#topology-events)**<br/>Node state transition events, e.g. live or offline.| 4 hours | 7 days | 14 days |
| **[Alert events](#alert-events)**<br/>Alert state transition events, can be seen as an alert history log.| 4 hours | 7 days | 90 days |
+### Auditing events
+
+| **Event name** | **Description** | **Example** |
+| :-- | :-- | :-- |
+| Space Created | The space was created.| Space `Acme Space` was **created** |
+| Room Created | A room was created on the Space.| Room `DB Servers` was **created** by `John Doe` |
+| Room Deleted | A room was deleted from the Space. | Room `DB servers` was **deleted** by `John Doe` |
+| User Invited to Space | A user was invited to join the Space.| User `John Smith` was **invited** to this space by `Alan Doe` |
+| User Uninvited from Space | An invitation for a user to join the space was revoked.| User `John Smith` was **uninvited** from this space |
+| User Added to Space | A user was added to the Space from an invitation (user accepted the invitation).| User `John Smith` was **added** to this space by invite of `Alan Doe` |
+| User Removed from Space | A user was removed from the Space. | User `John Smith` was **removed** from this space by `Alan Doe` |
+| User Added to Room | A user was added to a room on the Space. | User `John Smith` was **added** to room `DB servers` |
+| User Removed from Room | A user was removed from a room on the Space. | User `John Smith` was **removed** from room `DB Servers` by `Alan Doe` |
+| User Space Properties Changed | The properties of a user on the Space have changed, e.g. change user role | User role for `John Smith` was **changed** to `troubleshooter` by `Alan Doe` |
+| Node Added To Room | The node was added to a room on the Space. | Node `ip-xyz.ec2.internal` was **added** to room `DB Servers` by `John Doe` |
+| Node Removed from Room | The node was removed from a room on the Space. | Node `ip-xyz.ec2.internal` was **removed** from room `DB Servers` by `John Doe` |
+| Silencing Rule Created | A new alert notification silencing rule was created on the Space. | Silencing rule `DB Servers schedule silencing` on rooms `All nodes` and `DB Servers` was **created** by `John Smith` |
+| Silencing Rule Changed | An existing alert notification silencing rule was modified on the Space. | Silencing rule `DB Servers schedule silencing` on rooms `All nodes` and `DB Servers` was **changed** by `John Doe` |
+| Silencing Rule Deleted | An existing alert notification silencing rule was removed from the Space. | Silencing rule `DB Servers schedule silencing` on rooms `All nodes` and `DB Servers` was **deleted** by `Alan Smith` |
+
### Topology events
| **Event name** | **Description** | **Example** |
diff --git a/docs/cloud/manage/plans.md b/docs/cloud/manage/plans.md
index 9180ab5a0..23077f898 100644
--- a/docs/cloud/manage/plans.md
+++ b/docs/cloud/manage/plans.md
@@ -19,7 +19,7 @@ The plan is an attribute that is directly attached to your space(s) and that dic
Netdata Cloud plans, with the exception of Community, work as subscriptions and overall consist of two pricing components:
-* A flat fee component, that is a price per space, and
+* A flat fee component, applied on yearly subscriptions for the [committed nodes](#committed-nodes) charge (the space subscription fee has been waived), and
* An on-demand metered component, that is related to your usage of Netdata which directly links to the [number of nodes you have running](#running-nodes-and-billing)
Netdata provides two billing frequency options:
@@ -55,16 +55,13 @@ If, for a given month, your usage is over these committed nodes we will charge t
It is ok to change your mind. We allow to change your plan, billing frequency or adjust the committed nodes, on yearly plans, at any time.
-To achieve this you will need to:
+To achieve this you can check the [Update plan](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/view-plan-billing.md#update-plan) section.
-* Move to the Community plan, where we will cancel the current subscription and:
- * Issue a credit to you for the unused period, in case you are on a **yearly plan**
- * Charge you only for the current used period and issue a credit for the unused period related to the flat fee, in case you are on a **monthly plan**
-* Select the new subscription with the change that you want
-
-> ⚠️ On a move to Community (cancellation of an active subscription), please note that you will have all your notification methods configurations active **for a period of 24 hours**.
+> ⚠️ On a downgrade (going to a new plan with fewer benefits) or cancellation of an active subscription, please note that you will have all your notification method configurations active **for a period of 24 hours**.
> After that, any notification methods unavailable in your new plan at that time will be automatically disabled. You can always re-enable them once you move to a paid plan that includes them.
+> ⚠️ Downgrade or cancellation may affect users in your Space. Please check what roles are available on [each plan](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md#areas-impacted-by-plans). Users with unavailable roles on the new plan will immediately have restricted access to the Space.
+
> ⚠️ Any credit given to you will be available to use on future paid subscriptions with us. It will be available until the **end of the following year**.
### Areas impacted by plans
@@ -104,7 +101,13 @@ The plan on your space will determine what type of notifications methods will be
* **Pro** - Email, Discord and webhook
* **Business** - Unlimited, this includes Slack, PagerDuty, Opsgenie etc.
-For mode details check the documentation under [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.md).
+For more details check the documentation under [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.md#alert-notifications).
+
+##### Alert notification silencing rules
+
+The plan on your space will determine if you are able to add alert notification silencing rules since this feature will only be available for paid plans: **Pro** or **Business**.
+
+For more details check the documentation under [Alert Notifications](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.md#silencing-alert-notifications).
### Related Topics
diff --git a/docs/cloud/manage/role-based-access.md b/docs/cloud/manage/role-based-access.md
index 1696e0964..a0b387749 100644
--- a/docs/cloud/manage/role-based-access.md
+++ b/docs/cloud/manage/role-based-access.md
@@ -84,6 +84,13 @@ In more detail, you can find on the following tables which functionalities are a
| Edit configuration | :heavy_check_mark: | - | - | - | - | - | Some exceptions apply depending on [service level](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md#available-actions-per-notification-methods-based-on-service-level) |
| Delete configuration | :heavy_check_mark: | - | - | - | - | - | |
| Edit personal level notification settings | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | [Manage user notification settings](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/manage-notification-methods.md#manage-user-notification-settings) |
+| See space alert notification silencing rules | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | - | |
+| Add new space alert notification silencing rule | :heavy_check_mark: | :heavy_check_mark: | - | - | - | - | |
+| Enable/Disable space alert notification silencing rule | :heavy_check_mark: | :heavy_check_mark: | - | - | - | - | |
+| Edit space alert notification silencing rule | :heavy_check_mark: | :heavy_check_mark: | - | - | - | - | |
+| Delete space alert notification silencing rule | :heavy_check_mark: | :heavy_check_mark: | - | - | - | - | |
+| See, add, edit or delete personal level alert notification silencing rule | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | |
+
Notes:
* Enable, Edit and Add actions over specific notification methods will only be allowed if your plan has access to those ([service classification](https://github.com/netdata/netdata/blob/master/docs/cloud/alerts-notifications/notifications.md#service-classification))
diff --git a/docs/cloud/manage/view-plan-billing.md b/docs/cloud/manage/view-plan-billing.md
index d29f93f98..5d381f952 100644
--- a/docs/cloud/manage/view-plan-billing.md
+++ b/docs/cloud/manage/view-plan-billing.md
@@ -13,11 +13,13 @@ To see your plan and billing setting you need:
## Steps
+### View current plan and Billing options and Invoices
+
1. Click on the **Space settings** cog (located above your profile icon)
1. Click on the **Plan & Billing** tab
1. On this page you will be presented with information on your current plan, billing settings, and usage information:
1. At the top of the page you will see:
- - **Credit** amount which refers to any amount you have available to use on future invoices or subscription changes (<https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md#plan-changes-and-credit-balance>) - this is displayed once you have had an active paid subscription with us
+ - **Credit** amount which refers to any amount you have available to use on future invoices or subscription changes ([Plan changes and credit balance](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md#plan-changes-and-credit-balance)) - this is displayed once you have had an active paid subscription with us
 - **Billing email** the email that was specified to be linked to the plan subscription. This is where invoices, payment, and subscription-related notifications will be sent.
- **Billing options and Invoices** is the link to our billing provider Customer Portal where you will be able to:
- See the current subscription. There will always be 2 subscriptions active for the two pricing components mentioned on [Netdata Plans documentation page](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md#plans)
@@ -31,19 +33,51 @@ To see your plan and billing setting you need:
- View your invoice history
1. At the middle, you'll see details on your current plan as well as means to:
- Upgrade or cancel your plan
- - View full plan details page
+ - View **All Plans** details page
1. At the bottom, you will find your Usage chart that displays:
 - Daily count - The weighted 90th percentile of the live node count during the day, taking time as the weight (see the sketch after this list). If you have 30 live nodes throughout the day
except for a two-hour peak of 44 live nodes, the daily value is 31.
- Period count: The 90th percentile of the daily counts for this period up to the date. The last value for the period is used as the number of nodes for the bill for that period. See more details in [running nodes and billing](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md#running-nodes-and-billing) (only applicable if you are on a paid plan subscription)
- Committed nodes: The number of nodes committed to in the yearly plan. In case the period count is higher than the number of committed nodes, the difference is billed as overage.
-> ⚠️ At the moment, any changes to an active paid plan, upgrades, change billing frequency or committed nodes, will be a manual two-setup flow:
->
-> 1. cancel your current subscription - move you to the Community plan
-> 2. chose the plan with the intended changes
->
-> This is a temporary process that we aim to sort out soon so that it will effortless for you to do any of these actions.
+
+### Update plan
+
+1. Click on the **Space settings** cog (located above your profile icon)
+1. Click on the **Plan & Billing** tab
+1. On this page you will be presented with your current plan, billing settings, and usage information
+   1. Depending on your plan there may be shortcuts that take you directly to a specific change, for example switching the billing frequency to **Yearly**
+   1. Most actions are available under the **Change plan** link, which takes you to the **All plans** details page, where you can:
+ 1. Downgrade or upgrade your plan
+ 1. Change the billing frequency
+ 1. Change committed nodes, in case you are on a Yearly plan
+   1. Once you choose an action to update your plan, a modal will pop up on the right with:
+      1. Billing frequency displayed on the top-right corner
+ 1. Committed Nodes, when applicable
+ 1. Current billing information:
+ - Billing email
+ - Default payment method
+ - Business name and VAT number, when these are applicable
+ - Billing Address
+      Note: Any changes to these need to be done through our billing provider's Customer Portal prior to confirming the checkout. You can click on the link **Change billing info and payment method** to access it.
+ 1. Promotion code, so you can review any applied promotion or enter one you may have
+ 1. Detailed view on Node and Space charges
+ 1. Breakdown of:
+ - Subscription Total
+ - Discount from promotion codes, if applicable
+ - credit value for Unused time from current plan, if applicable
+ - Credit amount used from balance, if applicable
+ - Total Before Tax
+ - VAT rate and amount, if applicable
+ 1. Summary of:
+ - Total payable amount
+ - credit adjustment value for any Remaining Unused time from current plan, if applicable
+ - Final credit balance
+
+Notes:
+* Since there is an active plan, you won't be redirected to our billing provider; the checkout is performed as soon as you click on **Checkout**
+* The change to your plan will be applied as soon as the checkout process is completed successfully
+* Downgrades or cancellations may impact some of your notification method settings or user access to your space. For more details, please check [Plan changes and credit balance](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md#plan-changes-and-credit-balance)
## FAQ
@@ -81,6 +115,8 @@ Every time you purchase or renew a Plan, two separate Invoices are generated:
- One Invoice includes the recurring fees of the Plan you have chosen
+  We have waived the space subscription fee ($0.00), so the only recurring fee will be on annual plans, for the committed nodes.
+
- The other Invoice includes your monthly “On Demand - Usage”.
Right after the activation of your subscription, you will receive a zero value Invoice since you had no usage when you subscribed.
@@ -90,3 +126,16 @@ Every time you purchase or renew a Plan, two separate Invoices are generated:
You can find some further details on the [Netdata Plans page](https://github.com/netdata/netdata/blob/master/docs/cloud/manage/plans.md#plans).
> ⚠️ We expect this to change to a single invoice in the future, but currently do not have a concrete timeline for when this change will happen.
+
+### 8. How is the **Total Before Tax** value calculated on plan changes?
+
+When you change your plan, we calculate the residual before-tax value of the _Unused time on your current plan_ in order to credit you with this value.
+
+After that, we perform the following calculations:
+
+1. Get the **Subscription total** (total amount to be paid for Nodes and Space)
+2. Deduct any Discount applicable from promotion codes
+3. If an amount remains, we then deduct the sum of the _Unused time on current plan_ value and the Credit amount from any existing credit balance.
+4. The result, if positive, is the Total Before Tax; if applicable, any sales tax (VAT or other) will apply on top of it.
+
+If the calculation in step 3 returns a negative amount, this amount will be your new customer credit balance.
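+
+To illustrate with a hypothetical example (all amounts invented for the walkthrough): assume a Subscription total of $240, a $24 discount from a promotion code, $60 of credit for unused time on the current plan, and a $30 existing credit balance. The Total Before Tax is then $240 - $24 - $60 - $30 = $126, and any applicable sales tax is added on top of that. If instead the deductions exceeded the subscription total, the negative remainder would become your new credit balance.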
diff --git a/docs/cloud/visualize/interact-new-charts.md b/docs/cloud/visualize/interact-new-charts.md
index 4c6c2ebf5..3707e945f 100644
--- a/docs/cloud/visualize/interact-new-charts.md
+++ b/docs/cloud/visualize/interact-new-charts.md
@@ -8,129 +8,136 @@ To make sense of all the metrics, Netdata offers an enhanced version of charts t
These charts provide a lot of useful information, so that you can:
- Enjoy the high-resolution, granular metrics collected by Netdata
-- Explore visualization with more options such as _line_, _stacked_ and _area_ types (other types like _bar_, _pie_ and
- _gauges_ are to be added shortly)
- Examine all the metrics by hovering over them with your cursor
-- Use intuitive tooling and shortcuts to pan, zoom or highlight your charts
-- On highlight, ease access
- to [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) to
- see other metrics with similar patterns
+- Filter the metrics in any way you want using the [Definition bar](#definition-bar)
+- View the combined anomaly rate of all underlying data with the [Anomaly Rate ribbon](#anomaly-rate-ribbon)
+- Explore even more details about a chart's metrics through [hovering over certain elements of it](#hover-over-the-chart)
+- Use intuitive tooling and shortcuts to pan, zoom or highlight areas of interest in your charts
+- On highlight, get easy access to [Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) to see other metrics with similar patterns
- Have the dimensions sorted based on name or value
- View information about the chart, its plugin, context, and type
-- Get the chart status and possible errors. On top, reload functionality
+- View individual metric collection status about a chart
-These charts are available on Netdata Cloud's
+These charts are available on Netdata Cloud's
[Overview tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md), Single Node tab and
on your [Custom Dashboards](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/dashboards.md).
-Some of the features listed below are also available on the simpler charts that are available on each agent's user interface.
-
## Overview
-Have a look at the can see the overall look and feel of the charts for both with a composite chart from
-the [Overview tab](https://github.com/netdata/netdata/blob/master/docs/cloud/visualize/overview.md) and a simple chart
-from the Single Node tab:
+A Netdata chart looks like this:
-<img width="678" alt="image" src="https://user-images.githubusercontent.com/43294513/220913360-f3f2ac06-b715-4e99-a933-f3bcb776636f.png"/>
+<img src="https://user-images.githubusercontent.com/70198089/236133212-353c102f-a6ed-45b7-9251-34e004c7a10a.png" width="900"/>
With a quick glance you have immediate information available at your disposal:
-- Chart title and units
-- Definition bar
-- Action bars
-- Chart area
-- Legend with dimensions
+- [Chart title and units](#title-bar)
+- [Anomaly Rate ribbon](#anomaly-rate-ribbon)
+- [Definition bar](#definition-bar)
+- [Tool bar](#tool-bar)
+- [Chart area](#hover-over-the-chart)
+- [Legend with dimensions](#dimensions-bar)
-## Play, Pause and Reset
+## Title bar
-Your charts are controlled using the available
-[Time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.md#time-controls).
-Besides these, when interacting with the chart you can also activate these controls by:
+When you start interacting with a chart, you'll notice valuable information on the top bar:
-- Hovering over any chart to temporarily pause it - this momentarily switches time control to Pause, so that you can
- hover over a specific timeframe. When moving out of the chart time control will go back to Play (if it was it's
- previous state)
-- Clicking on the chart to lock it - this enables the Pause option on the time controls, to the current timeframe. This
- is if you want to jump to a different chart to look for possible correlations.
-- Double clicking to release a previously locked chart - move the time control back to Play
+<img src="https://user-images.githubusercontent.com/70198089/236133832-fad45e65-5bd6-4fd1-8d68-33acf69fff5c.png" width="900"/>
-| Interaction | Keyboard/mouse | Touchpad/touchscreen | Time control |
-|:------------------|:---------------|:---------------------|:----------------------|
-| **Pause** a chart | `hover` | `n/a` | Temporarily **Pause** |
-| **Stop** a chart | `click` | `tap` | **Pause** |
-| **Reset** a chart | `double click` | `n/a` | **Play** |
+The elements that you can find on this top bar are:
-Note: These interactions are available when the default "Pan" action is used. Other actions are accessible via
-the [Exploration action bar](#exploration-action-bar).
+- **Netdata icon**: this indicates that data is continuously being updated; this happens if [Time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.md#time-controls) are in Play or Force Play mode.
+- **Chart title**: shows the title, the metric being displayed, and the unit of measurement.
+- **Chart status icon**: possible values are Loading, Timeout, Error or No data; otherwise this icon is not shown.
-## Title and chart action bar
+Along with viewing chart type, context and units, on this bar you have access to immediate actions over the chart:
-When you start interacting with a chart, you'll notice valuable information on the top bar. You will see information
-from the chart title to a chart action bar.
+<img src="https://user-images.githubusercontent.com/70198089/236134195-ecb08f79-1355-4bce-8449-e829f4a6b1c0.png" width="200" />
-The elements that you can find on this top bar are:
+- **Chart info**: get more information relevant to the chart you are interacting with.
+- **Chart type**: switch between the **line**, **stacked**, **area**, **stacked bar** and **multi bar** chart types.
+- **Enter fullscreen mode**: expand the current chart to the full size of your screen.
+- **Add chart to dashboard**: add the chart to an existing custom dashboard or directly create a new one that includes the chart.
-- Netdata icon: this indicates that data is continuously being updated, this happens
- if [Time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.md#time-controls)
- are in Play or Force Play mode
-- Chart status icon: indicates the status of the chart. Possible values are: Loading, Timeout, Error or No data
-- Chart title: on the chart title you can see the title together with the metric being displayed, as well as the unit of
- measurement
-- Chart action bar: here you'll have access to chart info, change chart types, enables fullscreen mode, and the ability
- to add the chart to a custom dashboard
+## Definition bar
-![image](https://user-images.githubusercontent.com/70198089/222689197-f9506ca7-a869-40a9-871f-8c4e1fa4b927.png)
+Each composite chart has a definition bar to provide information and options about the following:
+<img src="https://user-images.githubusercontent.com/70198089/236134615-e53a1d68-8a0f-466b-b2ef-1974085f0e8d.png" width="900"/>
+
+- Group by option
+- Aggregate function to be applied in case multiple data sources exist
+- Nodes filter
+- Instances filter
+- Dimensions filter
+- Labels filter
+- The aggregate function over time to be applied if one point in the chart consists of multiple data points aggregated
+- Resetting the Definition bar
+
+### NIDL framework
+
+To help users instantly understand and validate the data they see on charts, we developed the NIDL (Nodes, Instances, Dimensions, Labels) framework. This information is visualized on all charts.
+
+
+> You can explore the in-depth infographic, by clicking on this image and opening it in a new tab,
+> allowing you to zoom in to the different parts of it.
+>
+> <a href="https://user-images.githubusercontent.com/2662304/235475061-44628011-3b1f-4c44-9528-34452018eb89.png" target="_blank">
+> <img src="https://user-images.githubusercontent.com/2662304/235475061-44628011-3b1f-4c44-9528-34452018eb89.png" width="400" border="0" align="center"/>
+> </a>
-## Definition bar
-Each composite chart has a definition bar to provide information about the following:
+You can rapidly access condensed information for collected metrics, grouped by node, monitored instances, dimension, or any key/value label pair.
-* Grouping option
-* Aggregate function to be applied in case multiple data sources exist
-* Instances
-* Nodes
-* Dimensions, and
-* Aggregate function over time to be applied if one point in the chart consists of multiple data points aggregated
+At the Definition bar of each chart, there are a few dropdown menus:
-### Group by dimension, node, or chart
+<img src="https://user-images.githubusercontent.com/43294513/235470150-62a3b9ac-51ca-4c0d-81de-8804e3d733eb.png" width="900"/>
-Click on the **dimension** dropdown to change how a composite chart groups metrics.
+These dropdown menus have 2 functions:
-The default option is by _dimension_, so that each line/area in the visualization is the aggregation of a single
-dimension.
-This provides a per dimension view of the data from all the nodes in the War Room, taking into account filtering
-criteria if defined.
+1. Provide additional information about the visualized chart, to help with understanding the data that is presented.
+2. Provide filtering and grouping capabilities, altering the query on the fly, to help get different views of the dataset.
-A composite chart grouped by _node_ visualizes a single metric across contributing nodes. If the composite chart has
-five
-contributing nodes, there will be five lines/areas. This is typically an absolute value of the sum of the dimensions
-over each node but there
-are some opinionated-but-valuable exceptions where a specific dimension is selected.
-Grouping by nodes allows you to quickly understand which nodes in your infrastructure are experiencing anomalous
-behavior.
+The NIDL framework attaches metadata to every metric that is collected, providing for each of them the following consolidated data for the visible time frame:
-A composite chart grouped by _instance_ visualizes each instance of one software or hardware on a node and displays
-these as a separate dimension. By grouping the
-`disk.io` chart by _instance_, you can visualize the activity of each disk on each node that contributes to the
-composite
-chart.
+1. The volume contribution of each metric into the final query. So even if a query comes from 1000 nodes, the contribution of each node in the result can instantly be visualized. The same goes for instances, dimensions and labels. Especially for labels, Netdata also provides the volume contribution of each label `key:value` pair to the final query, so that you can immediately see how much every label value involved in the query affected the chart.
+2. The anomaly rate of each of them for the time-frame of the query. This is used to quickly spot which of the nodes, instances, dimensions or labels have anomalies in the requested time-frame.
+3. The minimum, average and maximum values of all the points used for the query. This is used to quickly spot which of the nodes, instances, dimensions or labels are responsible for a spike or a dive in the chart.
-Another very pertinent example is composite charts over contexts related to cgroups (VMs and containers). You have the
-means to change the default group by or apply filtering to
-get a better view into what data your are trying to analyze. For example, if you change the group by to _instance_ you
-get a view with the data of all the instances (cgroups) that
-contribute to that chart. Then you can use further filtering tools to focus the data that is important to you and even
-save the result to your own dashboards.
+All of these dropdown menus can be used for instantly filtering the information shown, by including or excluding specific nodes, instances, dimensions or labels, directly from the dropdown menu, without the need to edit a query string and without any additional knowledge of the underlying data.
-![image](https://user-images.githubusercontent.com/82235632/201902017-04b76701-0ff9-4498-aa9b-6d507b567bea.png)
+### Group by dropdown
-### Aggregate functions over data sources
+The "Group by" dropdown menu allows selecting 1 or more groupings to be applied at once on the same dataset.
-Each chart uses an opinionated-but-valuable default aggregate function over the data sources. For example,
-the `system.cpu` chart shows the
-average for each dimension from every contributing chart, while the `net.net` chart shows the sum for each dimension
-from every contributing chart, which can also come from multiple networking interfaces.
+<img src="https://user-images.githubusercontent.com/43294513/235468819-3af5a1d3-8619-48fb-a8b7-8e8b4cf6a8ff.png" width="900"/>
+
+It supports:
+
+1. **Group by Node**, to summarize the data of each node, and provide one dimension on the chart for each of the nodes involved. Filtering nodes is supported at the same time, using the nodes dropdown menu.
+2. **Group by Instance**, to summarize the data of each instance and provide one dimension on the chart for each of the instances involved. Filtering instances is supported at the same time, using the instances dropdown menu.
+3. **Group by Dimension**, so that each metric in the visualization is the aggregation of a single dimension. This provides a per dimension view of the data from all the nodes in the War Room, taking into account filtering criteria if defined.
+4. **Group by Label**, to summarize the data for each label value. Multiple label keys can be selected at the same time.
+
+Using this menu, you can slice and dice the data in any possible way, to quickly get different views of it, without the need to edit a query string and without any need to better understand the format of the underlying data.
+
+> ### Tip
+>
+> A very pertinent example is composite charts over contexts related to cgroups (VMs and containers).
+> You have the means to change the default group by or apply filtering to get a better view into what data you are trying to analyze.
+> For example, if you change the group by to _instance_ you get a view with the data of all the instances (cgroups) that contribute to that chart.
+> Then you can use further filtering tools to focus the data that is important to you and even save the result to your own dashboards.
+
+> ### Tip
+>
+> Group by both instance and dimension to see the time series of every individual collected metric participating in the chart.
+
+### Aggregate functions over data sources dropdown
+
+Each chart uses an opinionated-but-valuable default aggregate function over the data sources.
+
+<img src="https://user-images.githubusercontent.com/70198089/236136725-778670b4-7e81-44a8-8d3d-f38ded823c94.png" width="500"/>
+
+For example, the `system.cpu` chart shows the average for each dimension from every contributing chart, while the `net.net` chart shows the sum for each dimension from every contributing chart, which can also come from multiple networking interfaces.
The following aggregate functions are available for each selected dimension:
@@ -144,105 +151,148 @@ The following aggregate functions are available for each selected dimension:
- **Max**: Displays a maximum value. For dimensions with positive values, the max is the value with the largest
  magnitude. For charts with negative values, the max is the value closest to zero.
-### Dimensions
-
-Select which dimensions to display on the composite chart. You can choose **All dimensions**, a single dimension, or any
-number of dimensions available on that context.
+### Nodes dropdown
-### Instances
+In this dropdown, you can view or filter the nodes contributing time-series metrics to the chart.
+This menu also provides the contribution of each node to the volume of the chart, and a breakdown of the anomaly rate of the queried data per node.
-Click on **X Instances** to display a dropdown of instances and nodes contributing to that composite chart. Each line in
-the dropdown displays an instance name and the associated node's hostname.
-
-### Nodes
-
-Click on **X Nodes** to display a dropdown of nodes contributing to that composite chart. Each line displays a hostname
-to help you identify which nodes contribute to a chart. You can also use this component to filter nodes directly on the
-chart.
+<img src="https://user-images.githubusercontent.com/70198089/236137765-b57d5443-3d4b-42f4-9e3d-db1eb606626f.png" width="900"/>
If one or more nodes can't contribute to a given chart, the definition bar shows a warning symbol plus the number of
affected nodes, then lists them in the dropdown along with the associated error. Nodes might return errors because of
networking issues, a stopped `netdata` service, or because that node does not have any metrics for that context.
+### Instances dropdown
+
+In this dropdown, you can view or filter the instances contributing time-series metrics to the chart.
+This menu also provides the contribution of each instance to the volume of the chart, and a breakdown of the anomaly rate of the queried data per instance.
+
+<img src="https://user-images.githubusercontent.com/70198089/236138302-4dd4072e-3a0d-43bb-a9d8-4dde79c65e92.png" width="900"/>
+
+### Dimensions dropdown
+
+In this dropdown, you can view or filter the original dimensions contributing time-series metrics to the chart.
+This menu also presents the contribution of each original dimension on the chart, and a breakdown of the anomaly rate of the data per dimension.
+
+<img src="https://user-images.githubusercontent.com/70198089/236138796-08dc6ac6-9a50-4913-a46d-d9bbcedd48f6.png" width="900"/>
+
+
+### Labels dropdown
+
+In this dropdown, you can view or filter the contributing time-series labels of the chart.
+This menu also presents the contribution of each label on the chart, and a breakdown of the anomaly rate of the data per label.
+
+<img src="https://user-images.githubusercontent.com/70198089/236139027-8a51a958-2074-4675-a41b-efff30d8f51a.png" width="900"/>
+
### Aggregate functions over time
When the granularity of the data collected is higher than the plotted points on the chart an aggregation function over
-time
-is applied. By default the aggregation applied is _average_ but the user can choose different options from the
-following:
-
-* Min
-* Max
-* Average
-* Sum
-* Incremental sum (Delta)
-* Standard deviation
-* Median
-* Single exponential smoothing
-* Double exponential smoothing
-* Coefficient variation
-* Trimmed Median `*`
-* Trimmed Mean `*`
-* Percentile `**`
-
-> ### Info
->
-> - `*` For **Trimmed Median and Mean** you can choose the percentage of data tha you want to focus on: 1%, 2%, 3%, 5%, 10%, 15%, 20% and 25%.
-> - `**` For **Percentile** you can specify the percentile you want to focus on: 25th, 50th, 75th, 80th, 90th, 95th, 97th, 98th and 99th.
+time is applied.
+
+<img src="https://user-images.githubusercontent.com/70198089/236411297-e123db06-0117-4e24-a5ac-955b980a8f55.png" width="400"/>
+
+By default the aggregation applied is _average_ but the user can choose different options from the following:
-For more details on each, you can refer to our Agent's HTTP API details
-on [Data Queries - Data Grouping](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md#data-grouping).
+- Min, Max, Average or Sum
+- Percentile
+ - you can specify the percentile you want to focus on: 25th, 50th, 75th, 80th, 90th, 95th, 97th, 98th and 99th.
+ <img src="https://user-images.githubusercontent.com/70198089/236410299-de5f3367-f3b0-4beb-a73f-a49007c543d4.png" width="250"/>
+- Trimmed Mean or Trimmed Median
+  - you can choose the percentage of data that you want to focus on: 1%, 2%, 3%, 5%, 10%, 15%, 20% and 25%.
+ <img src="https://user-images.githubusercontent.com/70198089/236410858-74b46af9-280a-4ab2-ad26-5a6aa9403aa8.png" width="250"/>
+- Median
+- Standard deviation
+- Coefficient of variation
+- Delta
+- Single or Double exponential smoothing
+
+For more details on each, you can refer to our Agent's HTTP API details on [Data Queries - Data Grouping](https://github.com/netdata/netdata/blob/master/web/api/queries/README.md#data-grouping).
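+
+If you have direct access to an agent, you can experiment with these aggregation functions through its API (a sketch, assuming a local agent listening on the default port `19999`):
+
+```bash
+# Fetch the last 60 seconds of system.cpu reduced to 10 points,
+# aggregating the points of each dimension over time with the median.
+curl "http://localhost:19999/api/v1/data?chart=system.cpu&after=-60&points=10&group=median"
+```
+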
### Reset to defaults
-Click on the 3-dot icon (**⋮**) on any chart, then **Reset to Defaults**, to reset the definition bar to its initial
-state.
+Finally, you can reset everything to its defaults by clicking the green "Reset" prompt at the end of the definition bar.
+
+## Anomaly Rate ribbon
+
+Netdata's unsupervised machine learning algorithm creates a unique model for each metric collected by your agents, using exclusively the metric's past data.
+It then uses these unique models during data collection to predict the value that should be collected and check if the collected value is within the range of acceptable values based on past patterns and behavior.
+
+If the value collected is an outlier, it is marked as anomalous.
+
+<img src="https://user-images.githubusercontent.com/70198089/236139886-79d63cf6-61ed-4aa7-842c-b5a1728c870d.png" width="900"/>
+
+This unmatched capability of real-time predictions as data is collected allows you to **detect anomalies for potentially millions of metrics across your entire infrastructure within a second of occurrence**.
+
+The Anomaly Rate ribbon on top of each chart visualizes the combined anomaly rate of all the underlying data, highlighting areas of interest that may not be easily visible to the naked eye.
+
+Hovering over the Anomaly Rate ribbon provides a histogram of the anomaly rates per presented dimension, for the specific point in time.
+
+Anomaly Rate visualization does not make Netdata slower. Anomaly rate is saved in the Netdata database, together with metric values, and due to the smart design of Netdata, it does not even incur a disk footprint penalty.
-## Jump to single-node dashboards
+## Hover over the chart
-Click on **X Charts**/**X Nodes** to display one of the two dropdowns that list the charts and nodes contributing to a
-given composite chart. For example, the nodes dropdown.
+Hovering over any point in the chart will reveal a more informative overlay.
+It includes a bar indicating the volume percentage of each time series compared to the total, the anomaly rate, and an indication of any data collection issues.
-![The nodes dropdown in a composite chart](https://user-images.githubusercontent.com/1153921/99305049-7c019b80-2810-11eb-942a-8ebfcf236b7f.png)
+This overlay sorts all dimensions by value, bolds the dimension closest to the mouse, and presents a histogram based on the values of the dimensions.
-To jump to a single-node dashboard, click on the link icon
-<img class="img__inline img__inline--link" src="https://user-images.githubusercontent.com/1153921/95762109-1d219300-0c62-11eb-8daa-9ba509a8e71c.png" /> next to the
-node you're interested in.
+<img src="https://user-images.githubusercontent.com/70198089/236141460-bfa66b99-d63c-4a2c-84b1-2509ed94857f.png" width="500"/>
-The single-node dashboard opens in a new tab. From there, you can continue to troubleshoot or run
-[Metric Correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) for faster root
-cause analysis.
+When hovering over the Anomaly Rate ribbon, the overlay sorts all dimensions by anomaly rate, and presents a histogram of these anomaly rates.
-## Add composite charts to a dashboard
+#### Info column
-Click on the 3-dot icon (**⋮**) on any chart, then click on **Add to Dashboard**. Click the **+** button for any
-dashboard you'd like to add this composite chart to, or create a new dashboard an initiate it with your chosen chart by
-entering the name and clicking **New Dashboard**.
+Additionally, when hovering over the chart, the overlay may display an indication in the "Info" column.
-## Chart action bar
+Currently, this column is used to inform users of any data collection issues that might affect the chart.
+Below each chart, there is an information ribbon. This ribbon currently shows 3 states related to the points presented in the chart:
-On this bar you have access to immediate actions over the chart, the available actions are:
+1. **[P]: Partial Data**
+   At least one of the dimensions in the chart has partial data, meaning that not all instances available contributed data to this point. This can happen when a container is stopped, or when a node is restarted. This indicator helps you gain confidence in the dataset, in situations when unusual spikes or dives appear due to infrastructure maintenance, or due to failures in part of the infrastructure.
-- Chart info: you will be able to get more information relevant to the chart you are interacting with
-- Chart type: change the chart type from _line_, _stacked_ or _area_
-- Enter fullscreen mode: allows you expand the current chart to the full size of your screen
-- Add chart to dashboard: This allows you to add the chart to an existing custom dashboard or directly create a new one
- that includes the chart.
+2. **[O]: Overflown**
+ At least one of the data sources included in the chart has a counter that has overflowed at this point.
-<img src="https://user-images.githubusercontent.com/70198089/222689501-4116f5fe-e447-4359-83b5-62dadb33f4ef.png" width="40%" height="40%" />
+3. **[E]: Empty Data**
+ At least one of the dimensions included in the chart has no data at all for the given points.
+All these indicators are also visualized per dimension, in the pop-over that appears when hovering over the chart.
-## Exploration action bar
+<img src="https://user-images.githubusercontent.com/70198089/236145768-8ffadd02-93a4-4e9e-b4ae-c1367f614a7e.png" width="700"/>
-When exploring the chart you will see a second action bar. This action bar is there to support you on this task. The
-available actions that you can see are:
+## Play, Pause and Reset
+
+Your charts are controlled using the available [Time controls](https://github.com/netdata/netdata/blob/master/docs/dashboard/visualization-date-and-time-controls.md#time-controls).
+Besides these, when interacting with the chart you can also activate these controls by:
+
+- Hovering over any chart to temporarily pause it - this momentarily switches time control to Pause, so that you can
+  hover over a specific timeframe. When moving out of the chart, time control will go back to Play (if it was its
+  previous state)
+- Clicking on the chart to lock it - this enables the Pause option on the time controls, at the current timeframe. This
+  is useful if you want to jump to a different chart to look for possible correlations.
+- Double clicking to release a previously locked chart - move the time control back to Play
+
+| Interaction | Keyboard/mouse | Touchpad/touchscreen | Time control |
+|:------------------|:---------------|:---------------------|:----------------------|
+| **Pause** a chart | `hover` | `n/a` | Temporarily **Pause** |
+| **Stop** a chart | `click` | `tap` | **Pause** |
+| **Reset** a chart | `double click` | `n/a` | **Play** |
+
+Note: These interactions are available when the default "Pan" action is used from the [Tool Bar](#tool-bar).
+
+## Tool bar
+
+While exploring the chart, a tool bar will appear. This tool bar is there to support you in this task.
+The available manipulation tools you can select are:
+
+<img src="https://user-images.githubusercontent.com/70198089/236143292-c1d75528-263d-4ddd-9db8-b8d6a31cb83e.png" width="400" />
- Pan
- Highlight
-- Horizontal and Vertical zooms
-- In-context zoom in and out
+- Select and zoom
+- Chart zoom
+- Reset zoom
-<img src="https://user-images.githubusercontent.com/70198089/222689556-58ad77bc-924f-4c3f-b38b-fc63de2f5773.png" width="40%" height="40%" />
### Pan
@@ -258,47 +308,45 @@ it like pushing the current timeframe off the screen to see what came before or
Selecting timeframes is useful when you see an interesting spike or change in a chart and want to investigate further by:
- Looking at the same period of time on other charts/sections
-- Running [metric correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md)
- to filter metrics that also show something different in the selected period, vs the previous one
-
-<img alt="image" src="https://user-images.githubusercontent.com/43294513/221365853-1142944a-ace5-484a-a108-a205d050c594.png" />
+- Running [metric correlations](https://github.com/netdata/netdata/blob/master/docs/cloud/insights/metric-correlations.md) to filter metrics that also show something different in the selected period, vs the previous one
| Interaction | Keyboard/mouse | Touchpad/touchscreen |
|:-----------------------------------|:---------------------------------------------------------|:---------------------|
| **Highlight** a specific timeframe | `Alt + mouse selection` or `⌘ + mouse selection` (macOS) | `n/a` |
-### Zoom
+### Select and zoom
-Zooming in helps you see metrics with maximum granularity, which is useful when you're trying to diagnose the root cause
-of an anomaly or outage. Zooming out lets you see metrics within the larger context, such as the last hour, day, or
-week, which is useful in understanding what "normal" looks like, or to identify long-term trends, like a slow creep in
-memory usage.
+You can zoom to a specific timeframe, either horizontally or vertically, by making a selection on the chart.
-The actions above are _normal_ vertical zoom actions. We also provide an horizontal zoom action that helps you focus on
-a specific Y-axis area to further investigate a spike or dive on your charts.
+| Interaction | Keyboard/mouse | Touchpad/touchscreen |
+|:-------------------------------------------|:-------------------------------------|:-----------------------------------------------------|
+| **Zoom** to a specific timeframe | `Shift + mouse vertical selection` | `n/a` |
+| **Horizontal Zoom** a specific Y-axis area | `Shift + mouse horizontal selection` | `n/a` |
-![f8722ee8-e69b-426c-8bcb-6cb79897c177](https://user-images.githubusercontent.com/70198089/222689676-ad16a2a0-3c3d-48fa-87af-c40ae142dd79.gif)
+### Chart zoom
+Zooming in helps you see metrics with maximum granularity, which is useful when you're trying to diagnose the root cause
+of an anomaly or outage.
+Zooming out lets you see metrics within the larger context, such as the last hour, day, or week, which is useful in understanding what "normal" looks like, or to identify long-term trends, like a slow creep in memory usage.
| Interaction | Keyboard/mouse | Touchpad/touchscreen |
|:-------------------------------------------|:-------------------------------------|:-----------------------------------------------------|
| **Zoom** in or out | `Shift + mouse scrollwheel` | `two-finger pinch` <br />`Shift + two-finger scroll` |
-| **Zoom** to a specific timeframe | `Shift + mouse vertical selection` | `n/a` |
-| **Horizontal Zoom** a specific Y-axis area | `Shift + mouse horizontal selection` | `n/a` |
-
-You also have two direct action buttons on the exploration action bar for in-context `Zoom in` and `Zoom out`.
-## Other interactions
+## Dimensions bar
### Order dimensions legend
-The bottom legend of the chart where you can see the dimensions of the chart can now be ordered by:
+The bottom legend where you can see the dimensions of the chart can be ordered by:
+
+
+<img src="https://user-images.githubusercontent.com/70198089/236144658-6c3d0e31-9bcb-45f3-bb95-4eafdcbb0a58.png" width="300" />
+
- Dimension name (Ascending or Descending)
- Dimension value (Ascending or Descending)
-
-<img src="https://user-images.githubusercontent.com/70198089/222689791-48c77890-1093-4beb-84c2-7598353ca049.png" width="50%" height="50%" />
+- Dimension Anomaly Rate (Ascending or Descending)
### Show and hide dimensions
@@ -310,10 +358,6 @@ behaving strangely.
| **Show one** dimension and hide others | `click` | `tap` |
| **Toggle (show/hide)** one dimension | `Shift + click` | `n/a` |
-### Resize
-
-To resize the chart, click-and-drag the icon on the bottom-right corner of any chart. To restore the chart to its
-original height,
-double-click the same icon.
+## Resize a chart
-![1bcc6a0a-a58e-457b-8a0c-e5d361a3083c](https://user-images.githubusercontent.com/70198089/222689845-51a9c054-a57d-49dc-925d-39b924dae2f8.gif)
+To resize the chart, click-and-drag the icon on the bottom-right corner of any chart. To restore the chart to its original height, double-click the same icon.
diff --git a/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md b/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md
index a0e8973f7..ad747cb76 100644
--- a/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md
+++ b/docs/guides/troubleshoot/troubleshooting-agent-with-cloud-connection.md
@@ -1,31 +1,71 @@
# Troubleshoot Agent-Cloud connectivity issues
-Learn how to troubleshoot the Netdata Agent showing as offline after claiming, so you can connect the Agent to Netdata Cloud.
+Learn how to troubleshoot connectivity issues leading to agents not appearing at all in Netdata Cloud, or
+appearing with a status other than `live`.
-When you are claiming a node, you might not be able to immediately see it online in Netdata Cloud.
-This could be due to an error in the claiming process or a temporary outage of some services.
+After installing an agent with the claiming token provided by Netdata Cloud, you should see charts from that node on
+Netdata Cloud within seconds. If you don't see charts, check if the node appears in the list of nodes
+(Nodes tab, top right Node filter, or Manage Nodes screen). If your node does not appear in the list, or it does appear with a status other than "Live", this guide will help you troubleshoot what's happening.
-We identified some scenarios that might cause this delay and possible actions you could take to overcome each situation.
+The most common explanations for connectivity issues usually fall into one of the following three categories:
-The most common explanation for the delay usually falls into one of the following three categories:
+- If the node does not appear at all in Netdata Cloud, [the claiming process was unsuccessful](#the-claiming-process-was-unsuccessful).
+- If the node appears in Netdata Cloud, but is in the "Unseen" state, [the Agent was claimed but can not connect](#the-agent-was-claimed-but-can-not-connect).
+- If the node appears in Netdata Cloud as "Offline" or "Stale", it is a [previously connected agent that can no longer connect](#previously-connected-agent-that-can-no-longer-connect).
-- [Troubleshoot Agent-Cloud connectivity issues](#troubleshoot-agent-cloud-connectivity-issues)
- - [The claiming process of the kickstart script was unsuccessful](#the-claiming-process-of-the-kickstart-script-was-unsuccessful)
- - [The kickstart script auto-claimed the Agent but there was no error message displayed](#the-kickstart-script-auto-claimed-the-agent-but-there-was-no-error-message-displayed)
- - [Claiming on an older, deprecated version of the Agent](#claiming-on-an-older-deprecated-version-of-the-agent)
- - [Network issues while connecting to the Cloud](#network-issues-while-connecting-to-the-cloud)
- - [Verify that your IP is whitelisted from Netdata Cloud](#verify-that-your-ip-is-whitelisted-from-netdata-cloud)
- - [Make sure that your node has internet connectivity and can resolve network domains](#make-sure-that-your-node-has-internet-connectivity-and-can-resolve-network-domains)
+## The claiming process was unsuccessful
-## The claiming process of the kickstart script was unsuccessful
+If the claiming process fails, the node will not appear at all in Netdata Cloud.
-Here, we will try to define some edge cases you might encounter when claiming a node.
+First ensure that:
+
+- You use the newest possible stable or nightly version of the agent (at least v1.32).
+- Your node can successfully issue an HTTPS request to https://api.netdata.cloud
-### The kickstart script auto-claimed the Agent but there was no error message displayed
+Other possible causes differ between kickstart installations and Docker installations.
-The kickstart script will install/update your Agent and then try to claim the node to the Cloud (if tokens are provided). To
-complete the second part, the Agent must be running. In some platforms, the Netdata service cannot be enabled by default
-and you must do it manually, using the following steps:
+### Verify your node can access Netdata Cloud
+
+If you run either `curl` or `wget` to make an HTTPS request to https://api.netdata.cloud, you should get
+back a 404 response. If you do not, check your network connectivity, domain resolution,
+and firewall settings for outbound connections.
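+
+For example, a quick check with `curl` (a sketch; `wget` works similarly):
+
+```bash
+# Prints only the HTTP status code of the response; expect 404.
+curl -s -o /dev/null -w "%{http_code}\n" https://api.netdata.cloud
+```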
+
+If your firewall is configured to completely prevent outbound connections, you need to whitelist `api.netdata.cloud` and `mqtt.netdata.cloud`. If you can't whitelist domains in your firewall, you can whitelist the IPs that the hostnames resolve to, but keep in mind that they can change without any notice.
+
+If you use an outbound proxy, you need to [take some extra steps](https://github.com/netdata/netdata/blob/master/claim/README.md#connect-through-a-proxy).
+
+### Troubleshoot claiming with kickstart.sh
+
+Claiming is done by executing `netdata-claim.sh`, a script that is usually located at `${INSTALL_PREFIX}/netdata/usr/sbin/netdata-claim.sh`. Possible error conditions we have identified are:
+
+- No script found at all in any of our search paths.
+- The path where the claiming script should be does not exist.
+- The path exists, but is not a file.
+- The path is a file, but is not executable.
+
+Check the output of the kickstart script for any reported claiming errors and verify that the claiming script exists
+and can be executed.
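+
+A quick sanity check (assuming the default install layout; adjust the path or `INSTALL_PREFIX` for your setup):
+
+```bash
+# Succeeds only if the claiming script exists and is executable.
+test -x "${INSTALL_PREFIX}/netdata/usr/sbin/netdata-claim.sh" && echo "claim script OK" || echo "claim script missing or not executable"
+```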
+
+### Troubleshoot claiming with Docker
+
+First verify that the `NETDATA_CLAIM_TOKEN` parameter is correctly configured and then check for any errors during
+the initialization of the container.
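+
+For example, assuming the container is named `netdata`, you can scan its output for claiming messages:
+
+```bash
+# Look for claiming-related log lines in the container's output.
+docker logs netdata 2>&1 | grep -i claim
+```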
+
+The most common issue we have seen claiming nodes in Docker is [running on older hosts with seccomp enabled](https://github.com/netdata/netdata/blob/master/claim/README.md#known-issues-on-older-hosts-with-seccomp-enabled).
+
+## The Agent was claimed but can not connect
+
+Agents that appear on the cloud with state "Unseen" have successfully been claimed, but have never
+been able to establish an ACLK connection.
+
+Agents that appear with state "Offline" or "Stale" were able to connect at some point, but are currently not
+connected. The difference between the two is that "Stale" nodes had some of their data replicated to a
+parent node that is still connected.
+
+### Verify that the agent is running
+
+#### Troubleshoot connection establishment with kickstart.sh
+
+The kickstart script will install/update your Agent and then try to claim the node to the Cloud
+(if tokens are provided). To complete the second part, the Agent must be running. On some platforms,
+the Netdata service cannot be enabled by default and you must do it manually, using the following steps:
1. Check if the Agent is running:
@@ -53,17 +93,39 @@ and you must do it manually, using the following steps:
> In some cases a simple restart of the Agent can fix the issue.
> Read more about [Starting, Stopping and Restarting the Agent](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md).
-## Claiming on an older, deprecated version of the Agent
+#### Troubleshoot connection establishment with Docker
+
+If a Netdata container exits or is killed before it properly starts, it may be able to complete the claiming
+process, but not have enough time to establish the ACLK connection.
+
+### Verify that your firewall allows websockets
+
+The agent initiates an SSL connection to `api.netdata.cloud` and then upgrades that connection to use secure
+websockets. Some firewalls completely prevent the use of websockets, even for outbound connections.
+
+## Previously connected agent that can no longer connect
-Make sure that you are using the latest version of Netdata if you are using the [Claiming script](https://github.com/netdata/netdata/blob/master/claim/README.md#claiming-script).
+The states "Offline" and "Stale" suggest that the agent was able to connect at some point in the past, but
+that it is currently not connected.
-With the introduction of our new architecture, Agents running versions lower than `v1.32.0` can face claiming problems, so we recommend you [update the Netdata Agent](https://github.com/netdata/netdata/blob/master/packaging/installer/UPDATE.md) to the latest stable version.
+### Verify that network connectivity is still possible
-## Network issues while connecting to the Cloud
+Verify that you can still issue HTTPS requests to api.netdata.cloud and that no firewall or proxy changes were made.
-### Verify that your IP is whitelisted from Netdata Cloud
+### Verify that the claiming info is persisted
-Most of the nodes change IPs dynamically. It is possible that your current IP has been restricted from accessing `api.netdata.cloud` due to security concerns.
+If you use Docker, verify that the contents of `/var/lib/netdata` are preserved across container restarts, using a persistent volume.
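+
+A minimal sketch of such a setup (the volume name `netdatalib` is illustrative):
+
+```bash
+# Mount a named volume over /var/lib/netdata so the claiming information
+# survives container re-creation.
+docker run -d --name netdata -v netdatalib:/var/lib/netdata netdata/netdata
+```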
+
+### Verify that the claiming info is not cloned
+
+A relatively common case we have seen, especially with VMs, is two or more nodes sharing the same credentials.
+This happens if you claim a node in a VM and then create an image based on that node. Netdata can't properly
+work this way, as the unique node identification information stored under `/var/lib/netdata` must differ between nodes.
+
+### Verify that your IP is not blocked by Netdata Cloud
+
+Most of the nodes change IPs dynamically. It is possible that your current IP has been restricted from accessing `api.netdata.cloud` due to security concerns, usually because it was spamming Netdata Cloud with too many
+failed requests (old versions of the agent).
To verify this:
@@ -83,31 +145,3 @@ To verify this:
- Contact our team to whitelist your IP by submitting a ticket in the [Netdata forum](https://community.netdata.cloud/)
- Change your node's IP
-
-### Make sure that your node has internet connectivity and can resolve network domains
-
-1. Try to reach a well known host:
-
- ```bash
- ping 8.8.8.8
- ```
-
-2. If you can reach external IPs, then check your domain resolution.
-
- ```bash
- host api.netdata.cloud
- ```
-
- The expected output should be something like this:
-
- ```bash
- api.netdata.cloud is an alias for main-ingress-545609a41fcaf5d6.elb.us-east-1.amazonaws.com.
- main-ingress-545609a41fcaf5d6.elb.us-east-1.amazonaws.com has address 54.198.178.11
- main-ingress-545609a41fcaf5d6.elb.us-east-1.amazonaws.com has address 44.207.131.212
- main-ingress-545609a41fcaf5d6.elb.us-east-1.amazonaws.com has address 44.196.50.41
- ```
-
- > ### Info
- >
- > There will be cases in which the firewall restricts network access. In those cases, you need to whitelist `api.netdata.cloud` and `mqtt.netdata.cloud` domains to be able to see your nodes in Netdata Cloud.
- > If you can't whitelist domains in your firewall, you can whitelist the IPs that the above command will produce, but keep in mind that they can change without any notice.
diff --git a/docs/netdata-security.md b/docs/netdata-security.md
index 6cd33c061..2716e08e2 100644
--- a/docs/netdata-security.md
+++ b/docs/netdata-security.md
@@ -1,196 +1,429 @@
# Security and privacy design
-This document serves as the relevant Annex to the [Terms of Service](https://www.netdata.cloud/service-terms/), the [Privacy Policy](https://www.netdata.cloud/privacy/) and
-the Data Processing Addendum, when applicable. It provides more information regarding Netdata’s technical and organizational security and privacy measures.
+This document serves as the relevant Annex to the [Terms of Service](https://www.netdata.cloud/service-terms/),
+the [Privacy Policy](https://www.netdata.cloud/privacy/) and
+the Data Processing Addendum, when applicable. It provides more information regarding Netdata’s technical and
+organizational security and privacy measures.
-We have given special attention to all aspects of Netdata, ensuring that everything throughout its operation is as secure as possible. Netdata has been designed with security in mind.
+We have given special attention to all aspects of Netdata, ensuring that everything throughout its operation is as
+secure as possible. Netdata has been designed with security in mind.
-> When running Netdata in environments requiring Payment Card Industry Data Security Standard (**PCI DSS**), Systems and Organization Controls (**SOC 2**),
-or Health Insurance Portability and Accountability Act (**HIPAA**) compliance, please keep in mind that
-**even when the user uses Netdata Cloud, all collected data is always stored inside their infrastructure**.
+## Netdata's Security Principles
-Dashboard data a user views and alert notifications do travel
-over Netdata Cloud, as they also travel over third party networks, to reach the user's web browser or the notification integrations the user has configured,
-but Netdata Cloud does not store metric data. It only transforms them as they pass through it, aggregating them from multiple Agents and Parents,
-to appear as one data source on the user's browser.
+### Security by Design
-## Cloud design
+Netdata, an open-source software solution widely installed across the globe, prioritizes security by design, showcasing our
+commitment to safeguarding user data. The entire structure and internal architecture of the software is built to ensure
+maximum security. We aim to provide a secure environment from the ground up, rather than as an afterthought.
-### User identification and authorization
+### Compliance with Open Source Security Foundation Best Practices
-Netdata ensures that only an email address is stored to create an account and use the Service.
-User identification and authorization is done
-either via third parties (Google, GitHub accounts), or short-lived access tokens, sent to the user’s email account.
+Netdata is committed to adhering to the best practices laid out by the Open Source Security Foundation (OSSF).
+Currently, the Netdata Agent follows the OSSF best practices at the passing level. Feel free to audit our approach to
+the [OSSF guidelines](https://bestpractices.coreinfrastructure.org/en/projects/2231).
-### Personal Data stored
+Netdata Cloud boasts comprehensive end-to-end automated testing, encompassing the UI, back-end, and agents, where
+involved. In addition, the Netdata Agent uses an array of third-party services for static code analysis, static code
+security analysis, and CI/CD integrations to ensure code quality on a per pull request basis. Tools like GitHub's
+CodeQL, GitHub's Dependabot, our own unit tests, various types of linters,
+and [Coverity](https://scan.coverity.com/projects/netdata-netdata?tab=overview) are utilized to this end.
-Netdata ensures that only an email address is stored to create an account and use the Service. The same email
-address is used for Netdata product and marketing communications (via Hubspot and Sendgrid).
+Moreover, each PR requires two code reviews from our senior engineers before being merged. We also maintain two
+high-performance environments (a production-like kubernetes cluster and a highly demanding stress lab) for
+stress-testing our entire solution. This robust pipeline ensures the delivery of high-quality software consistently.
-Email addresses are stored in our production database on AWS and copied to Google BigQuery, our data lake,
-for analytics purposes. These analytics are crucial for our product development process.
+### Regular Third-Party Testing and Isolation
-If the user accepts the use of analytical cookies, the email address is also stored in the systems we use to track the
-usage of the application (Posthog and Gainsight PX)
+While Netdata doesn't have a dedicated internal security team, the open-source Netdata Agent undergoes regular testing
+by third parties. Any security reports received are addressed immediately. In contrast, Netdata Cloud operates in a
+fully automated and isolated environment with Infrastructure as Code (IaC), ensuring no direct access to production
+applications. Monitoring and reporting are also fully automated.
-The IP address used to access Netdata Cloud is stored in web proxy access logs. If the user accepts the use of analytical
-cookies, the IP is also stored in the systems we use to track the usage of the application (Posthog and Gainsight PX).
+### Security Vulnerability Response
-### Infrastructure data stored
+Netdata has a transparent and structured process for handling security vulnerabilities. We appreciate and value the
+contributions of security researchers and users who report vulnerabilities to us. All reports are thoroughly
+investigated, and any identified vulnerabilities trigger a Security Release Process.
-The metric data that a user sees in the web browser when using Netdata Cloud is streamed directly from the Netdata Agent
-to the Netdata Cloud dashboard, via the Agent-Cloud link (see [data transfer](#data-transfer)). The data passes through our systems, but it isn’t stored.
+We aim to fully disclose any bugs as soon as a user mitigation is available, typically within a week of the report. In
+case of security fixes, we promptly release a new version of the software. Users can subscribe to our releases on GitHub
+to stay updated about all security incidents. More details about our vulnerability response process can be
+found [here](https://github.com/netdata/netdata/security/policy).
-The metadata we do store for each node connected to the user's Spaces in Netdata Cloud is:
- - Hostname (as it appears in Netdata Cloud)
- - Information shown in `/api/v1/info`. For example: [https://frankfurt.my-netdata.io/api/v1/info](https://frankfurt.my-netdata.io/api/v1/info).
- - Metric metadata information shown in `/api/v1/contexts`. For example: [https://frankfurt.my-netdata.io/api/v1/contexts](https://frankfurt.my-netdata.io/api/v1/contexts).
- - Alarm configurations shown in `/api/v1/alarms?all`. For example: [https://frankfurt.my-netdata.io/api/v1/alarms?all](https://frankfurt.my-netdata.io/api/v1/alarms?all).
- - Active alarms shown in `/api/v1/alarms`. For example: [https://frankfurt.my-netdata.io/api/v1/alarms](https://frankfurt.my-netdata.io/api/v1/alarms).
+### Adherence to Open Source Security Foundation Best Practices
-The infrastructure data is stored in our production database on AWS and copied to Google BigQuery, our data lake, for
- analytics purposes.
+In line with our commitment to security, we uphold the best practices as outlined by the Open Source Security
+Foundation. This commitment reflects in every aspect of our operations, from the design phase to the release process,
+ensuring the delivery of a secure and reliable product to our users. For more information
+check [here](https://bestpractices.coreinfrastructure.org/en/projects/2231).
-### Data transfer
+## Netdata Agent Security
-All infrastructure data visible on Netdata Cloud has to pass through the Agent-Cloud link (ACLK) mechanism, which
-securely connects a Netdata Agent to Netdata Cloud. The Netdata agent initiates and establishes an outgoing secure
-WebSocket (WSS) connection to Netdata Cloud. The ACLK is encrypted, safe, and is only established if the user connects their node.
+### Security by Design
-Data is encrypted when in transit between a user and Netdata Cloud using TLS.
+Netdata Agent is designed with a security-first approach. Its structure ensures data safety by only exposing chart
+metadata and metric values, not the raw data collected. This design principle allows Netdata to be used in environments
+requiring the highest level of data isolation, such as PCI Level 1. Even though Netdata plugins connect to a user's
+database server or read application log files to collect raw data, only the processed metrics are stored in Netdata
+databases, sent to upstream Netdata servers, or archived to external time-series databases.
-### Data retention
+### User Data Protection
-Netdata may maintain backups of Netdata Cloud Customer Content, which would remain in place for approximately ninety
-(90) days following a deletion in Netdata Cloud.
+The Netdata Agent is designed to safeguard user data. Raw data never leaves the host on which it is collected. All
+plugins, even those running with escalated capabilities or privileges, perform a hard-coded data collection job. They do
+not accept commands from Netdata, and the original application data they collect never leave the process in which they
+are collected, are not saved, and are not transferred to the Netdata daemon. For the “Functions” feature, data
+collection plugins expose predefined Functions, and the user interface can only invoke those, exactly as defined by the
+data collector. The Netdata Agent main process does not require any escalated capabilities or privileges from the
+operating system, and neither do most of the data collection plugins.
-### Data portability and erasure
+### Communication and Data Encryption
-Netdata will, as necessary to enable the Customer to meet its obligations under Data Protection Law, provide the Customer
-via the availability of Netdata Cloud with the ability to access, retrieve, correct and delete the Personal Data stored in
-Netdata Cloud. The Customer acknowledges that such ability may from time to time be limited due to temporary service outages
-for maintenance or other updates to Netdata Cloud, or technically not feasible.
+Data collection plugins communicate with the main Netdata process via ephemeral, in-memory pipes that are inaccessible
+to any other process.
-To the extent that the Customer, in its fulfillment of its Data Protection Law obligations, is unable to access, retrieve,
-correct or delete Customer Personal Data in Netdata Cloud due to prolonged unavailability of Netdata Cloud due to an issue
-within Netdata’s control, Netdata will where possible use reasonable efforts to provide, correct or delete such Customer Personal Data.
+Streaming of metrics between Netdata agents requires an API key and can also be encrypted with TLS if the user
+configures it.
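+
+As an illustration only, a minimal child-side `stream.conf` sketch could look like the following (the destination host
+and the API key are placeholders; the `:SSL` suffix requests TLS for the stream):
+
+```conf
+# child agent: stream.conf (illustrative values)
+[stream]
+    enabled = yes
+    # :SSL asks the child to encrypt the metrics stream with TLS
+    destination = parent.example.com:19999:SSL
+    # must match an API key section enabled on the receiving parent
+    api key = 11111111-2222-3333-4444-555555555555
+```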
-If a Customer is unable to delete Personal Data via the self-services functionality, then Netdata deletes Personal Data upon
-the Customer’s written request, within the timeframe specified in the DPA and in accordance with applicable data protection law.
+The Netdata agent's web API can also use TLS if configured.
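+
+As a sketch, assuming certificate and key files already exist at the paths shown, TLS for the agent's web API is
+enabled with options along these lines in `netdata.conf`:
+
+```conf
+# netdata.conf (illustrative paths)
+[web]
+    ssl key = /etc/netdata/ssl/key.pem
+    ssl certificate = /etc/netdata/ssl/cert.pem
+```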
-#### Delete all personal data
+When Netdata agents are claimed to Netdata Cloud, the communication happens via MQTT over WebSockets over TLS, and
+public/private keys are used for authorizing access. These keys are exchanged during the claiming process (usually
+during the provisioning of each agent).
-To remove all personal info we have about a user (email and activities) they need to delete their cloud account by logging into https://app.netdata.cloud and accessing their profile, at the bottom left of the screen.
+### Authentication
+Direct user access to the agent is not authenticated, on the assumption that users either access it through Netdata
+Cloud, are already on the same LAN, or have configured proper firewall policies. However, Netdata agents can be hidden
+behind an authenticating web proxy if required.
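+
+For example, a minimal nginx sketch that hides an agent behind HTTP basic authentication could look like this (the
+host name, certificate paths, and password file are hypothetical):
+
+```conf
+# nginx reverse proxy sketch (hypothetical values)
+server {
+    listen 443 ssl;
+    server_name netdata.example.com;
+    ssl_certificate     /etc/nginx/ssl/netdata.crt;
+    ssl_certificate_key /etc/nginx/ssl/netdata.key;
+
+    location / {
+        # require a username/password before anything reaches the agent
+        auth_basic "Netdata";
+        auth_basic_user_file /etc/nginx/netdata.htpasswd;
+        proxy_pass http://127.0.0.1:19999/;
+    }
+}
+```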
-## Agent design
+For other Netdata agents streaming metrics to an agent, authentication via API keys is required and TLS can be used if
+configured.
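+
+On the receiving side, a parent accepts a child's stream by enabling a section named after the child's API key. A
+sketch, using a placeholder key:
+
+```conf
+# parent agent: stream.conf (illustrative values)
+[11111111-2222-3333-4444-555555555555]
+    # accept streams from children presenting this API key
+    enabled = yes
+```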
-### User data is safe with Netdata
+For Netdata Cloud accessing Netdata agents, public/private key cryptography is used and TLS is mandatory.
-Netdata collects raw data from many sources. For each source, Netdata uses a plugin that connects to the source (or reads the
-relative files produced by the source), receives raw data and processes them to calculate the metrics shown on Netdata dashboards.
+### Security Vulnerability Response
-Even if Netdata plugins connect to the user's database server, or read user's application log file to collect raw data, the product of
-this data collection process is always a number of **chart metadata and metric values** (summarized data for dashboard visualization).
-All Netdata plugins (internal to the Netdata daemon, and external ones written in any computer language), convert raw data collected
-into metrics, and only these metrics are stored in Netdata databases, sent to upstream Netdata servers, or archived to external
-time-series databases.
+If a security vulnerability is found in the Netdata Agent, the Netdata team acknowledges and analyzes each report within
+three working days, kicking off a Security Release Process. Any vulnerability information shared with the Netdata team
+stays within the Netdata project and is not disseminated to other projects unless necessary for fixing the issue. The
+reporter is kept updated as the security issue moves from triage to identified fix, to release planning. More
+information can be found [here](https://github.com/netdata/netdata/security/policy).
-The **raw data** collected by Netdata does not leave the host when collected. **The only data Netdata exposes are chart metadata and metric values.**
+### Protection Against Common Security Threats
-This means that Netdata can safely be used in environments that require the highest level of data isolation (like PCI Level 1).
+The Netdata agent is resilient against common security threats such as DDoS attacks and SQL injections. For DDoS, the
+Netdata agent uses a fixed number of threads for processing requests, providing a cap on the resources that can be
+consumed. It also automatically manages its memory to prevent overutilization. SQL injections are prevented because
+nothing from the UI is passed back to the data collection plugins that access databases.
-### User systems are safe with Netdata
+Additionally, the Netdata agent runs as a normal, unprivileged operating system user (a few data collection jobs
+require escalated privileges, but those privileges are isolated to just them). Every Netdata process runs by default
+with a nice priority, to protect production applications if the system is starved for CPU resources, and Netdata
+agents are configured by default to be the first processes the operating system kills when it runs out of memory
+(OS OOM, Operating System Out Of Memory events).
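+
+As an indication only, these defaults correspond to `netdata.conf` options along these lines (the values shown are
+indicative, not authoritative):
+
+```conf
+# netdata.conf (indicative defaults)
+[global]
+    # run every Netdata process with a low scheduling priority
+    process nice level = 19
+    # volunteer to be killed first on OS OOM events
+    OOM score = 1000
+```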
-We are very proud that **the Netdata daemon runs as a normal system user, without any special privileges**. This is quite an
-achievement for a monitoring system that collects all kinds of system and application metrics.
+### User Customizable Security Settings
-There are a few cases, however, that raw source data are only exposed to processes with escalated privileges. To support these
-cases, Netdata attempts to minimize and completely isolate the code that runs with escalated privileges.
+Netdata provides users with the flexibility to customize agent security settings. Users can configure TLS across the
+system, and the agent provides extensive access control lists on all its interfaces to limit access to its endpoints
+based on IP address. Additionally, users can configure the CPU and memory priority of Netdata agents.
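+
+A sketch of such IP-based access control lists in `netdata.conf` (the networks shown are placeholders):
+
+```conf
+# netdata.conf (illustrative networks)
+[web]
+    # who may open a connection to the agent at all
+    allow connections from = localhost 10.* 192.168.*
+    # who may access the dashboard and the API
+    allow dashboard from = localhost 192.168.*
+```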
-So, Netdata **plugins**, even those running with escalated capabilities or privileges, perform a **hard coded data collection job**.
-They do not accept commands from Netdata. The communication is **unidirectional** from the plugin towards the Netdata daemon, except
-for Functions (see below). The original application data collected by each plugin do not leave the process they are collected, are
-not saved and are not transferred to the Netdata daemon. The communication from the plugins to the Netdata daemon includes only chart
-metadata and processed metric values.
+## Netdata Cloud Security
-Child nodes use the same protocol when streaming metrics to their parent nodes. The raw data collected by the plugins of
-child Netdata servers are **never leaving the host they are collected**. The only data appearing on the wire are chart
-metadata and metric values. This communication is also **unidirectional**: child nodes never accept commands from
-parent Netdata servers (except for Functions).
+Netdata Cloud is designed with a security-first approach to ensure the highest level of protection for user data. When
+using Netdata Cloud in environments that require compliance with standards like PCI DSS, SOC 2, or HIPAA, users can be
+confident that all collected data is stored within their infrastructure. Data viewed on dashboards and alert
+notifications travel over Netdata Cloud, but are not stored there. Instead, they are transformed in transit, aggregated
+from multiple agents and parents (centralization points), to appear as one data source in the user's browser.
-[Functions](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) is currently
-the only feature that routes requests back to origin Netdata Agents via Netdata Parents. The feature allows Netdata Cloud to send
-a request to the Netdata Agent data collection plugin running at the
-edge, to provide additional information, such as the process tree of a server, or the long queries of a DB.
+### User Identification and Authorization
-<!-- The user has full control over the available functions. For more information see “Controlling Access to Functions” and “Disabling Functions”. -->
+Netdata Cloud requires only an email address to create an account and use the service. User identification and
+authorization are conducted either via third-party integrations (Google, GitHub accounts) or through short-lived access
+tokens sent to the user’s email account. Email addresses are stored securely in our production database on AWS and are
+also used for product and marketing communications. Netdata Cloud does not store user credentials.
-### Netdata is read-only
+### Data Storage and Transfer
-Netdata **dashboards are read-only**. Dashboard users can view and examine metrics collected by Netdata, but cannot
-instruct Netdata to do something other than present the already collected metrics.
+Although Netdata Cloud does not store metric data, it does keep some metadata for each node connected to user spaces.
+This metadata includes the hostname, information from the `/api/v1/info` endpoint, metric metadata
+from `/api/v1/contexts`, and alert configurations from `/api/v1/alarms`. This data is securely stored in our production
+database on AWS and copied to Google BigQuery for analytics purposes.
-Netdata dashboards do not expose sensitive information. Business data of any kind, the kernel version, O/S version,
-application versions, host IPs, etc. are not stored and are not exposed by Netdata on its dashboards.
+All data visible on Netdata Cloud is transferred through the Agent-Cloud link (ACLK) mechanism, which securely connects
+a Netdata Agent to Netdata Cloud. The ACLK is encrypted and safe, and is only established if the user connects/claims
+their node. Data in transit between a user and Netdata Cloud is encrypted using TLS.
-### Protect Netdata from the internet
+### Data Retention and Erasure
-Users are responsible to take all appropriate measures to secure their Netdata agent installations and especially the Netdata web user interface and API against unauthorized access. Netdata comes with a wide range of options to
-[secure user nodes](https://github.com/netdata/netdata/blob/master/docs/category-overview-pages/secure-nodes.md) in
-compliance with the user organization's security policy.
+Netdata Cloud maintains backups of customer content for approximately 90 days following a deletion. Users have the
+ability to access, retrieve, correct, and delete personal data stored in Netdata Cloud. If a user is unable to
+delete personal data via the self-service functionality, Netdata will delete personal data upon the customer's written
+request, in accordance with applicable data protection law.
-### Anonymous statistics
+### Infrastructure and Authentication
-#### Netdata registry
+Netdata Cloud operates on an Infrastructure as Code (IaC) model. Its microservices environment is completely isolated,
+and all changes occur through Terraform. At the edge of Netdata Cloud, there is a TLS termination point and an Identity
+and Access Management (IAM) service that validates JWT tokens included in request cookies.
-The default configuration uses a public [registry](https://github.com/netdata/netdata/blob/master/registry/README.md) under registry.my-netdata.io.
-If the user uses that public registry, they submit the following information to a third party server:
- - The URL of the agent's web user interface (via http request referrer)
- - The hostnames of the user's Netdata servers
+Netdata Cloud does not store user credentials.
-If sending this information to the central Netdata registry violates user's security policies, they can configure Netdata to
-[run their own registry](https://github.com/netdata/netdata/blob/master/registry/README.md#run-your-own-registry).
+### Security Features and Response
-#### Anonymous telemetry events
+Netdata Cloud offers a variety of security features, including infrastructure-level dashboards, centralized alert
+notifications, auditing logs, and role-based access to different segments of the infrastructure. The cloud service
+employs several protection mechanisms against DDoS attacks, such as rate-limiting and automated blacklisting. It also
+uses static code analysers to prevent other types of attacks.
-Starting with v1.30, Netdata collects anonymous usage information by default and sends it to a self hosted PostHog instance within the Netdata infrastructure. Read
-about the information collected and learn how to opt-out, on our
-[anonymous telemetry events](https://github.com/netdata/netdata/blob/master/docs/anonymous-statistics.md) page.
+In the event of potential security vulnerabilities or incidents, Netdata Cloud follows the same process as the Netdata
+agent. Every report is acknowledged and analyzed by the Netdata team within three working days, and the team keeps the
+reporter updated throughout the process.
-### Netdata directories
+### User Customization
-The agent stores data in 6 different directories on the user's system.
-
-| path|owner|permissions|Netdata|comments|
-|:---|:----|:----------|:------|:-------|
-| `/etc/netdata`|user `root`<br/>group `netdata`|dirs `0755`<br/>files `0640`|reads|**Netdata config files**<br/>may contain sensitive information, so group `netdata` is allowed to read them.|
-| `/usr/libexec/netdata`|user `root`<br/>group `root`|executable by anyone<br/>dirs `0755`<br/>files `0644` or `0755`|executes|**Netdata plugins**<br/>permissions depend on the file - not all of them should have the executable flag.<br/>there are a few plugins that run with escalated privileges (Linux capabilities or `setuid`) - these plugins should be executable only by group `netdata`.|
-| `/usr/share/netdata`|user `root`<br/>group `netdata`|readable by anyone<br/>dirs `0755`<br/>files `0644`|reads and sends over the network|**Netdata web static files**<br/>these files are sent over the network to anyone that has access to the Netdata web server. Netdata checks the ownership of these files (using settings at the `[web]` section of `netdata.conf`) and refuses to serve them if they are not properly owned. Symbolic links are not supported. Netdata also refuses to serve URLs with `..` in their name.|
-| `/var/cache/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata ephemeral database files**<br/>Netdata stores its ephemeral real-time database here.|
-| `/var/lib/netdata`|user `netdata`<br/>group `netdata`|dirs `0750`<br/>files `0660`|reads, writes, creates, deletes|**Netdata permanent database files**<br/>Netdata stores here the registry data, health alarm log db, etc.|
-| `/var/log/netdata`|user `netdata`<br/>group `root`|dirs `0755`<br/>files `0644`|writes, creates|**Netdata log files**<br/>all the Netdata applications, logs their errors or other informational messages to files in this directory. These files should be log rotated.|
+Netdata Cloud applies the highest level of security by default, so no user customization is available out of the box.
+Its security settings are designed to provide maximum protection for all users. We offer customization (such as custom
+SSO integrations, custom data retention policies, advanced user access controls, tailored audit logs, and integration
+with other security tools) on a per-contract basis.
-## Organization processes
+### Deleting Personal Data
-### Employee identification and authorization
+Users who wish to remove all personal data (including email and activities) can delete their cloud account by logging
+into Netdata Cloud and accessing their profile.
-Netdata operates technical and organizational measures for employee identification and authentication, such as logs, policies,
-assigning distinct usernames for each employee and utilizing password complexity requirements for access to all platforms.
+## User Privacy and Data Protection
-The COO or HR are the primary system owners for all platforms and may designate additional system owners, as needed. Additional
-user access is also established on a role basis, requires the system owner’s approval, and is tracked by HR. User access to each
-platform is subject to periodic review and testing. When an employee changes roles, HR updates the employee’s access to all systems.
-Netdata uses on-boarding and off-boarding processes to regulate access by Netdata Personnel.
+Netdata Cloud is built with an unwavering commitment to user privacy and data protection. We understand that our users'
+data is both sensitive and valuable, and we have implemented stringent measures to ensure its safety.
-Second-layer authentication is employed where available, by way of multi-factor authentication.
+### Data Collection
-Netdata’s IT control environment is based upon industry-accepted concepts, such as multiple layers of preventive and detective
-controls, working in concert to provide for the overall protection of Netdata’s computing environment and data assets.
+Netdata Cloud collects minimal personal information from its users. The only personal data required to create an account
+and use the service is an email address. This email address is used for product and marketing communications.
+Additionally, the IP address used to access Netdata Cloud is stored in web proxy access logs.
-### Systems security
+### Data Usage
+
+The collected email addresses are stored in our production database on Amazon Web Services (AWS) and copied to Google
+BigQuery, our data lake, for analytics purposes. These analytics are crucial for our product development process. If a
+user accepts the use of analytical cookies, their email address and IP address are stored in the systems we use to
+track application usage (Google Analytics, PostHog, and Gainsight PX). Subscription and payment data are handled by
+Stripe.
+
+### Data Sharing
+
+Netdata Cloud does not share any personal data with third parties, ensuring the privacy of our users' data. However,
+Netdata Cloud does rely on third-party providers for its services, including, but not limited to, Google Cloud and
+Amazon Web Services for its infrastructure, Stripe for payment processing, and Google Analytics, PostHog, and Gainsight
+PX for analytics.
+
+### Data Protection
+
+We use state-of-the-art security measures to protect user data from unauthorized access, use, or disclosure. All
+infrastructure data visible on Netdata Cloud passes through the Agent-Cloud Link (ACLK) mechanism, which securely
+connects a Netdata Agent to Netdata Cloud. The ACLK is encrypted, safe, and is only established if the user connects
+their node. All data in transit between a user and Netdata Cloud is encrypted using TLS.
+
+### User Control over Data
+
+Netdata provides its users with the ability to access, retrieve, correct, and delete their personal data stored in
+Netdata Cloud. This ability may occasionally be limited due to temporary service outages for maintenance or other
+updates to Netdata Cloud, or when it is technically not feasible. If a customer is unable to delete personal data via
+the self-service functionality, Netdata deletes the data upon the customer's written request, within the timeframe
+specified in the Data Protection Agreement (DPA), and in accordance with applicable data protection laws.
+
+### Compliance with Data Protection Laws
+
+Netdata Cloud is fully compliant with data protection laws like the General Data Protection Regulation (GDPR) and the
+California Consumer Privacy Act (CCPA).
+
+### Data Transfer
+
+Data transfer within Netdata Cloud is secure and respects the privacy of the user data. The Netdata Agent establishes an
+outgoing secure WebSocket (WSS) connection to Netdata Cloud, ensuring that the data is encrypted when in transit.
+
+### Use of Tracking Technologies
+
+Netdata Cloud uses analytical cookies if a user consents to their use. These cookies are used to track the usage of the
+application, and the data they collect is stored in systems like Google Analytics, PostHog, and Gainsight PX.
+
+### Data Breach Notification Process
+
+In the event of a data breach, Netdata has a well-defined process in place for notifying users. The details of this
+process align with the standard procedures and timelines defined in the Data Protection Agreement (DPA).
+
+We continually review and update our privacy and data protection practices to ensure the highest level of data safety
+and privacy for our users.
+
+## Compliance with Regulations
+
+Netdata is committed to ensuring the security, privacy, and integrity of user data. It complies with both the General
+Data Protection Regulation (GDPR), a regulation in EU law on data protection and privacy, and the California Consumer
+Privacy Act (CCPA), a state statute intended to enhance privacy rights and consumer protection for residents of
+California.
+
+### Compliance with GDPR and CCPA
+
+Compliance with GDPR and CCPA is a self-assessment process, and Netdata has undertaken thorough internal audits and
+controls to ensure it meets all requirements.
+
+On a per-request basis, any customer may enter into a data processing addendum (DPA) with Netdata, governing the
+customer’s ability to load and permit Netdata to process any personal data or information regulated under applicable
+data protection laws, including the GDPR and CCPA.
+
+### Data Transfers
+
+While the Netdata Agent itself does not engage in any cross-border data transfers, certain personal and infrastructure
+data is transferred to Netdata Cloud for the purpose of providing its services. The metric data collected and processed
+by Netdata Agents, however, stays strictly within the user's infrastructure, eliminating any concerns about
+cross-border data transfer issues.
+
+When users utilize Netdata Cloud, the metric data is streamed directly from the Netdata Agent to the users’ web browsers
+via Netdata Cloud, without being stored on Netdata Cloud's servers. However, user identification data (such as email
+addresses) and infrastructure metadata necessary for Netdata Cloud's operation are stored in data centers in the United
+States, using compliant infrastructure providers such as Google Cloud and Amazon Web Services. These transfers and
+storage are carried out in full compliance with applicable data protection laws, including GDPR and CCPA.
+
+### Privacy Rights
+
+Netdata ensures user privacy rights as mandated by the GDPR and CCPA. This includes the right to access, correct, and
+delete personal data. These functions are all available online via the Netdata Cloud User Interface (UI). In case a user
+wants to remove all personal information (email and activities), they can delete their cloud account by logging
+into https://app.netdata.cloud and accessing their profile, at the bottom left of the screen.
+
+### Regular Review and Updates
+
+Netdata is dedicated to keeping its practices up-to-date with the latest developments in data protection regulations.
+Therefore, as soon as updates or changes are made to these regulations, Netdata reviews and updates its policies and
+practices accordingly to ensure continual compliance.
+
+While Netdata is confident in its compliance with GDPR and CCPA, users are encouraged to review Netdata's privacy policy
+and reach out with any questions or concerns they may have about data protection and privacy.
+
+## Anonymous Statistics
+
+The anonymous statistics collected by the Netdata Agent are related to the installations and not to individual users.
+This data includes community size, types of plugins used, possible crashes, operating systems installed, and the use of
+the registry feature. No IP addresses are collected, but each Netdata installation has a unique ID.
+
+Netdata also collects anonymous telemetry events, which provide information on the usage of various features, errors,
+and performance metrics. This data is used to understand how the software is being used and to identify areas for
+improvement.
+
+The purpose of collecting these statistics and telemetry data is to guide the development of the open-source agent,
+focusing on areas that are most beneficial to users.
+
+Users have the option to opt out of this data collection during the installation of the agent, or at any time by
+creating a specific opt-out file on their system.
+
+Netdata retains this data indefinitely in order to track changes and trends within the community over time.
+
+Netdata does not share these anonymous statistics or telemetry data with any third parties.
+
+By collecting this data, Netdata is able to continuously improve its service and identify any issues or areas for
+improvement, while respecting user privacy and maintaining transparency.
+
+## Internal Security Measures
+
+Internal Security Measures at Netdata are designed with an emphasis on data privacy and protection. The measures
+include:
+
+1. **Infrastructure as Code (IaC)**:
+   Netdata Cloud follows the IaC model, which means it is a microservices environment that is completely isolated. All
+   changes are managed through Terraform, an open-source IaC software tool that provides a consistent CLI workflow for
+   managing cloud services.
+2. **TLS Termination and IAM Service**:
+   At the edge of Netdata Cloud, there is a TLS termination point, which provides the decryption point for incoming TLS
+   connections. Additionally, an Identity and Access Management (IAM) service validates the JWT tokens included in
+   request cookies and denies access when validation fails.
+3. **Session Identification**:
+   Once inside the microservices environment, all requests are associated with session IDs that identify the user making
+   the request. This approach provides additional layers of security and traceability.
+4. **Data Storage**:
+   Data is stored in various NoSQL and SQL databases and message brokers. The entire environment is fully isolated,
+   providing a secure space for data management.
+5. **Authentication**:
+   Netdata Cloud does not store credentials. It offers three types of authentication: GitHub Single Sign-On (SSO),
+   Google SSO, and email validation.
+6. **DDoS Protection**:
+   Netdata Cloud has multiple protection mechanisms against Distributed Denial of Service (DDoS) attacks, including
+   rate-limiting and automated blacklisting.
+7. **Security-Focused Development Process**:
+   To ensure a secure environment, Netdata employs a security-focused development process. This includes the use of
+   static code analysers to identify potential security vulnerabilities in the codebase.
+8. **High Security Standards**:
+   Netdata Cloud maintains high security standards and can provide additional customization on a per-contract basis.
+9. **Employee Security Practices**:
+   Netdata ensures its employees follow security best practices, including role-based access, periodic access review,
+   and multi-factor authentication. This helps to minimize the risk of unauthorized access to sensitive data.
+10. **Experienced Developers**:
+    Netdata hires senior developers with vast experience in security-related matters. It enforces two code reviews for
+    every Pull Request (PR), ensuring that any potential issues are identified and addressed promptly.
+11. **DevOps Methodologies**:
+    Netdata's DevOps methodologies use the highest standards in access control in all places, utilizing the best
+    practices available.
+12. **Risk-Based Security Program**:
+    Netdata has a risk-based security program that continually assesses and mitigates risks associated with data
+    security. This program helps maintain a secure environment for user data.
+
+These security measures ensure that Netdata Cloud is a secure environment for users to monitor and troubleshoot their
+systems. The company remains committed to continuously improving its security practices to safeguard user data
+effectively.
+
+## PCI DSS
+
+PCI DSS (Payment Card Industry Data Security Standard) is a set of security standards designed to ensure that all
+companies that accept, process, store or transmit credit card information maintain a secure environment.
+
+Netdata is committed to providing secure and privacy-respecting services, and it aligns its practices with many of the
+key principles of the PCI DSS. However, it's important to clarify that Netdata is not officially certified as PCI
+DSS-compliant. While Netdata follows practices that align with PCI DSS's key principles, the company itself has not
+undergone the formal certification process for PCI DSS compliance.
+
+PCI DSS compliance is not just about the technical controls but also involves a range of administrative and procedural
+safeguards that go beyond the scope of Netdata's services. These include, among other things, maintaining a secure
+network, implementing strong access control measures, regularly monitoring and testing networks, and maintaining an
+information security policy.
+
+Therefore, while Netdata can support entities with their data security needs in relation to PCI DSS, it is ultimately
+the responsibility of the entity to ensure full PCI DSS compliance across all of their operations. Entities should
+always consult with a legal expert or a PCI DSS compliance consultant to ensure that their use of any product, including
+Netdata, aligns with PCI DSS regulations.
+
+## HIPAA
+
+HIPAA stands for the Health Insurance Portability and Accountability Act, which is a United States federal law enacted
+in 1996. HIPAA is primarily focused on protecting the privacy and security of individuals' health information.
+
+Netdata is committed to providing secure and privacy-respecting services, and it aligns its practices with many key
+principles of HIPAA. However, it's important to clarify that Netdata is not officially certified as HIPAA-compliant.
+While Netdata follows practices that align with HIPAA's key principles, the company itself has not undergone the formal
+certification process for HIPAA compliance.
+
+HIPAA compliance is not just about technical controls but also involves a range of administrative and procedural
+safeguards that go beyond the scope of Netdata's services. These include, among other things, employee training,
+physical security, and contingency planning.
+
+Therefore, while Netdata can support HIPAA-regulated entities with their data security needs and is prepared to sign a
+Business Associate Agreement (BAA), it is ultimately the responsibility of the healthcare entity to ensure full HIPAA
+compliance across all of their operations. Entities should always consult with a legal expert or a HIPAA compliance
+consultant to ensure that their use of any product, including Netdata, aligns with HIPAA regulations.
+
+## Conclusion
+
+In conclusion, Netdata Cloud's commitment to data security and user privacy is paramount. From the careful design of the
+infrastructure and stringent internal security measures to compliance with international regulations and standards like
+GDPR and CCPA, Netdata Cloud ensures a secure environment for users to monitor and troubleshoot their systems.
+
+The use of advanced encryption techniques, role-based access control, and robust authentication methods further
+strengthen the security of user data. Netdata Cloud also maintains transparency in its data handling practices, giving
+users control over their data and the ability to easily access, retrieve, correct, and delete their personal data.
+
+Netdata's approach to anonymous statistics collection respects user privacy while enabling the company to improve its
+product based on real-world usage data. Even in such cases, users have the choice to opt out, underlining Netdata's
+respect for user autonomy.
+
+In summary, Netdata Cloud offers a highly secure, user-centric environment for system monitoring and troubleshooting.
+The company's emphasis on continuous security improvement and commitment to user privacy make it a trusted choice in the
+data monitoring landscape.
-Netdata maintains a risk-based assessment security program. The framework for Netdata’s security program includes administrative,
-organizational, technical, and physical safeguards reasonably designed to protect the services and confidentiality, integrity,
-and availability of user data. The program is intended to be appropriate to the nature of the services and the size and complexity
-of Netdata’s business operations.
diff --git a/exporting/README.md b/exporting/README.md
index c6ce32b65..013f86f32 100644
--- a/exporting/README.md
+++ b/exporting/README.md
@@ -284,7 +284,7 @@ Configure individual connectors and override any global settings with the follow
and names are human friendly labels (also unique). Most charts and metrics have the same ID and name, but in several
cases they are different: disks with device-mapper, interrupts, QoS classes, statsd synthetic charts, etc.
-- `send configured labels = yes | no` controls if labels defined in the `[host labels]` section in `netdata.conf`
+- `send configured labels = yes | no` controls if host labels defined in the `[host labels]` section in `netdata.conf`
should be sent to the external database
- `send automatic labels = yes | no` controls if automatically created labels, like `_os_name` or `_architecture`
diff --git a/exporting/WALKTHROUGH.md b/exporting/WALKTHROUGH.md
index 49cf6587b..86be758e4 100644
--- a/exporting/WALKTHROUGH.md
+++ b/exporting/WALKTHROUGH.md
@@ -74,10 +74,10 @@ this is your first time using Netdata I suggest you take a look around. The amou
Next I want to draw your attention to a particular endpoint. Navigate to
<http://localhost:19999/api/v1/allmetrics?format=prometheus&help=yes> In your browser. This is the endpoint which
publishes all the metrics in a format which Prometheus understands. Let's take a look at one of these metrics.
-`netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000` This
-metric is representing several things which I will go in more details in the section on Prometheus. For now understand
-that this metric: `netdata_system_cpu_percentage_average` has several labels: (`chart`, `family`, `dimension`). This
-corresponds with the first cpu chart you see on the Netdata dashboard.
+`netdata_disk_space_GiB_average{chart="disk_space._run",dimension="avail",family="/run",mount_point="/run",filesystem="tmpfs",mount_root="/"} 0.0298195 1684951093000`
+This metric represents several things, which I will cover in more detail in the section on Prometheus. For now, understand
+that this metric, `netdata_disk_space_GiB_average`, has several labels: (`chart`, `family`, `dimension`, `mount_point`, `filesystem`, `mount_root`).
+This corresponds with the disk space chart you see on the Netdata dashboard.
![](https://github.com/ldelossa/NetdataTutorial/raw/master/Screen%20Shot%202017-07-28%20at%204.00.45%20PM.png)
@@ -138,12 +138,13 @@ As explained we have two key elements in Prometheus metrics. We have the _metric
granularity between metrics. Let's use our previous example to further explain.
```conf
-netdata_system_cpu_percentage_average{chart="system.cpu",family="cpu",dimension="system"} 0.0831255 1501271696000
+netdata_disk_space_GiB_average{chart="disk_space._run",dimension="avail",family="/run",mount_point="/run",filesystem="tmpfs",mount_root="/"} 0.0298195 1684951093000
```
-Here our metric is `netdata_system_cpu_percentage_average` and our labels are `chart`, `family`, and `dimension`. The
-last two values constitute the actual metric value for the metric type (gauge, counter, etc…). We can begin graphing
-system metrics with this information, but first we need to hook up Prometheus to poll Netdata stats.
+Here our metric is `netdata_disk_space_GiB_average` and our common labels are `chart`, `family`, and `dimension`. The
+last two values constitute the actual metric value for the metric type (gauge, counter, etc…). We also have labels
+specific to this chart, named `mount_point`, `filesystem`, and `mount_root`. We can begin graphing system metrics with
+this information, but first we need to hook up Prometheus to poll Netdata stats.
Let's move our attention to Prometheus's configuration. Prometheus gets it config from the file located (in our example)
at `/opt/prometheus/prometheus.yml`. I won't spend an extensive amount of time going over the configuration values
diff --git a/exporting/clean_connectors.c b/exporting/clean_connectors.c
index e93563741..ab1fb5dd7 100644
--- a/exporting/clean_connectors.c
+++ b/exporting/clean_connectors.c
@@ -68,8 +68,7 @@ void simple_connector_cleanup(struct instance *instance)
}
#ifdef ENABLE_HTTPS
- if (simple_connector_data->conn)
- SSL_free(simple_connector_data->conn);
+ netdata_ssl_close(&simple_connector_data->ssl);
#endif
freez(simple_connector_data);
@@ -80,6 +79,4 @@ void simple_connector_cleanup(struct instance *instance)
info("EXPORTING: instance %s exited", instance->config.name);
instance->exited = 1;
-
- return;
}
diff --git a/exporting/exporting_engine.c b/exporting/exporting_engine.c
index 2ad8cdd96..8f957c7c5 100644
--- a/exporting/exporting_engine.c
+++ b/exporting/exporting_engine.c
@@ -10,7 +10,7 @@ void analytics_exporting_connectors_ssl(BUFFER *b)
if (netdata_ssl_exporting_ctx) {
for (struct instance *instance = engine->instance_root; instance; instance = instance->next) {
struct simple_connector_data *connector_specific_data = instance->connector_specific_data;
- if (connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
+ if (SSL_connection(&connector_specific_data->ssl)) {
buffer_strcat(b, "exporting");
break;
}
diff --git a/exporting/exporting_engine.h b/exporting/exporting_engine.h
index 5f961c303..c04bbeec3 100644
--- a/exporting/exporting_engine.h
+++ b/exporting/exporting_engine.h
@@ -126,8 +126,7 @@ struct simple_connector_data {
struct simple_connector_buffer *last_buffer;
#ifdef ENABLE_HTTPS
- SSL *conn; //SSL connection
- int flags; //The flags for SSL connection
+ NETDATA_SSL ssl;
#endif
};
diff --git a/exporting/graphite/graphite.c b/exporting/graphite/graphite.c
index f1964f3e5..3aff24926 100644
--- a/exporting/graphite/graphite.c
+++ b/exporting/graphite/graphite.c
@@ -20,10 +20,9 @@ int init_graphite_instance(struct instance *instance)
instance->connector_specific_data = connector_specific_data;
#ifdef ENABLE_HTTPS
- connector_specific_data->flags = NETDATA_SSL_START;
- connector_specific_data->conn = NULL;
+ connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
- security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
#endif
diff --git a/exporting/json/json.c b/exporting/json/json.c
index 4cafd4c04..edbb98ef6 100644
--- a/exporting/json/json.c
+++ b/exporting/json/json.c
@@ -71,10 +71,9 @@ int init_json_http_instance(struct instance *instance)
instance->connector_specific_data = connector_specific_data;
#ifdef ENABLE_HTTPS
- connector_specific_data->flags = NETDATA_SSL_START;
- connector_specific_data->conn = NULL;
+ connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
- security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
#endif
diff --git a/exporting/opentsdb/opentsdb.c b/exporting/opentsdb/opentsdb.c
index fc01ae461..0248469fa 100644
--- a/exporting/opentsdb/opentsdb.c
+++ b/exporting/opentsdb/opentsdb.c
@@ -21,10 +21,9 @@ int init_opentsdb_telnet_instance(struct instance *instance)
instance->connector_specific_data = connector_specific_data;
#ifdef ENABLE_HTTPS
- connector_specific_data->flags = NETDATA_SSL_START;
- connector_specific_data->conn = NULL;
+ connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
- security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
#endif
@@ -77,10 +76,9 @@ int init_opentsdb_http_instance(struct instance *instance)
struct simple_connector_data *connector_specific_data = callocz(1, sizeof(struct simple_connector_data));
#ifdef ENABLE_HTTPS
- connector_specific_data->flags = NETDATA_SSL_START;
- connector_specific_data->conn = NULL;
+ connector_specific_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
- security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
#endif
instance->connector_specific_data = connector_specific_data;
diff --git a/exporting/prometheus/prometheus.c b/exporting/prometheus/prometheus.c
index 24bd215f4..0e0e8abf0 100644
--- a/exporting/prometheus/prometheus.c
+++ b/exporting/prometheus/prometheus.c
@@ -326,6 +326,53 @@ void format_host_labels_prometheus(struct instance *instance, RRDHOST *host)
rrdlabels_walkthrough_read(host->rrdlabels, format_prometheus_label_callback, &tmp);
}
+/**
+ * Format chart labels for the Prometheus exporter.
+ * We use a structure instead of a direct buffer, so that options can be extended quickly.
+ *
+ * @param labels_buffer is the buffer used to accumulate the formatted labels.
+ */
+
+struct format_prometheus_chart_label_callback {
+ BUFFER *labels_buffer;
+};
+
+static int format_prometheus_chart_label_callback(const char *name, const char *value, RRDLABEL_SRC ls, void *data) {
+ struct format_prometheus_chart_label_callback *d = (struct format_prometheus_chart_label_callback *)data;
+
+ (void)ls;
+
+ if (name[0] == '_' )
+ return 1;
+
+ char k[PROMETHEUS_ELEMENT_MAX + 1];
+ char v[PROMETHEUS_ELEMENT_MAX + 1];
+
+ prometheus_name_copy(k, name, PROMETHEUS_ELEMENT_MAX);
+ prometheus_label_copy(v, value, PROMETHEUS_ELEMENT_MAX);
+
+ if (*k && *v) {
+ buffer_sprintf(d->labels_buffer, ",%s=\"%s\"", k, v);
+ }
+ return 1;
+}
+
+void format_chart_labels_prometheus(struct format_prometheus_chart_label_callback *plabel,
+ const char *chart,
+ const char *family,
+ const char *dim,
+ RRDSET *st)
+{
+ if (likely(plabel->labels_buffer))
+ buffer_reset(plabel->labels_buffer);
+ else {
+ plabel->labels_buffer = buffer_create(1024, NULL);
+ }
+ buffer_sprintf(plabel->labels_buffer, "chart=\"%s\",dimension=\"%s\",family=\"%s\"", chart, dim, family);
+
+ rrdlabels_walkthrough_read(st->rrdlabels, format_prometheus_chart_label_callback, plabel);
+}
+
struct host_variables_callback_options {
RRDHOST *host;
BUFFER *wb;
@@ -462,19 +509,31 @@ static void generate_as_collected_prom_help(BUFFER *wb, struct gen_parameters *p
* @param p parameters for generating the metric string.
* @param homogeneous a flag for homogeneous charts.
* @param prometheus_collector a flag for metrics from prometheus collector.
+ * @param chart_labels the dictionary with chart labels
*/
-static void generate_as_collected_prom_metric(BUFFER *wb, struct gen_parameters *p, int homogeneous, int prometheus_collector)
+static void generate_as_collected_prom_metric(BUFFER *wb,
+ struct gen_parameters *p,
+ int homogeneous,
+ int prometheus_collector,
+ DICTIONARY *chart_labels)
{
+ struct format_prometheus_chart_label_callback local_label;
+ local_label.labels_buffer = wb;
+
buffer_sprintf(wb, "%s_%s", p->prefix, p->context);
if (!homogeneous)
buffer_sprintf(wb, "_%s", p->dimension);
- buffer_sprintf(wb, "%s{chart=\"%s\",family=\"%s\"", p->suffix, p->chart, p->family);
+ buffer_sprintf(wb, "%s{chart=\"%s\"", p->suffix, p->chart);
if (homogeneous)
buffer_sprintf(wb, ",dimension=\"%s\"", p->dimension);
+ buffer_sprintf(wb, ",family=\"%s\"", p->family);
+
+ rrdlabels_walkthrough_read(chart_labels, format_prometheus_chart_label_callback, &local_label);
+
buffer_sprintf(wb, "%s} ", p->labels);
if (prometheus_collector)
@@ -564,6 +623,10 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
// for each chart
RRDSET *st;
+
+ static struct format_prometheus_chart_label_callback plabels = {
+ .labels_buffer = NULL,
+ };
rrdset_foreach_read(st, host) {
if (likely(can_send_rrdset(instance, st, filter))) {
@@ -655,7 +718,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
if (unlikely(output_options & PROMETHEUS_OUTPUT_TYPES))
buffer_sprintf(wb, "# TYPE %s_%s%s %s\n", prefix, context, suffix, p.type);
- generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector);
+ generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector, st->rrdlabels);
}
else {
// the dimensions of the chart, do not have the same algorithm, multiplier or divisor
@@ -673,7 +736,7 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
buffer_sprintf(
wb, "# TYPE %s_%s_%s%s %s\n", prefix, context, dimension, suffix, p.type);
- generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector);
+ generate_as_collected_prom_metric(wb, &p, homogeneous, prometheus_collector, st->rrdlabels);
}
}
else {
@@ -694,6 +757,8 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
(output_options & PROMETHEUS_OUTPUT_NAMES && rd->name) ? rrddim_name(rd) : rrddim_id(rd),
PROMETHEUS_ELEMENT_MAX);
+ format_chart_labels_prometheus(&plabels, chart, family, dimension, st);
+
if (unlikely(output_options & PROMETHEUS_OUTPUT_HELP))
buffer_sprintf(
wb,
@@ -713,30 +778,26 @@ static void rrd_stats_api_v1_charts_allmetrics_prometheus(
if (output_options & PROMETHEUS_OUTPUT_TIMESTAMPS)
buffer_sprintf(
wb,
- "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " NETDATA_DOUBLE_FORMAT
+ "%s_%s%s%s{%s%s} " NETDATA_DOUBLE_FORMAT
" %llu\n",
prefix,
context,
units,
suffix,
- chart,
- family,
- dimension,
+ buffer_tostring(plabels.labels_buffer),
labels,
value,
last_time * MSEC_PER_SEC);
else
buffer_sprintf(
wb,
- "%s_%s%s%s{chart=\"%s\",family=\"%s\",dimension=\"%s\"%s} " NETDATA_DOUBLE_FORMAT
+ "%s_%s%s%s{%s%s} " NETDATA_DOUBLE_FORMAT
"\n",
prefix,
context,
units,
suffix,
- chart,
- family,
- dimension,
+ buffer_tostring(plabels.labels_buffer),
labels,
value);
}
diff --git a/exporting/prometheus/remote_write/remote_write.c b/exporting/prometheus/remote_write/remote_write.c
index 1857ca333..660b798e4 100644
--- a/exporting/prometheus/remote_write/remote_write.c
+++ b/exporting/prometheus/remote_write/remote_write.c
@@ -115,10 +115,9 @@ int init_prometheus_remote_write_instance(struct instance *instance)
instance->connector_specific_data = simple_connector_data;
#ifdef ENABLE_HTTPS
- simple_connector_data->flags = NETDATA_SSL_START;
- simple_connector_data->conn = NULL;
+ simple_connector_data->ssl = NETDATA_SSL_UNSET_CONNECTION;
if (instance->config.options & EXPORTING_OPTION_USE_TLS) {
- security_start_ssl(NETDATA_SSL_CONTEXT_EXPORTING);
+ netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
}
#endif
diff --git a/exporting/prometheus/remote_write/remote_write_request.cc b/exporting/prometheus/remote_write/remote_write_request.cc
index ecfa11fa8..a628082d1 100644
--- a/exporting/prometheus/remote_write/remote_write_request.cc
+++ b/exporting/prometheus/remote_write/remote_write_request.cc
@@ -45,16 +45,16 @@ void add_host_info(
label->set_name("__name__");
label->set_value(name);
- label = timeseries->add_labels();
- label->set_name("instance");
- label->set_value(instance);
-
if (application) {
label = timeseries->add_labels();
label->set_name("application");
label->set_value(application);
}
+ label = timeseries->add_labels();
+ label->set_name("instance");
+ label->set_value(instance);
+
if (version) {
label = timeseries->add_labels();
label->set_name("version");
@@ -118,10 +118,6 @@ void add_metric(
label->set_name("chart");
label->set_value(chart);
- label = timeseries->add_labels();
- label->set_name("family");
- label->set_value(family);
-
if (dimension) {
label = timeseries->add_labels();
label->set_name("dimension");
@@ -129,6 +125,10 @@ void add_metric(
}
label = timeseries->add_labels();
+ label->set_name("family");
+ label->set_value(family);
+
+ label = timeseries->add_labels();
label->set_name("instance");
label->set_value(instance);
diff --git a/exporting/send_data.c b/exporting/send_data.c
index 045aab6ed..d91fc50d7 100644
--- a/exporting/send_data.c
+++ b/exporting/send_data.c
@@ -81,37 +81,11 @@ void simple_connector_receive_response(int *sock, struct instance *instance)
while (*sock != -1 && errno != EWOULDBLOCK) {
ssize_t r;
#ifdef ENABLE_HTTPS
- if (exporting_tls_is_enabled(instance->config.type, options) &&
- connector_specific_data->conn &&
- connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
- r = (ssize_t)SSL_read(connector_specific_data->conn,
- &response->buffer[response->len],
- (int) (response->size - response->len));
-
- if (likely(r > 0)) {
- // we received some data
- response->len += r;
- stats->received_bytes += r;
- stats->receptions++;
- continue;
- } else {
- int sslerrno = SSL_get_error(connector_specific_data->conn, (int) r);
- u_long sslerr = ERR_get_error();
- char buf[256];
- switch (sslerrno) {
- case SSL_ERROR_WANT_READ:
- case SSL_ERROR_WANT_WRITE:
- goto endloop;
- default:
- ERR_error_string_n(sslerr, buf, sizeof(buf));
- error("SSL error (%s)",
- ERR_error_string((long)SSL_get_error(connector_specific_data->conn, (int)r), NULL));
- goto endloop;
- }
- }
- } else {
+ if (SSL_connection(&connector_specific_data->ssl))
+ r = netdata_ssl_read(&connector_specific_data->ssl, &response->buffer[response->len],
+ (int) (response->size - response->len));
+ else
r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
- }
#else
r = recv(*sock, &response->buffer[response->len], response->size - response->len, MSG_DONTWAIT);
#endif
@@ -120,11 +94,13 @@ void simple_connector_receive_response(int *sock, struct instance *instance)
response->len += r;
stats->received_bytes += r;
stats->receptions++;
- } else if (r == 0) {
+ }
+ else if (r == 0) {
error("EXPORTING: '%s' closed the socket", instance->config.destination);
close(*sock);
*sock = -1;
- } else {
+ }
+ else {
// failed to receive data
if (errno != EAGAIN && errno != EWOULDBLOCK) {
error("EXPORTING: cannot receive data from '%s'.", instance->config.destination);
@@ -135,9 +111,6 @@ void simple_connector_receive_response(int *sock, struct instance *instance)
break;
#endif
}
-#ifdef ENABLE_HTTPS
-endloop:
-#endif
// if we received data, process them
if (buffer_strlen(response))
@@ -174,14 +147,16 @@ void simple_connector_send_buffer(
size_t buffer_len = buffer_strlen(buffer);
#ifdef ENABLE_HTTPS
- if (exporting_tls_is_enabled(instance->config.type, options) &&
- connector_specific_data->conn &&
- connector_specific_data->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
+ if (SSL_connection(&connector_specific_data->ssl)) {
+
if (header_len)
- header_sent_bytes = (ssize_t)SSL_write(connector_specific_data->conn, buffer_tostring(header), header_len);
+ header_sent_bytes = netdata_ssl_write(&connector_specific_data->ssl, buffer_tostring(header), header_len);
+
if ((size_t)header_sent_bytes == header_len)
- buffer_sent_bytes = (ssize_t)SSL_write(connector_specific_data->conn, buffer_tostring(buffer), buffer_len);
- } else {
+ buffer_sent_bytes = netdata_ssl_write(&connector_specific_data->ssl, buffer_tostring(buffer), buffer_len);
+
+ }
+ else {
if (header_len)
header_sent_bytes = send(*sock, buffer_tostring(header), header_len, flags);
if ((size_t)header_sent_bytes == header_len)
@@ -326,43 +301,19 @@ void simple_connector_worker(void *instance_p)
if (sock_delnonblock(sock) < 0)
error("Exporting cannot remove the non-blocking flag from socket %d", sock);
- if (connector_specific_data->conn == NULL) {
- connector_specific_data->conn = SSL_new(netdata_ssl_exporting_ctx);
- if (connector_specific_data->conn == NULL) {
- error("Failed to allocate SSL structure to socket %d.", sock);
- connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
- }
- } else {
- SSL_clear(connector_specific_data->conn);
- }
+ if(netdata_ssl_open(&connector_specific_data->ssl, netdata_ssl_exporting_ctx, sock)) {
+ if(netdata_ssl_connect(&connector_specific_data->ssl)) {
+ info("Exporting established a SSL connection.");
+
+ struct timeval tv;
+ tv.tv_sec = timeout.tv_sec / 4;
+ tv.tv_usec = 0;
+
+ if (!tv.tv_sec)
+ tv.tv_sec = 2;
- if (connector_specific_data->conn) {
- if (SSL_set_fd(connector_specific_data->conn, sock) != 1) {
- error("Failed to set the socket to the SSL on socket fd %d.", sock);
- connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
- } else {
- connector_specific_data->flags = NETDATA_SSL_HANDSHAKE_COMPLETE;
- SSL_set_connect_state(connector_specific_data->conn);
- int err = SSL_connect(connector_specific_data->conn);
- if (err != 1) {
- err = SSL_get_error(connector_specific_data->conn, err);
- error(
- "SSL cannot connect with the server: %s ",
- ERR_error_string((long)SSL_get_error(connector_specific_data->conn, err), NULL));
- connector_specific_data->flags = NETDATA_SSL_NO_HANDSHAKE;
- } else {
- info("Exporting established a SSL connection.");
-
- struct timeval tv;
- tv.tv_sec = timeout.tv_sec / 4;
- tv.tv_usec = 0;
-
- if (!tv.tv_sec)
- tv.tv_sec = 2;
-
- if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv, sizeof(tv)))
- error("Cannot set timeout to socket %d, this can block communication", sock);
- }
+ if (setsockopt(sock, SOL_SOCKET, SO_RCVTIMEO, (const char *)&tv, sizeof(tv)))
+ error("Cannot set timeout to socket %d, this can block communication", sock);
}
}
}
diff --git a/health/Makefile.am b/health/Makefile.am
index ea1b6e961..0ef55c75e 100644
--- a/health/Makefile.am
+++ b/health/Makefile.am
@@ -69,6 +69,7 @@ dist_healthconfig_DATA = \
health.d/nvme.conf \
health.d/nut.conf \
health.d/pihole.conf \
+ health.d/plugin.conf \
health.d/ping.conf \
health.d/postgres.conf \
health.d/portcheck.conf \
diff --git a/health/REFERENCE.md b/health/REFERENCE.md
index b95dc852e..a36edd8cf 100644
--- a/health/REFERENCE.md
+++ b/health/REFERENCE.md
@@ -241,7 +241,8 @@ Netdata parses the following lines. Beneath the table is an in-depth explanation
| [`delay`](#alarm-line-delay) | no | Optional hysteresis settings to prevent floods of notifications. |
| [`repeat`](#alarm-line-repeat) | no | The interval for sending notifications when an alarm is in WARNING or CRITICAL mode. |
| [`options`](#alarm-line-options) | no | Add an option to not clear alarms. |
-| [`host labels`](#alarm-line-host-labels) | no | List of labels present on a host. |
+| [`host labels`](#alarm-line-host-labels) | no | Restrict an alarm or template to a list of matching labels present on a host. |
+| [`chart labels`](#alarm-line-chart-labels) | no | Restrict an alarm or template to a list of matching labels present on a chart. |
| [`info`](#alarm-line-info) | no | A brief description of the alarm. |
The `alarm` or `template` line must be the first line of any entity.
@@ -446,6 +447,9 @@ For example, you can create a template on the `disk.io` context, but filter it t
families: sda sdb
```
+Please note that the `families` filter is planned to be deprecated in upcoming Netdata releases;
+use [`chart labels`](#alarm-line-chart-labels) instead.
+
#### Alarm line `lookup`
This line makes a database lookup to find a value. This result of this lookup is available as `$this`.
@@ -696,6 +700,35 @@ host labels: installed = 201*
See our [simple patterns docs](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) for more examples.
+#### Alarm line `chart labels`
+
+Similar to host labels, the `chart labels` key can be used to filter whether an alarm will load for a specific chart,
+based on whether its chart labels match.
+
+The list of chart labels present on each chart can be obtained from http://localhost:19999/api/v1/charts?all
+
+For example, each `disk_space` chart defines a chart label called `mount_point`, and each instance of this chart sets
+its value to the mount point it monitors.
+
+If you have, for example, an external disk mounted on `/mnt/disk1` and you don't want any related disk space alerts running for
+it (but you do for all other mount points), you can add the following to the alert's configuration:
+
+```yaml
+chart labels: mount_point=!/mnt/disk1 *
+```
+
+The `chart labels` key accepts a space-separated list of simple patterns. If you use multiple different chart labels,
+then the result is an OR between them, i.e. the following:
+
+```yaml
+chart labels: mount_point=/mnt/disk1 device=sda
+```
+
+will create the alert if the `mount_point` is `/mnt/disk1` or the `device` is `sda`. Furthermore, if a chart label name
+is specified that does not exist on the chart, the chart won't be matched.
+
+See our [simple patterns docs](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md) for more examples.
+
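+A minimal template sketch putting this together (the template name and threshold are illustrative, not a shipped alarm; the `calc` follows the stock disk usage alarms):
+
+```yaml
+    template: external_disk_space_usage
+          on: disk.space
+chart labels: mount_point=/mnt/*
+        calc: $used * 100 / ($avail + $used)
+       units: %
+       every: 1m
+        warn: $this > 80
+```
+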
#### Alarm line `info`
The info field can contain a small piece of text describing the alarm or template. This will be rendered in
diff --git a/health/health.c b/health/health.c
index 5c2b85bc5..df4798a20 100644
--- a/health/health.c
+++ b/health/health.c
@@ -412,17 +412,13 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
// find the previous notification for the same alarm
// which we have run the exec script
// exception: alarms with HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION set
+ RRDCALC_STATUS last_executed_status = -3;
if(likely(!(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION))) {
- uint32_t id = ae->alarm_id;
- ALARM_ENTRY *t;
- for(t = ae->next; t ; t = t->next) {
- if(t->alarm_id == id && t->flags & HEALTH_ENTRY_FLAG_EXEC_RUN)
- break;
- }
+ int ret = sql_health_get_last_executed_event(host, ae, &last_executed_status);
- if(likely(t)) {
+ if (likely(ret == 1)) {
// we have executed this alarm notification in the past
- if(t && t->new_status == ae->new_status) {
+ if(last_executed_status == ae->new_status) {
// don't send the notification for the same status again
debug(D_HEALTH, "Health not sending again notification for alarm '%s.%s' status %s", ae_chart_name(ae), ae_name(ae)
, rrdcalc_status2string(ae->new_status));
@@ -561,6 +557,7 @@ static inline void health_alarm_execute(RRDHOST *host, ALARM_ENTRY *ae) {
ae->flags |= HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS;
ae->exec_spawn_serial = spawn_enq_cmd(command_to_run);
enqueue_alarm_notify_in_progress(ae);
+ health_alarm_log_save(host, ae);
} else {
error("Failed to format command arguments");
}
@@ -628,35 +625,32 @@ static inline void health_alarm_log_process(RRDHOST *host) {
// remember this for the next iteration
host->health_last_processed_id = first_waiting;
- bool cleanup_excess_log_entries = host->health_log.count > host->health_log.max;
-
- if (!cleanup_excess_log_entries)
- return;
-
- // cleanup excess entries in the log
+ // delete entries that are updated, saved, not repeating and have no execution in progress,
+ // as well as saved REMOVED entries older than an hour
netdata_rwlock_wrlock(&host->health_log.alarm_log_rwlock);
- ALARM_ENTRY *last = NULL;
- unsigned int count = host->health_log.max * 2 / 3;
- for(ae = host->health_log.alarms; ae && count ; count--, last = ae, ae = ae->next) ;
-
- if(ae && last && last->next == ae)
- last->next = NULL;
- else
- ae = NULL;
-
- while(ae) {
- debug(D_HEALTH, "Health removing alarm log entry with id: %u", ae->unique_id);
-
- ALARM_ENTRY *t = ae->next;
-
- if(likely(!(ae->flags & HEALTH_ENTRY_FLAG_IS_REPEATING))) {
- health_alarm_wait_for_execution(ae);
+ ALARM_ENTRY *prev = host->health_log.alarms;
+ for(ae = host->health_log.alarms; ae ; ae = ae->next) {
+
+ if((likely(!(ae->flags & HEALTH_ENTRY_FLAG_IS_REPEATING)) &&
+ (ae->flags & HEALTH_ENTRY_FLAG_UPDATED) &&
+ (ae->flags & HEALTH_ENTRY_FLAG_SAVED) &&
+ !(ae->flags & HEALTH_ENTRY_FLAG_EXEC_IN_PROGRESS))
+ ||
+ ((ae->new_status == RRDCALC_STATUS_REMOVED) &&
+ (ae->flags & HEALTH_ENTRY_FLAG_SAVED) &&
+ (ae->when + 3600 < now_realtime_sec())))
+ {
+
+ if (ae == host->health_log.alarms) {
+ host->health_log.alarms = ae->next;
+ prev = ae->next;
+ } else {
+ prev->next = ae->next;
+ }
health_alarm_log_free_one_nochecks_nounlink(ae);
- host->health_log.count--;
- }
-
- ae = t;
+ ae = prev;
+ } else
+ prev = ae;
}
netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
@@ -904,8 +898,24 @@ static int update_disabled_silenced(RRDHOST *host, RRDCALC *rc) {
return 0;
}
+static void sql_health_postpone_queue_removed(RRDHOST *host __maybe_unused) {
+#ifdef ENABLE_ACLK
+ if (netdata_cloud_setting) {
+ struct aclk_sync_host_config *wc = (struct aclk_sync_host_config *)host->aclk_sync_host_config;
+ if (unlikely(!wc)) {
+ return;
+ }
+
+ if (wc->alert_queue_removed >= 1) {
+ wc->alert_queue_removed+=6;
+ }
+ }
+#endif
+}
+
static void health_execute_delayed_initializations(RRDHOST *host) {
RRDSET *st;
+ bool must_postpone = false;
if (!rrdhost_flag_check(host, RRDHOST_FLAG_PENDING_HEALTH_INITIALIZATION)) return;
rrdhost_flag_clear(host, RRDHOST_FLAG_PENDING_HEALTH_INITIALIZATION);
@@ -941,8 +951,11 @@ static void health_execute_delayed_initializations(RRDHOST *host) {
rrdvar_store_for_chart(host, st);
}
rrddim_foreach_done(rd);
+ must_postpone = true;
}
rrdset_foreach_done(st);
+ if (must_postpone)
+ sql_health_postpone_queue_removed(host);
}
/**
diff --git a/health/health.d/boinc.conf b/health/health.d/boinc.conf
index 7d7a4fdae..6f37787d7 100644
--- a/health/health.d/boinc.conf
+++ b/health/health.d/boinc.conf
@@ -8,7 +8,6 @@
component: BOINC
os: *
hosts: *
- families: *
lookup: average -10m unaligned of comperror
units: tasks
every: 1m
@@ -26,7 +25,6 @@ component: BOINC
component: BOINC
os: *
hosts: *
- families: *
lookup: average -10m unaligned of upload_failed
units: tasks
every: 1m
@@ -44,7 +42,6 @@ component: BOINC
component: BOINC
os: *
hosts: *
- families: *
lookup: average -10m unaligned of total
units: tasks
every: 1m
@@ -62,7 +59,6 @@ component: BOINC
component: BOINC
os: *
hosts: *
- families: *
lookup: average -10m unaligned of active
calc: ($boinc_total_tasks >= 1) ? ($this) : (inf)
units: tasks
diff --git a/health/health.d/btrfs.conf b/health/health.d/btrfs.conf
index ab63ff28d..97b7a3a94 100644
--- a/health/health.d/btrfs.conf
+++ b/health/health.d/btrfs.conf
@@ -6,7 +6,6 @@
component: File system
os: *
hosts: *
- families: *
calc: 100 - ($unallocated * 100 / ($unallocated + $data_used + $data_free + $meta_used + $meta_free + $sys_used + $sys_free))
units: %
every: 10s
@@ -23,7 +22,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
calc: $used * 100 / ($used + $free)
units: %
every: 10s
@@ -40,7 +38,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
calc: ($used + $reserved) * 100 / ($used + $free + $reserved)
units: %
every: 10s
@@ -57,7 +54,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
calc: $used * 100 / ($used + $free)
units: %
every: 10s
@@ -74,7 +70,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
units: errors
lookup: max -10m every 1m of read_errs
warn: $this > 0
@@ -89,7 +84,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
units: errors
lookup: max -10m every 1m of write_errs
warn: $this > 0
@@ -104,7 +98,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
units: errors
lookup: max -10m every 1m of flush_errs
warn: $this > 0
@@ -119,7 +112,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
units: errors
lookup: max -10m every 1m of corruption_errs
warn: $this > 0
@@ -134,7 +126,6 @@ component: File system
component: File system
os: *
hosts: *
- families: *
units: errors
lookup: max -10m every 1m of generation_errs
warn: $this > 0
diff --git a/health/health.d/cockroachdb.conf b/health/health.d/cockroachdb.conf
index 1f227841e..09e4f9d40 100644
--- a/health/health.d/cockroachdb.conf
+++ b/health/health.d/cockroachdb.conf
@@ -6,7 +6,7 @@
class: Utilization
type: Database
component: CockroachDB
- calc: $capacity_used_percent
+ calc: $total
units: %
every: 10s
warn: $this > (($status >= $WARNING) ? (80) : (85))
@@ -20,7 +20,7 @@ component: CockroachDB
class: Utilization
type: Database
component: CockroachDB
- calc: $capacity_usable_used_percent
+ calc: $usable
units: %
every: 10s
warn: $this > (($status >= $WARNING) ? (80) : (85))
@@ -36,7 +36,7 @@ component: CockroachDB
class: Errors
type: Database
component: CockroachDB
- calc: $ranges_unavailable
+ calc: $unavailable
units: num
every: 10s
warn: $this > 0
@@ -49,7 +49,7 @@ component: CockroachDB
class: Errors
type: Database
component: CockroachDB
- calc: $ranges_underreplicated
+ calc: $under_replicated
units: num
every: 10s
warn: $this > 0
@@ -64,7 +64,7 @@ component: CockroachDB
class: Utilization
type: Database
component: CockroachDB
- calc: $sys_fd_open/$sys_fd_softlimit * 100
+ calc: $open/$sys_fd_softlimit * 100
units: %
every: 10s
warn: $this > 80
diff --git a/health/health.d/disks.conf b/health/health.d/disks.conf
index fd207fbc1..7bd4f120c 100644
--- a/health/health.d/disks.conf
+++ b/health/health.d/disks.conf
@@ -16,7 +16,7 @@
component: Disk
os: linux freebsd
hosts: *
- families: !/dev !/dev/* !/run !/run/* *
+chart labels: mount_point=!/dev !/dev/* !/run !/run/* *
calc: $used * 100 / ($avail + $used)
units: %
every: 1m
@@ -33,7 +33,7 @@ component: Disk
component: Disk
os: linux freebsd
hosts: *
- families: !/dev !/dev/* !/run !/run/* *
+chart labels: mount_point=!/dev !/dev/* !/run !/run/* *
calc: $used * 100 / ($avail + $used)
units: %
every: 1m
@@ -59,7 +59,6 @@ component: Disk
# on: disk.space
# os: linux freebsd
# hosts: *
-# families: *
# lookup: min -10m at -50m unaligned of avail
# calc: ($this - $avail) / (($now - $after) / 3600)
# every: 1m
@@ -75,7 +74,6 @@ component: Disk
# on: disk.space
# os: linux freebsd
# hosts: *
-# families: *
# calc: ($disk_fill_rate > 0) ? ($avail / $disk_fill_rate) : (inf)
# units: hours
# every: 10s
@@ -101,7 +99,6 @@ component: Disk
# on: disk.inodes
# os: linux freebsd
# hosts: *
-# families: *
# lookup: min -10m at -50m unaligned of avail
# calc: ($this - $avail) / (($now - $after) / 3600)
# every: 1m
@@ -116,7 +113,6 @@ component: Disk
# on: disk.inodes
# os: linux freebsd
# hosts: *
-# families: *
# calc: ($disk_inode_rate > 0) ? ($avail / $disk_inode_rate) : (inf)
# units: hours
# every: 10s
@@ -141,7 +137,6 @@ component: Disk
component: Disk
os: linux freebsd
hosts: *
- families: *
lookup: average -10m unaligned
units: %
every: 1m
@@ -163,7 +158,6 @@ component: Disk
component: Disk
os: linux
hosts: *
- families: *
lookup: average -10m unaligned
units: ms
every: 1m
diff --git a/health/health.d/exporting.conf b/health/health.d/exporting.conf
index 06f398c6e..f1030a317 100644
--- a/health/health.d/exporting.conf
+++ b/health/health.d/exporting.conf
@@ -1,6 +1,5 @@
template: exporting_last_buffering
- families: *
on: exporting_data_size
class: Latency
type: Netdata
@@ -15,7 +14,6 @@ component: Exporting engine
to: dba
template: exporting_metrics_sent
- families: *
on: exporting_data_size
class: Workload
type: Netdata
diff --git a/health/health.d/httpcheck.conf b/health/health.d/httpcheck.conf
index 2008b000d..81748b9e0 100644
--- a/health/health.d/httpcheck.conf
+++ b/health/health.d/httpcheck.conf
@@ -1,7 +1,6 @@
# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges
template: httpcheck_web_service_up
- families: *
on: httpcheck.status
class: Utilization
type: Web Server
@@ -14,7 +13,6 @@ component: HTTP endpoint
to: silent
template: httpcheck_web_service_bad_content
- families: *
on: httpcheck.status
class: Workload
type: Web Server
@@ -29,7 +27,6 @@ component: HTTP endpoint
to: webmaster
template: httpcheck_web_service_bad_status
- families: *
on: httpcheck.status
class: Workload
type: Web Server
@@ -44,7 +41,6 @@ component: HTTP endpoint
to: webmaster
template: httpcheck_web_service_timeouts
- families: *
on: httpcheck.status
class: Latency
type: Web Server
@@ -59,7 +55,6 @@ component: HTTP endpoint
to: webmaster
template: httpcheck_web_service_no_connection
- families: *
on: httpcheck.status
class: Errors
type: Other
diff --git a/health/health.d/ioping.conf b/health/health.d/ioping.conf
index 8b498ad3c..2786cbd62 100644
--- a/health/health.d/ioping.conf
+++ b/health/health.d/ioping.conf
@@ -1,5 +1,4 @@
template: ioping_disk_latency
- families: *
on: ioping.latency
class: Latency
type: System
diff --git a/health/health.d/mdstat.conf b/health/health.d/mdstat.conf
index ed980a26a..b90455a58 100644
--- a/health/health.d/mdstat.conf
+++ b/health/health.d/mdstat.conf
@@ -29,7 +29,7 @@ component: RAID
class: Errors
type: System
component: RAID
- families: !*(raid1) !*(raid10) *
+chart labels: raid_level=!raid1 !raid10 *
units: unsynchronized blocks
calc: $count
every: 60s
diff --git a/health/health.d/net.conf b/health/health.d/net.conf
index a0723f303..08a4eecb4 100644
--- a/health/health.d/net.conf
+++ b/health/health.d/net.conf
@@ -11,7 +11,6 @@
component: Network
os: *
hosts: *
- families: *
calc: ( $nic_speed_max > 0 ) ? ( $nic_speed_max) : ( nan )
units: Mbit
every: 10s
@@ -24,7 +23,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: average -1m unaligned absolute of received
calc: ($interface_speed > 0) ? ($this * 100 / ($interface_speed)) : ( nan )
units: %
@@ -41,7 +39,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: average -1m unaligned absolute of sent
calc: ($interface_speed > 0) ? ($this * 100 / ($interface_speed)) : ( nan )
units: %
@@ -68,7 +65,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: sum -10m unaligned absolute of inbound
units: packets
every: 1m
@@ -81,7 +77,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: sum -10m unaligned absolute of outbound
units: packets
every: 1m
@@ -94,7 +89,7 @@ component: Network
component: Network
os: linux
hosts: *
- families: !wl* *
+chart labels: device=!wl* *
lookup: sum -10m unaligned absolute of received
calc: (($inbound_packets_dropped != nan AND $this > 10000) ? ($inbound_packets_dropped * 100 / $this) : (0))
units: %
@@ -111,7 +106,7 @@ component: Network
component: Network
os: linux
hosts: *
- families: !wl* *
+chart labels: device=!wl* *
lookup: sum -10m unaligned absolute of sent
calc: (($outbound_packets_dropped != nan AND $this > 1000) ? ($outbound_packets_dropped * 100 / $this) : (0))
units: %
@@ -128,7 +123,7 @@ component: Network
component: Network
os: linux
hosts: *
- families: wl*
+chart labels: device=wl*
lookup: sum -10m unaligned absolute of received
calc: (($inbound_packets_dropped != nan AND $this > 10000) ? ($inbound_packets_dropped * 100 / $this) : (0))
units: %
@@ -145,7 +140,7 @@ component: Network
component: Network
os: linux
hosts: *
- families: wl*
+chart labels: device=wl*
lookup: sum -10m unaligned absolute of sent
calc: (($outbound_packets_dropped != nan AND $this > 1000) ? ($outbound_packets_dropped * 100 / $this) : (0))
units: %
@@ -165,7 +160,6 @@ component: Network
component: Network
os: freebsd
hosts: *
- families: *
lookup: sum -10m unaligned absolute of inbound
units: errors
every: 1m
@@ -181,7 +175,6 @@ component: Network
component: Network
os: freebsd
hosts: *
- families: *
lookup: sum -10m unaligned absolute of outbound
units: errors
every: 1m
@@ -205,7 +198,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: sum -10m unaligned absolute
units: errors
every: 1m
@@ -230,7 +222,6 @@ component: Network
component: Network
os: linux freebsd
hosts: *
- families: *
lookup: average -1m unaligned of received
units: packets
every: 10s
@@ -243,7 +234,6 @@ component: Network
component: Network
os: linux freebsd
hosts: *
- families: *
lookup: average -10s unaligned of received
calc: $this * 100 / (($1m_received_packets_rate < 1000)?(1000):($1m_received_packets_rate))
every: 10s
diff --git a/health/health.d/nvme.conf b/health/health.d/nvme.conf
index b7c0e6fd4..742ffbc93 100644
--- a/health/health.d/nvme.conf
+++ b/health/health.d/nvme.conf
@@ -1,7 +1,6 @@
# you can disable an alarm notification by setting the 'to' line to: silent
template: nvme_device_critical_warnings_state
- families: *
on: nvme.device_critical_warnings_state
class: Errors
type: System
diff --git a/health/health.d/ping.conf b/health/health.d/ping.conf
index fa8213ad3..b8d39bbad 100644
--- a/health/health.d/ping.conf
+++ b/health/health.d/ping.conf
@@ -1,7 +1,6 @@
# you can disable an alarm notification by setting the 'to' line to: silent
template: ping_host_reachable
- families: *
on: ping.host_packet_loss
class: Errors
type: Other
@@ -16,7 +15,6 @@ component: Network
to: sysadmin
template: ping_packet_loss
- families: *
on: ping.host_packet_loss
class: Errors
type: Other
@@ -33,7 +31,6 @@ component: Network
to: sysadmin
template: ping_host_latency
- families: *
on: ping.host_rtt
class: Latency
type: Other
diff --git a/health/health.d/plugin.conf b/health/health.d/plugin.conf
new file mode 100644
index 000000000..0a891db79
--- /dev/null
+++ b/health/health.d/plugin.conf
@@ -0,0 +1,11 @@
+ template: plugin_availability_status
+ on: netdata.plugin_availability_status
+ class: Errors
+ type: Netdata
+ calc: $now - $last_collected_t
+ units: seconds ago
+ every: 10s
+ warn: $this > (($status >= $WARNING) ? ($update_every) : (20 * $update_every))
+ delay: down 5m multiplier 1.5 max 1h
+ info: the amount of time since ${label:_collect_plugin} last reported its availability status
+ to: sysadmin
diff --git a/health/health.d/portcheck.conf b/health/health.d/portcheck.conf
index e8908404c..34550ea02 100644
--- a/health/health.d/portcheck.conf
+++ b/health/health.d/portcheck.conf
@@ -1,7 +1,6 @@
# This is a fast-reacting no-notification alarm ideal for custom dashboards or badges
template: portcheck_service_reachable
- families: *
on: portcheck.status
class: Workload
type: Other
@@ -14,7 +13,6 @@ component: TCP endpoint
to: silent
template: portcheck_connection_timeouts
- families: *
on: portcheck.status
class: Errors
type: Other
@@ -29,7 +27,6 @@ component: TCP endpoint
to: sysadmin
template: portcheck_connection_fails
- families: *
on: portcheck.status
class: Errors
type: Other
diff --git a/health/health.d/redis.conf b/health/health.d/redis.conf
index 34d00b5df..a58fa34d1 100644
--- a/health/health.d/redis.conf
+++ b/health/health.d/redis.conf
@@ -1,7 +1,6 @@
# you can disable an alarm notification by setting the 'to' line to: silent
template: redis_connections_rejected
- families: *
on: redis.connections
class: Errors
type: KV Storage
@@ -15,7 +14,6 @@ component: Redis
to: dba
template: redis_bgsave_broken
- families: *
on: redis.bgsave_health
class: Errors
type: KV Storage
@@ -28,7 +26,6 @@ component: Redis
to: dba
template: redis_bgsave_slow
- families: *
on: redis.bgsave_now
class: Latency
type: KV Storage
@@ -43,7 +40,6 @@ component: Redis
to: dba
template: redis_master_link_down
- families: *
on: redis.master_link_down_since_time
class: Errors
type: KV Storage
diff --git a/health/health.d/vsphere.conf b/health/health.d/vsphere.conf
index d8fc899b9..1d8be6cb5 100644
--- a/health/health.d/vsphere.conf
+++ b/health/health.d/vsphere.conf
@@ -43,7 +43,6 @@ component: Memory
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of rx
units: packets
every: 1m
@@ -55,7 +54,6 @@ component: Network
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of tx
units: packets
every: 1m
@@ -69,7 +67,6 @@ component: Network
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of rx
calc: (($vsphere_inbound_packets_errors != nan AND $this > 1000) ? ($vsphere_inbound_packets_errors * 100 / $this) : (0))
units: %
@@ -85,7 +82,6 @@ component: Network
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of tx
calc: (($vsphere_outbound_packets_errors != nan AND $this > 1000) ? ($vsphere_outbound_packets_errors * 100 / $this) : (0))
units: %
@@ -121,7 +117,6 @@ component: CPU
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of rx
units: packets
every: 1m
@@ -133,7 +128,6 @@ component: Network
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of tx
units: packets
every: 1m
@@ -147,7 +141,6 @@ component: Network
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of rx
calc: (($vsphere_inbound_packets_dropped != nan AND $this > 1000) ? ($vsphere_inbound_packets_dropped * 100 / $this) : (0))
units: %
@@ -163,7 +156,6 @@ component: Network
type: Virtual Machine
component: Network
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of tx
calc: (($vsphere_outbound_packets_dropped != nan AND $this > 1000) ? ($vsphere_outbound_packets_dropped * 100 / $this) : (0))
units: %
diff --git a/health/health.d/web_log.conf b/health/health.d/web_log.conf
index c33c4664c..3fd01831b 100644
--- a/health/health.d/web_log.conf
+++ b/health/health.d/web_log.conf
@@ -13,7 +13,6 @@
class: Workload
type: Web Server
component: Web log
- families: *
lookup: sum -1m unaligned
calc: ($this == 0)?(1):($this)
units: requests
@@ -25,7 +24,6 @@ component: Web log
class: Errors
type: Web Server
component: Web log
- families: *
lookup: sum -1m unaligned of unmatched
calc: $this * 100 / $web_log_1m_total_requests
units: %
@@ -50,7 +48,6 @@ component: Web log
class: Workload
type: Web Server
component: Web log
- families: *
lookup: sum -1m unaligned
calc: ($this == 0)?(1):($this)
units: requests
@@ -62,7 +59,6 @@ component: Web log
class: Workload
type: Web Server
component: Web log
- families: *
lookup: sum -1m unaligned of success
calc: $this * 100 / $web_log_1m_requests
units: %
@@ -78,7 +74,6 @@ component: Web log
class: Workload
type: Web Server
component: Web log
- families: *
lookup: sum -1m unaligned of redirect
calc: $this * 100 / $web_log_1m_requests
units: %
@@ -93,7 +88,6 @@ component: Web log
class: Errors
type: Web Server
component: Web log
- families: *
lookup: sum -1m unaligned of bad
calc: $this * 100 / $web_log_1m_requests
units: %
@@ -108,7 +102,6 @@ component: Web log
class: Errors
type: Web Server
component: Web log
- families: *
lookup: sum -1m unaligned of error
calc: $this * 100 / $web_log_1m_requests
units: %
@@ -134,7 +127,6 @@ component: Web log
class: Latency
type: System
component: Web log
- families: *
lookup: average -10m unaligned of avg
units: ms
every: 30s
@@ -145,7 +137,6 @@ component: Web log
class: Latency
type: Web Server
component: Web log
- families: *
lookup: average -1m unaligned of avg
units: ms
every: 10s
@@ -174,7 +165,6 @@ component: Web log
class: Workload
type: Web Server
component: Web log
- families: *
lookup: average -5m at -5m unaligned of success
units: requests/s
every: 30s
@@ -185,7 +175,6 @@ component: Web log
class: Workload
type: Web Server
component: Web log
- families: *
lookup: average -5m unaligned of success
units: requests/s
every: 30s
@@ -196,7 +185,6 @@ component: Web log
class: Workload
type: Web Server
component: Web log
- families: *
calc: ($web_log_5m_successful_old > 0)?($web_log_5m_successful * 100 / $web_log_5m_successful_old):(100)
units: %
every: 30s
diff --git a/health/health.d/windows.conf b/health/health.d/windows.conf
index d678ac3ae..d4bc7639c 100644
--- a/health/health.d/windows.conf
+++ b/health/health.d/windows.conf
@@ -62,7 +62,6 @@ component: Memory
component: Network
os: linux
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of inbound
units: packets
every: 1m
@@ -78,7 +77,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of outbound
units: packets
every: 1m
@@ -94,7 +92,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of inbound
units: packets
every: 1m
@@ -110,7 +107,6 @@ component: Network
component: Network
os: linux
hosts: *
- families: *
lookup: sum -10m unaligned absolute match-names of outbound
units: packets
every: 1m
diff --git a/health/health.h b/health/health.h
index 902e36c62..c36aabac7 100644
--- a/health/health.h
+++ b/health/health.h
@@ -41,7 +41,6 @@ void health_reload(void);
void health_aggregate_alarms(RRDHOST *host, BUFFER *wb, BUFFER* context, RRDCALC_STATUS status);
void health_alarms2json(RRDHOST *host, BUFFER *wb, int all);
void health_alarms_values2json(RRDHOST *host, BUFFER *wb, int all);
-void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after, char *chart);
void health_api_v1_chart_variables2json(RRDSET *st, BUFFER *buf);
void health_api_v1_chart_custom_variables2json(RRDSET *st, BUFFER *buf);
@@ -87,11 +86,10 @@ void health_alarm_log_free_one_nochecks_nounlink(ALARM_ENTRY *ae);
void *health_cmdapi_thread(void *ptr);
-void health_label_log_save(RRDHOST *host);
-
char *health_edit_command_from_source(const char *source);
void sql_refresh_hashes(void);
void health_add_host_labels(void);
+void health_string2json(BUFFER *wb, const char *prefix, const char *label, const char *value, const char *suffix);
#endif //NETDATA_HEALTH_H
diff --git a/health/health_config.c b/health/health_config.c
index 38857fc9a..a11fd51cd 100644
--- a/health/health_config.c
+++ b/health/health_config.c
@@ -32,6 +32,7 @@
#define HEALTH_REPEAT_KEY "repeat"
#define HEALTH_HOST_LABEL_KEY "host labels"
#define HEALTH_FOREACH_KEY "foreach"
+#define HEALTH_CHART_LABEL_KEY "chart labels"
static inline int health_parse_delay(
size_t line, const char *filename, char *string,
@@ -192,6 +193,50 @@ static inline int isvariableterm(const char s) {
return 1;
}
+// If needed, prepend the key to every value in the list, preserving any '!' negation:
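+// e.g. "mount_point=/mnt/disk1 !/mnt/disk2 *" becomes
+// "mount_point=/mnt/disk1 !mount_point=/mnt/disk2 mount_point=* "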
+static inline char *health_config_add_key_to_values(char *value) {
+ BUFFER *wb = buffer_create(HEALTH_CONF_MAX_LINE + 1, NULL);
+ char key[HEALTH_CONF_MAX_LINE + 1];
+ char data[HEALTH_CONF_MAX_LINE + 1];
+
+ char *s = value;
+ size_t i = 0;
+
+ key[0] = '\0';
+ while(*s) {
+ if (*s == '=') {
+ //hold the key
+ data[i]='\0';
+ strncpyz(key, data, HEALTH_CONF_MAX_LINE);
+ i=0;
+ } else if (*s == ' ') {
+ data[i]='\0';
+ if (data[0]=='!')
+ buffer_snprintf(wb, HEALTH_CONF_MAX_LINE, "!%s=%s ", key, data + 1);
+ else
+ buffer_snprintf(wb, HEALTH_CONF_MAX_LINE, "%s=%s ", key, data);
+ i=0;
+ } else {
+ data[i++] = *s;
+ }
+ s++;
+ }
+
+ data[i]='\0';
+ if (data[0]) {
+ if (data[0]=='!')
+ buffer_snprintf(wb, HEALTH_CONF_MAX_LINE, "!%s=%s ", key, data + 1);
+ else
+ buffer_snprintf(wb, HEALTH_CONF_MAX_LINE, "%s=%s ", key, data);
+ }
+
+ char *final = mallocz(HEALTH_CONF_MAX_LINE + 1);
+ strncpyz(final, buffer_tostring(wb), HEALTH_CONF_MAX_LINE);
+ buffer_free(wb);
+
+ return final;
+}
+
static inline void parse_variables_and_store_in_health_rrdvars(char *value, size_t len) {
const char *s = value;
char buffer[RRDVAR_MAX_LENGTH];
@@ -453,6 +498,7 @@ static inline void alert_config_free(struct alert_config *cfg)
string_freez(cfg->host_labels);
string_freez(cfg->p_db_lookup_dimensions);
string_freez(cfg->p_db_lookup_method);
+ string_freez(cfg->chart_labels);
freez(cfg);
}
@@ -489,7 +535,8 @@ static int health_readfile(const char *filename, void *data) {
hash_delay = 0,
hash_options = 0,
hash_repeat = 0,
- hash_host_label = 0;
+ hash_host_label = 0,
+ hash_chart_label = 0;
char buffer[HEALTH_CONF_MAX_LINE + 1];
@@ -521,6 +568,7 @@ static int health_readfile(const char *filename, void *data) {
hash_options = simple_uhash(HEALTH_OPTIONS_KEY);
hash_repeat = simple_uhash(HEALTH_REPEAT_KEY);
hash_host_label = simple_uhash(HEALTH_HOST_LABEL_KEY);
+ hash_chart_label = simple_uhash(HEALTH_CHART_LABEL_KEY);
}
FILE *fp = fopen(filename, "r");
@@ -937,6 +985,27 @@ static int health_readfile(const char *filename, void *data) {
rc->module_match = string_strdupz(value);
rc->module_pattern = simple_pattern_create(rrdcalc_module_match(rc), NULL, SIMPLE_PATTERN_EXACT, true);
}
+ else if(hash == hash_chart_label && !strcasecmp(key, HEALTH_CHART_LABEL_KEY)) {
+ alert_cfg->chart_labels = string_strdupz(value);
+ if(rc->chart_labels) {
+ if(strcmp(rrdcalc_chart_labels(rc), value) != 0)
+ error("Health configuration at line %zu of file '%s' for alarm '%s' has key '%s' twice, once with value '%s' and later with value '%s'.",
+ line, filename, rrdcalc_name(rc), key, rrdcalc_chart_labels(rc), value);
+
+ string_freez(rc->chart_labels);
+ simple_pattern_free(rc->chart_labels_pattern);
+ }
+
+ {
+ char *tmp = simple_pattern_trim_around_equal(value);
+ char *tmp_2 = health_config_add_key_to_values(tmp);
+ rc->chart_labels = string_strdupz(tmp_2);
+ freez(tmp);
+ freez(tmp_2);
+ }
+ rc->chart_labels_pattern = simple_pattern_create(rrdcalc_chart_labels(rc), NULL, SIMPLE_PATTERN_EXACT,
+ true);
+ }
else {
error("Health configuration at line %zu of file '%s' for alarm '%s' has unknown key '%s'.",
line, filename, rrdcalc_name(rc), key);
@@ -1186,9 +1255,31 @@ static int health_readfile(const char *filename, void *data) {
rt->host_labels = string_strdupz(tmp);
freez(tmp);
}
+
rt->host_labels_pattern = simple_pattern_create(rrdcalctemplate_host_labels(rt), NULL,
SIMPLE_PATTERN_EXACT, true);
}
+ else if(hash == hash_chart_label && !strcasecmp(key, HEALTH_CHART_LABEL_KEY)) {
+ alert_cfg->chart_labels = string_strdupz(value);
+ if(rt->chart_labels) {
+ if(strcmp(rrdcalctemplate_chart_labels(rt), value) != 0)
+ error("Health configuration at line %zu of file '%s' for template '%s' has key '%s' twice, once with value '%s' and later with value '%s'. Using ('%s').",
+ line, filename, rrdcalctemplate_name(rt), key, rrdcalctemplate_chart_labels(rt), value, value);
+
+ string_freez(rt->chart_labels);
+ simple_pattern_free(rt->chart_labels_pattern);
+ }
+
+ {
+ char *tmp = simple_pattern_trim_around_equal(value);
+ char *tmp_2 = health_config_add_key_to_values(tmp);
+ rt->chart_labels = string_strdupz(tmp_2);
+ freez(tmp);
+ freez(tmp_2);
+ }
+ rt->chart_labels_pattern = simple_pattern_create(rrdcalctemplate_chart_labels(rt), NULL,
+ SIMPLE_PATTERN_EXACT, true);
+ }
else {
error("Health configuration at line %zu of file '%s' for template '%s' has unknown key '%s'.",
line, filename, rrdcalctemplate_name(rt), key);
diff --git a/health/health_json.c b/health/health_json.c
index ba18bddba..4f81998f0 100644
--- a/health/health_json.c
+++ b/health/health_json.c
@@ -13,136 +13,6 @@ void health_string2json(BUFFER *wb, const char *prefix, const char *label, const
buffer_sprintf(wb, "%s\"%s\":null%s", prefix, label, suffix);
}
-void health_alarm_entry2json_nolock(BUFFER *wb, ALARM_ENTRY *ae, RRDHOST *host) {
- char *edit_command = ae->source ? health_edit_command_from_source(ae_source(ae)) : strdupz("UNKNOWN=0=UNKNOWN");
- char config_hash_id[GUID_LEN + 1];
- uuid_unparse_lower(ae->config_hash_id, config_hash_id);
-
- buffer_sprintf(wb,
- "\n\t{\n"
- "\t\t\"hostname\": \"%s\",\n"
- "\t\t\"utc_offset\": %d,\n"
- "\t\t\"timezone\": \"%s\",\n"
- "\t\t\"unique_id\": %u,\n"
- "\t\t\"alarm_id\": %u,\n"
- "\t\t\"alarm_event_id\": %u,\n"
- "\t\t\"config_hash_id\": \"%s\",\n"
- "\t\t\"name\": \"%s\",\n"
- "\t\t\"chart\": \"%s\",\n"
- "\t\t\"context\": \"%s\",\n"
- "\t\t\"family\": \"%s\",\n"
- "\t\t\"class\": \"%s\",\n"
- "\t\t\"component\": \"%s\",\n"
- "\t\t\"type\": \"%s\",\n"
- "\t\t\"processed\": %s,\n"
- "\t\t\"updated\": %s,\n"
- "\t\t\"exec_run\": %lu,\n"
- "\t\t\"exec_failed\": %s,\n"
- "\t\t\"exec\": \"%s\",\n"
- "\t\t\"recipient\": \"%s\",\n"
- "\t\t\"exec_code\": %d,\n"
- "\t\t\"source\": \"%s\",\n"
- "\t\t\"command\": \"%s\",\n"
- "\t\t\"units\": \"%s\",\n"
- "\t\t\"when\": %lu,\n"
- "\t\t\"duration\": %lu,\n"
- "\t\t\"non_clear_duration\": %lu,\n"
- "\t\t\"status\": \"%s\",\n"
- "\t\t\"old_status\": \"%s\",\n"
- "\t\t\"delay\": %d,\n"
- "\t\t\"delay_up_to_timestamp\": %lu,\n"
- "\t\t\"updated_by_id\": %u,\n"
- "\t\t\"updates_id\": %u,\n"
- "\t\t\"value_string\": \"%s\",\n"
- "\t\t\"old_value_string\": \"%s\",\n"
- "\t\t\"last_repeat\": \"%lu\",\n"
- "\t\t\"silenced\": \"%s\",\n"
- , rrdhost_hostname(host)
- , host->utc_offset
- , rrdhost_abbrev_timezone(host)
- , ae->unique_id
- , ae->alarm_id
- , ae->alarm_event_id
- , config_hash_id
- , ae_name(ae)
- , ae_chart_name(ae)
- , ae_chart_context(ae)
- , ae_family(ae)
- , ae->classification?ae_classification(ae):"Unknown"
- , ae->component?ae_component(ae):"Unknown"
- , ae->type?ae_type(ae):"Unknown"
- , (ae->flags & HEALTH_ENTRY_FLAG_PROCESSED)?"true":"false"
- , (ae->flags & HEALTH_ENTRY_FLAG_UPDATED)?"true":"false"
- , (unsigned long)ae->exec_run_timestamp
- , (ae->flags & HEALTH_ENTRY_FLAG_EXEC_FAILED)?"true":"false"
- , ae->exec?ae_exec(ae):string2str(host->health.health_default_exec)
- , ae->recipient?ae_recipient(ae):string2str(host->health.health_default_recipient)
- , ae->exec_code
- , ae_source(ae)
- , edit_command
- , ae_units(ae)
- , (unsigned long)ae->when
- , (unsigned long)ae->duration
- , (unsigned long)ae->non_clear_duration
- , rrdcalc_status2string(ae->new_status)
- , rrdcalc_status2string(ae->old_status)
- , ae->delay
- , (unsigned long)ae->delay_up_to_timestamp
- , ae->updated_by_id
- , ae->updates_id
- , ae_new_value_string(ae)
- , ae_old_value_string(ae)
- , (unsigned long)ae->last_repeat
- , (ae->flags & HEALTH_ENTRY_FLAG_SILENCED)?"true":"false"
- );
-
- health_string2json(wb, "\t\t", "info", ae->info ? ae_info(ae) : "", ",\n");
-
- if(unlikely(ae->flags & HEALTH_ENTRY_FLAG_NO_CLEAR_NOTIFICATION)) {
- buffer_strcat(wb, "\t\t\"no_clear_notification\": true,\n");
- }
-
- buffer_strcat(wb, "\t\t\"value\":");
- buffer_print_netdata_double(wb, ae->new_value);
- buffer_strcat(wb, ",\n");
-
- buffer_strcat(wb, "\t\t\"old_value\":");
- buffer_print_netdata_double(wb, ae->old_value);
- buffer_strcat(wb, "\n");
-
- buffer_strcat(wb, "\t}");
-
- freez(edit_command);
-}
-
-void health_alarm_log2json(RRDHOST *host, BUFFER *wb, uint32_t after, char *chart) {
-
- buffer_strcat(wb, "[");
-
- unsigned int max = host->health_log.max;
- unsigned int count = 0;
-
- STRING *chart_string = string_strdupz(chart);
-
- netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
-
- ALARM_ENTRY *ae;
- for (ae = host->health_log.alarms; ae && count < max; ae = ae->next) {
- if ((ae->unique_id > after) && (!chart || chart_string == ae->chart)) {
- if (likely(count))
- buffer_strcat(wb, ",");
- health_alarm_entry2json_nolock(wb, ae, host);
- count++;
- }
- }
-
- netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
-
- string_freez(chart_string);
-
- buffer_strcat(wb, "\n]\n");
-}
-
static inline void health_rrdcalc_values2json_nolock(RRDHOST *host, BUFFER *wb, RRDCALC *rc) {
(void)host;
buffer_sprintf(wb,
@@ -397,43 +267,3 @@ void health_alarms_values2json(RRDHOST *host, BUFFER *wb, int all) {
buffer_strcat(wb, "\n\t}\n}\n");
}
-static int have_recent_alarm(RRDHOST *host, uint32_t alarm_id, uint32_t mark)
-{
- ALARM_ENTRY *ae = host->health_log.alarms;
-
- while(ae) {
- if (ae->alarm_id == alarm_id && ae->unique_id > mark &&
- (ae->new_status != RRDCALC_STATUS_WARNING && ae->new_status != RRDCALC_STATUS_CRITICAL))
- return 1;
- ae = ae->next;
- }
- return 0;
-}
-
-void health_active_log_alarms_2json(RRDHOST *host, BUFFER *wb) {
- netdata_rwlock_rdlock(&host->health_log.alarm_log_rwlock);
-
- buffer_sprintf(wb, "[\n");
-
- unsigned int max = host->health_log.max;
- unsigned int count = 0;
- ALARM_ENTRY *ae;
- for(ae = host->health_log.alarms; ae && count < max ; ae = ae->next) {
- if (!ae->updated_by_id &&
- ((ae->new_status == RRDCALC_STATUS_WARNING || ae->new_status == RRDCALC_STATUS_CRITICAL) ||
- ((ae->old_status == RRDCALC_STATUS_WARNING || ae->old_status == RRDCALC_STATUS_CRITICAL) &&
- ae->new_status == RRDCALC_STATUS_REMOVED))) {
-
- if (have_recent_alarm(host, ae->alarm_id, ae->unique_id))
- continue;
-
- if (likely(count))
- buffer_strcat(wb, ",");
- health_alarm_entry2json_nolock(wb, ae, host);
- count++;
- }
- }
- buffer_strcat(wb, "]");
-
- netdata_rwlock_unlock(&host->health_log.alarm_log_rwlock);
-}
diff --git a/health/health_log.c b/health/health_log.c
index b1f59a1a5..b62e0ace4 100644
--- a/health/health_log.c
+++ b/health/health_log.c
@@ -5,14 +5,7 @@
// ----------------------------------------------------------------------------
inline void health_alarm_log_save(RRDHOST *host, ALARM_ENTRY *ae) {
-
sql_health_alarm_log_save(host, ae);
-
-#ifdef ENABLE_ACLK
- if (netdata_cloud_setting) {
- sql_queue_alarm_to_aclk(host, ae, 0);
- }
-#endif
}
// ----------------------------------------------------------------------------
@@ -53,6 +46,8 @@ inline ALARM_ENTRY* health_create_alarm_entry(
uuid_copy(ae->config_hash_id, *((uuid_t *) config_hash_id));
+ uuid_generate_random(ae->transition_id);
+
ae->family = string_dup(family);
ae->classification = string_dup(class);
ae->component = string_dup(component);
diff --git a/httpd/h2o_utils.c b/httpd/h2o_utils.c
new file mode 100644
index 000000000..943216f59
--- /dev/null
+++ b/httpd/h2o_utils.c
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "h2o_utils.h"
+
+#include "h2o/string_.h"
+
+#include "libnetdata/libnetdata.h"
+
+char *iovec_to_cstr(h2o_iovec_t *str)
+{
+ char *c_str = mallocz(str->len + 1);
+ memcpy(c_str, str->base, str->len);
+ c_str[str->len] = 0;
+ return c_str;
+}
+
+#define KEY_VAL_BUFFER_GROWTH_STEP 5
+h2o_iovec_pair_vector_t *parse_URL_params(h2o_mem_pool_t *pool, h2o_iovec_t params_string)
+{
+ h2o_iovec_pair_vector_t *params_vec = h2o_mem_alloc_shared(pool, sizeof(h2o_iovec_pair_vector_t), NULL);
+ memset(params_vec, 0, sizeof(h2o_iovec_pair_vector_t));
+
+ h2o_iovec_pair_t param;
+ while ((param.name.base = (char*)h2o_next_token(&params_string, '&', &param.name.len, &param.value)) != NULL) {
+ if (params_vec->capacity == params_vec->size)
+ h2o_vector_reserve(pool, params_vec, params_vec->capacity + KEY_VAL_BUFFER_GROWTH_STEP);
+
+ params_vec->entries[params_vec->size++] = param;
+ }
+
+ return params_vec;
+}
+
+h2o_iovec_pair_t *get_URL_param_by_name(h2o_iovec_pair_vector_t *params_vec, const void *needle, size_t needle_len)
+{
+ for (size_t i = 0; i < params_vec->size; i++) {
+ h2o_iovec_pair_t *ret = &params_vec->entries[i];
+ if (h2o_memis(ret->name.base, ret->name.len, needle, needle_len))
+ return ret;
+ }
+ return NULL;
+}
+
+char *url_unescape(const char *url)
+{
+ char *result = mallocz(strlen(url) + 1);
+
+ int i, j;
+ for (i = 0, j = 0; url[i] != 0; i++, j++) {
+ if (url[i] == '%' && isxdigit(url[i+1]) && isxdigit(url[i+2])) {
+ char hex[3] = { url[i+1], url[i+2], 0 };
+ result[j] = strtol(hex, NULL, 16);
+ i += 2;
+ } else
+ result[j] = url[i];
+ }
+ result[j] = 0;
+
+ return result;
+}
diff --git a/httpd/h2o_utils.h b/httpd/h2o_utils.h
new file mode 100644
index 000000000..6760ed9a9
--- /dev/null
+++ b/httpd/h2o_utils.h
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_H2O_UTILS_H
+#define NETDATA_H2O_UTILS_H
+
+#include "h2o/memory.h"
+
+#define __HAS_URL_PARAMS(reqptr) ((reqptr)->query_at != SIZE_MAX && ((reqptr)->path.len - (reqptr)->query_at > 1))
+#define IF_HAS_URL_PARAMS(reqptr) if __HAS_URL_PARAMS(reqptr)
+#define UNLESS_HAS_URL_PARAMS(reqptr) if (!__HAS_URL_PARAMS(reqptr))
+#define URL_PARAMS_IOVEC_INIT(reqptr) { .base = &(reqptr)->path.base[(reqptr)->query_at + 1], \
+ .len = (reqptr)->path.len - (reqptr)->query_at - 1 }
+#define URL_PARAMS_IOVEC_INIT_WITH_QUESTIONMARK(reqptr) { .base = &(reqptr)->path.base[(reqptr)->query_at], \
+ .len = (reqptr)->path.len - (reqptr)->query_at }
+
+#define PRINTF_H2O_IOVEC_FMT "%.*s"
+#define PRINTF_H2O_IOVEC(iovec) ((int)(iovec)->len), ((iovec)->base)
+
+char *iovec_to_cstr(h2o_iovec_t *str);
+
+typedef struct h2o_iovec_pair {
+ h2o_iovec_t name;
+ h2o_iovec_t value;
+} h2o_iovec_pair_t;
+
+typedef H2O_VECTOR(h2o_iovec_pair_t) h2o_iovec_pair_vector_t;
+
+// Takes the part of the URL after '?' (the URL-encoded parameters)
+// and parses it into a vector of name/value pairs without copying the actual strings
+h2o_iovec_pair_vector_t *parse_URL_params(h2o_mem_pool_t *pool, h2o_iovec_t params_string);
+
+// Searches for a parameter by name (provided in needle);
+// returns a pointer to it or NULL
+h2o_iovec_pair_t *get_URL_param_by_name(h2o_iovec_pair_vector_t *params_vec, const void *needle, size_t needle_len);
+
+char *url_unescape(const char *url);
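+
+// A hypothetical usage sketch inside an h2o request handler (req is an
+// h2o_req_t *; the "chart" parameter name is illustrative):
+//
+//   IF_HAS_URL_PARAMS(req) {
+//       h2o_iovec_t params = URL_PARAMS_IOVEC_INIT(req);
+//       h2o_iovec_pair_vector_t *vec = parse_URL_params(&req->pool, params);
+//       h2o_iovec_pair_t *p = get_URL_param_by_name(vec, H2O_STRLIT("chart"));
+//       if (p) {
+//           char *value = iovec_to_cstr(&p->value); // malloc'd copy
+//           char *decoded = url_unescape(value);    // percent-decoded copy
+//           /* ... use decoded ... */
+//           freez(decoded);
+//           freez(value);
+//       }
+//   }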
+
+#endif /* NETDATA_H2O_UTILS_H */
diff --git a/httpd/http_server.c b/httpd/http_server.c
new file mode 100644
index 000000000..24b168d92
--- /dev/null
+++ b/httpd/http_server.c
@@ -0,0 +1,339 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "daemon/common.h"
+#include "http_server.h"
+#include "h2o.h"
+
+#include "h2o_utils.h"
+
+static h2o_globalconf_t config;
+static h2o_context_t ctx;
+static h2o_accept_ctx_t accept_ctx;
+
+#define CONTENT_JSON_UTF8 H2O_STRLIT("application/json; charset=utf-8")
+#define CONTENT_TEXT_UTF8 H2O_STRLIT("text/plain; charset=utf-8")
+#define NBUF_INITIAL_SIZE_RESP (4096)
+#define API_V1_PREFIX "/api/v1/"
+#define HOST_SELECT_PREFIX "/host/"
+
+#define HTTPD_CONFIG_SECTION "httpd"
+#define HTTPD_ENABLED_DEFAULT false
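+
+// The [httpd] section of netdata.conf drives everything below; a minimal
+// sketch of the options this file consumes (ssl key/certificate default to
+// ssl/key.pem and ssl/cert.pem under the user config dir, commonly /etc/netdata):
+//
+//   [httpd]
+//     enabled = no
+//     bind to = 127.0.0.1
+//     port = 19998
+//     ssl = no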
+
+static void on_accept(h2o_socket_t *listener, const char *err)
+{
+ h2o_socket_t *sock;
+
+ if (err != NULL) {
+ return;
+ }
+
+ if ((sock = h2o_evloop_socket_accept(listener)) == NULL)
+ return;
+ h2o_accept(&accept_ctx, sock);
+}
+
+static int create_listener(const char *ip, int port)
+{
+ struct sockaddr_in addr;
+ int fd, reuseaddr_flag = 1;
+ h2o_socket_t *sock;
+
+ memset(&addr, 0, sizeof(addr));
+ addr.sin_family = AF_INET;
+ addr.sin_addr.s_addr = inet_addr(ip);
+ addr.sin_port = htons(port);
+
+ if ((fd = socket(AF_INET, SOCK_STREAM, 0)) == -1 ||
+ setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &reuseaddr_flag, sizeof(reuseaddr_flag)) != 0 ||
+ bind(fd, (struct sockaddr *)&addr, sizeof(addr)) != 0 || listen(fd, SOMAXCONN) != 0) {
+ return -1;
+ }
+
+ sock = h2o_evloop_socket_create(ctx.loop, fd, H2O_SOCKET_FLAG_DONT_READ);
+ h2o_socket_read_start(sock, on_accept);
+
+ return 0;
+}
+
+static int ssl_init()
+{
+ if (!config_get_boolean(HTTPD_CONFIG_SECTION, "ssl", false))
+ return 0;
+
+ char default_fn[FILENAME_MAX + 1];
+
+ snprintfz(default_fn, FILENAME_MAX, "%s/ssl/key.pem", netdata_configured_user_config_dir);
+ const char *key_fn = config_get(HTTPD_CONFIG_SECTION, "ssl key", default_fn);
+
+ snprintfz(default_fn, FILENAME_MAX, "%s/ssl/cert.pem", netdata_configured_user_config_dir);
+ const char *cert_fn = config_get(HTTPD_CONFIG_SECTION, "ssl certificate", default_fn);
+
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ accept_ctx.ssl_ctx = SSL_CTX_new(SSLv23_server_method());
+#else
+ accept_ctx.ssl_ctx = SSL_CTX_new(TLS_server_method());
+#endif
+
+ SSL_CTX_set_options(accept_ctx.ssl_ctx, SSL_OP_NO_SSLv2);
+
+ /* load certificate and private key */
+ if (SSL_CTX_use_PrivateKey_file(accept_ctx.ssl_ctx, key_fn, SSL_FILETYPE_PEM) != 1) {
+ error("Could not load server key from \"%s\"", key_fn);
+ return -1;
+ }
+ if (SSL_CTX_use_certificate_file(accept_ctx.ssl_ctx, cert_fn, SSL_FILETYPE_PEM) != 1) {
+ error("Could not load certificate from \"%s\"", cert_fn);
+ return -1;
+ }
+
+ h2o_ssl_register_alpn_protocols(accept_ctx.ssl_ctx, h2o_http2_alpn_protocols);
+
+ info("SSL support enabled");
+
+ return 0;
+}
+
+// I did not find a way to do wildcard paths to make a common handler for URLs like:
+// /api/v1/info
+// /host/child/api/v1/info
+// /host/uuid/api/v1/info
+// ideally we could register something like a "/*/api/v1/info" subscription,
+// so we do it "manually" here with the uberhandler
+static inline int _netdata_uberhandler(h2o_req_t *req, RRDHOST **host)
+{
+ if (!h2o_memis(req->method.base, req->method.len, H2O_STRLIT("GET")))
+ return -1;
+
+ static h2o_generator_t generator = { NULL, NULL };
+
+ h2o_iovec_t norm_path = req->path_normalized;
+
+ if (norm_path.len > strlen(HOST_SELECT_PREFIX) && !memcmp(norm_path.base, HOST_SELECT_PREFIX, strlen(HOST_SELECT_PREFIX))) {
+ h2o_iovec_t host_id; // host_id can be either a UUID or a hostname of the child
+
+ norm_path.base += strlen(HOST_SELECT_PREFIX);
+ norm_path.len -= strlen(HOST_SELECT_PREFIX);
+
+ host_id = norm_path;
+
+ size_t end_loc = h2o_strstr(host_id.base, host_id.len, "/", 1);
+ if (end_loc != SIZE_MAX) {
+ host_id.len = end_loc;
+ norm_path.base += end_loc;
+ norm_path.len -= end_loc;
+ }
+
+ char *c_host_id = iovec_to_cstr(&host_id);
+ *host = rrdhost_find_by_hostname(c_host_id);
+ if (!*host)
+ *host = rrdhost_find_by_guid(c_host_id);
+ if (!*host) {
+ req->res.status = HTTP_RESP_BAD_REQUEST;
+ req->res.reason = "Wrong host id";
+ h2o_send_inline(req, H2O_STRLIT("Host id provided was not found!\n"));
+ freez(c_host_id);
+ return 0;
+ }
+ freez(c_host_id);
+
+ // we have to rewrite the URL here in case this is not an API call,
+ // so that the subsequent file-serving handler can send the correct
+ // files to the client.
+ // if this is not an API call we will abort this handler later
+ // and let h2o's internal file-serving handler take care of things
+
+ if (end_loc == SIZE_MAX) {
+ req->path.len = 1;
+ req->path_normalized.len = 1;
+ } else {
+ size_t offset = norm_path.base - req->path_normalized.base;
+ req->path.len -= offset;
+ req->path.base += offset;
+ req->query_at -= offset;
+ req->path_normalized.len -= offset;
+ req->path_normalized.base += offset;
+ }
+ }
+
+ // workaround for a dashboard bug which sometimes causes URLs like
+ // "//api/v1/info" to be called instead of "/api/v1/info"
+ if (norm_path.len > 2 &&
+ norm_path.base[0] == '/' &&
+ norm_path.base[1] == '/' ) {
+ norm_path.base++;
+ norm_path.len--;
+ }
+
+ size_t api_loc = h2o_strstr(norm_path.base, norm_path.len, H2O_STRLIT(API_V1_PREFIX));
+ if (api_loc == SIZE_MAX)
+ return 1;
+
+ h2o_iovec_t api_command = norm_path;
+ api_command.base += api_loc + strlen(API_V1_PREFIX);
+ api_command.len -= api_loc + strlen(API_V1_PREFIX);
+
+ if (!api_command.len)
+ return 1;
+
+ // this (emulating struct web_client) is a hack and will be removed
+ // in future PRs, but that needs bigger changes in the old http_api_v1:
+ // we need to make web_client_api_request_v1 web-server agnostic and
+ // remove the old web server's dependency creep into the individual
+ // response generators, and thus remove the need to "emulate" the old
+ // web server when calling this function here and in ACLK
+ struct web_client w;
+ w.response.data = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ w.response.header = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ w.url_query_string_decoded = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ w.acl = WEB_CLIENT_ACL_DASHBOARD;
+
+ char *path_c_str = iovec_to_cstr(&api_command);
+ char *path_unescaped = url_unescape(path_c_str);
+ freez(path_c_str);
+
+ IF_HAS_URL_PARAMS(req) {
+ h2o_iovec_t query_params = URL_PARAMS_IOVEC_INIT_WITH_QUESTIONMARK(req);
+ char *query_c_str = iovec_to_cstr(&query_params);
+ char *query_unescaped = url_unescape(query_c_str);
+ freez(query_c_str);
+ buffer_strcat(w.url_query_string_decoded, query_unescaped);
+ freez(query_unescaped);
+ }
+
+ web_client_api_request_v1(*host, &w, path_unescaped);
+ freez(path_unescaped);
+
+ h2o_iovec_t body = buffer_to_h2o_iovec(w.response.data);
+
+ // we move the msg body to req->pool managed memory as it has to
+ // live until the whole response has been encrypted and sent;
+ // when req is finished the memory will be freed with the pool
+ void *managed = h2o_mem_alloc_shared(&req->pool, body.len, NULL);
+ memcpy(managed, body.base, body.len);
+ body.base = managed;
+
+ req->res.status = HTTP_RESP_OK;
+ req->res.reason = "OK";
+ if (w.response.data->content_type == CT_APPLICATION_JSON)
+ h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, CONTENT_JSON_UTF8);
+ else
+ h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, CONTENT_TEXT_UTF8);
+ h2o_start_response(req, &generator);
+ h2o_send(req, &body, 1, H2O_SEND_STATE_FINAL);
+
+ buffer_free(w.response.data);
+ buffer_free(w.response.header);
+ buffer_free(w.url_query_string_decoded);
+
+ return 0;
+}
+
+static int netdata_uberhandler(h2o_handler_t *self, h2o_req_t *req)
+{
+ UNUSED(self);
+ RRDHOST *host = localhost;
+
+ int ret = _netdata_uberhandler(req, &host);
+
+ char host_uuid_str[UUID_STR_LEN];
+ uuid_unparse_lower(host->host_uuid, host_uuid_str);
+
+ if (!ret) {
+ log_access("HTTPD OK method: " PRINTF_H2O_IOVEC_FMT
+ ", path: " PRINTF_H2O_IOVEC_FMT
+ ", as host: %s"
+ ", response: %d",
+ PRINTF_H2O_IOVEC(&req->method),
+ PRINTF_H2O_IOVEC(&req->input.path),
+ host == localhost ? "localhost" : host_uuid_str,
+ req->res.status);
+ } else {
+ log_access("HTTPD %d"
+ " method: " PRINTF_H2O_IOVEC_FMT
+ ", path: " PRINTF_H2O_IOVEC_FMT
+ ", forwarding to file handler as path: " PRINTF_H2O_IOVEC_FMT,
+ ret,
+ PRINTF_H2O_IOVEC(&req->method),
+ PRINTF_H2O_IOVEC(&req->input.path),
+ PRINTF_H2O_IOVEC(&req->path));
+ }
+
+ return ret;
+}
+
+static int hdl_netdata_conf(h2o_handler_t *self, h2o_req_t *req)
+{
+ UNUSED(self);
+ if (!h2o_memis(req->method.base, req->method.len, H2O_STRLIT("GET")))
+ return -1;
+
+ BUFFER *buf = buffer_create(NBUF_INITIAL_SIZE_RESP, NULL);
+ config_generate(buf, 0);
+
+ void *managed = h2o_mem_alloc_shared(&req->pool, buf->len, NULL);
+ memcpy(managed, buf->buffer, buf->len);
+
+ req->res.status = HTTP_RESP_OK;
+ req->res.reason = "OK";
+ h2o_add_header(&req->pool, &req->res.headers, H2O_TOKEN_CONTENT_TYPE, NULL, CONTENT_TEXT_UTF8);
+ h2o_send_inline(req, managed, buf->len);
+ buffer_free(buf);
+
+ return 0;
+}
+
+#define POLL_INTERVAL 100
+
+void *httpd_main(void *ptr) {
+ struct netdata_static_thread *static_thread = (struct netdata_static_thread *)ptr;
+
+ h2o_pathconf_t *pathconf;
+ h2o_hostconf_t *hostconf;
+
+ netdata_thread_disable_cancelability();
+
+ const char *bind_addr = config_get(HTTPD_CONFIG_SECTION, "bind to", "127.0.0.1");
+ int bind_port = config_get_number(HTTPD_CONFIG_SECTION, "port", 19998);
+
+ h2o_config_init(&config);
+ hostconf = h2o_config_register_host(&config, h2o_iovec_init(H2O_STRLIT("default")), bind_port);
+
+ pathconf = h2o_config_register_path(hostconf, "/netdata.conf", 0);
+ h2o_handler_t *handler = h2o_create_handler(pathconf, sizeof(*handler));
+ handler->on_req = hdl_netdata_conf;
+
+ pathconf = h2o_config_register_path(hostconf, "/", 0);
+ handler = h2o_create_handler(pathconf, sizeof(*handler));
+ handler->on_req = netdata_uberhandler;
+ h2o_file_register(pathconf, netdata_configured_web_dir, NULL, NULL, H2O_FILE_FLAG_SEND_COMPRESSED);
+
+ h2o_context_init(&ctx, h2o_evloop_create(), &config);
+
+ if(ssl_init()) {
+ error_report("SSL was requested but could not be properly initialized. Aborting.");
+ return NULL;
+ }
+
+ accept_ctx.ctx = &ctx;
+ accept_ctx.hosts = config.hosts;
+
+ if (create_listener(bind_addr, bind_port) != 0) {
+ error("failed to create listener %s:%d", bind_addr, bind_port);
+ return NULL;
+ }
+
+ while (service_running(SERVICE_HTTPD)) {
+ int rc = h2o_evloop_run(ctx.loop, POLL_INTERVAL);
+ if (rc < 0 && errno != EINTR) {
+ error("h2o_evloop_run returned (%d) with errno other than EINTR. Aborting", rc);
+ break;
+ }
+ }
+
+ static_thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ return NULL;
+}
+
+int httpd_is_enabled() {
+ return config_get_boolean(HTTPD_CONFIG_SECTION, "enabled", HTTPD_ENABLED_DEFAULT);
+}
diff --git a/httpd/http_server.h b/httpd/http_server.h
new file mode 100644
index 000000000..23b78da83
--- /dev/null
+++ b/httpd/http_server.h
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef HTTP_SERVER_H
+#define HTTP_SERVER_H
+
+void *httpd_main(void * ptr);
+
+int httpd_is_enabled();
+
+#endif /* HTTP_SERVER_H */
diff --git a/libnetdata/buffer/buffer.c b/libnetdata/buffer/buffer.c
index 142fbca14..91bc4dd60 100644
--- a/libnetdata/buffer/buffer.c
+++ b/libnetdata/buffer/buffer.c
@@ -503,3 +503,11 @@ int buffer_unittest(void) {
return errors;
}
+#ifdef ENABLE_HTTPD
+h2o_iovec_t buffer_to_h2o_iovec(BUFFER *wb) {
+ h2o_iovec_t ret;
+ ret.base = wb->buffer;
+ ret.len = wb->len;
+ return ret;
+}
+#endif
diff --git a/libnetdata/buffer/buffer.h b/libnetdata/buffer/buffer.h
index f5f83bc2a..22686a5a1 100644
--- a/libnetdata/buffer/buffer.h
+++ b/libnetdata/buffer/buffer.h
@@ -6,6 +6,10 @@
#include "../string/utf8.h"
#include "../libnetdata.h"
+#ifdef ENABLE_HTTPD
+#include "h2o/memory.h"
+#endif
+
#define WEB_DATA_LENGTH_INCREASE_STEP 1024
#define BUFFER_JSON_MAX_DEPTH 32 // max is 255
@@ -129,6 +133,10 @@ void buffer_char_replace(BUFFER *wb, char from, char to);
void buffer_print_sn_flags(BUFFER *wb, SN_FLAGS flags, bool send_anomaly_bit);
+#ifdef ENABLE_HTTPD
+h2o_iovec_t buffer_to_h2o_iovec(BUFFER *wb);
+#endif
+
static inline void buffer_need_bytes(BUFFER *buffer, size_t needed_free_size) {
if(unlikely(buffer->len + needed_free_size >= buffer->size))
buffer_increase(buffer, needed_free_size + 1);
diff --git a/libnetdata/ebpf/ebpf.c b/libnetdata/ebpf/ebpf.c
index 61833dd73..b980d09ed 100644
--- a/libnetdata/ebpf/ebpf.c
+++ b/libnetdata/ebpf/ebpf.c
@@ -453,6 +453,11 @@ void ebpf_update_stats(ebpf_plugin_stats_t *report, ebpf_module_t *em)
else if (em->load & EBPF_LOAD_CORE)
report->core++;
+ if (em->maps_per_core)
+ report->hash_percpu++;
+ else
+ report->hash_unique++;
+
ebpf_stats_targets(report, em->targets);
}
@@ -596,15 +601,70 @@ void ebpf_update_map_size(struct bpf_map *map, ebpf_local_maps_t *lmap, ebpf_mod
#endif
}
+#ifdef LIBBPF_MAJOR_VERSION
+/**
+ * Update map type
+ *
+ * Update map type with information given.
+ *
+ * @param map the map we want to modify
+ * @param w a structure with user input
+ */
+void ebpf_update_map_type(struct bpf_map *map, ebpf_local_maps_t *w)
+{
+ if (bpf_map__set_type(map, w->map_type)) {
+ error("Cannot modify map type for %s", w->name);
+ }
+}
+
+/**
+ * Define map type
+ *
+ * This function defines the type used by hash tables according to user input.
+ *
+ * @param maps the list of maps used with a hash table.
+ * @param maps_per_core whether to use per-CPU map types, per user specification.
+ * @param kver the kernel version the host is running.
+ */
+void ebpf_define_map_type(ebpf_local_maps_t *maps, int maps_per_core, int kver)
+{
+ if (!maps)
+ return;
+
+ // Before kernel 4.6 there were no per-CPU hash tables
+ if (kver < NETDATA_EBPF_KERNEL_4_06)
+ maps_per_core = CONFIG_BOOLEAN_NO;
+
+ int i = 0;
+ while (maps[i].name) {
+ ebpf_local_maps_t *map = &maps[i];
+ // maps_per_core is a boolean value in configuration files.
+ if (maps_per_core) {
+ if (map->map_type == BPF_MAP_TYPE_HASH)
+ map->map_type = BPF_MAP_TYPE_PERCPU_HASH;
+ else if (map->map_type == BPF_MAP_TYPE_ARRAY)
+ map->map_type = BPF_MAP_TYPE_PERCPU_ARRAY;
+ } else {
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH)
+ map->map_type = BPF_MAP_TYPE_HASH;
+ else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
+ map->map_type = BPF_MAP_TYPE_ARRAY;
+ }
+
+ i++;
+ }
+}
+#endif
+
/**
- * Update Legacy map sizes
+ * Update Legacy map
*
- * Update map size for eBPF legacy code.
+ * Update map for eBPF legacy code.
*
* @param program the structure with values read from binary.
* @param em the structure with information about how the module/thread is working.
*/
-static void ebpf_update_legacy_map_sizes(struct bpf_object *program, ebpf_module_t *em)
+static void ebpf_update_legacy_map(struct bpf_object *program, ebpf_module_t *em)
{
struct bpf_map *map;
ebpf_local_maps_t *maps = em->maps;
@@ -614,13 +674,19 @@ static void ebpf_update_legacy_map_sizes(struct bpf_object *program, ebpf_module
bpf_map__for_each(map, program)
{
const char *map_name = bpf_map__name(map);
- int i = 0; ;
+ int i = 0;
while (maps[i].name) {
ebpf_local_maps_t *w = &maps[i];
- if (w->type & NETDATA_EBPF_MAP_RESIZABLE) {
- if (!strcmp(w->name, map_name)) {
+
+ if (!strcmp(w->name, map_name)) {
+ // Modify size
+ if (w->type & NETDATA_EBPF_MAP_RESIZABLE) {
ebpf_update_map_size(map, w, em, map_name);
}
+
+#ifdef LIBBPF_MAJOR_VERSION
+ ebpf_update_map_type(map, w);
+#endif
}
i++;
@@ -790,13 +856,15 @@ struct bpf_link **ebpf_load_program(char *plugins_dir, ebpf_module_t *em, int kv
em->load |= EBPF_LOAD_LEGACY;
*obj = bpf_object__open_file(lpath, NULL);
+ if (!*obj)
+ return NULL;
+
if (libbpf_get_error(obj)) {
- error("Cannot open BPF object %s", lpath);
bpf_object__close(*obj);
return NULL;
}
- ebpf_update_legacy_map_sizes(*obj, em);
+ ebpf_update_legacy_map(*obj, em);
if (bpf_object__load(*obj)) {
error("ERROR: loading BPF object file failed %s\n", lpath);
@@ -1156,8 +1224,8 @@ void ebpf_update_module_using_config(ebpf_module_t *modules, netdata_ebpf_load_m
{
char default_value[EBPF_MAX_MODE_LENGTH + 1];
ebpf_select_mode_string(default_value, EBPF_MAX_MODE_LENGTH, modules->mode);
- char *value = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, default_value);
- modules->mode = ebpf_select_mode(value);
+ char *load_mode = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE, default_value);
+ modules->mode = ebpf_select_mode(load_mode);
modules->update_every = (int)appconfig_get_number(modules->cfg, EBPF_GLOBAL_SECTION,
EBPF_CFG_UPDATE_EVERY, modules->update_every);
@@ -1171,19 +1239,38 @@ void ebpf_update_module_using_config(ebpf_module_t *modules, netdata_ebpf_load_m
modules->pid_map_size = (uint32_t)appconfig_get_number(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_PID_SIZE,
modules->pid_map_size);
- value = ebpf_convert_load_mode_to_string(modules->load & NETDATA_EBPF_LOAD_METHODS);
- value = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, value);
- netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(value);
+ char *value = ebpf_convert_load_mode_to_string(modules->load & NETDATA_EBPF_LOAD_METHODS);
+ char *type_format = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, value);
+ netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(type_format);
load = ebpf_select_load_mode(btf_file, load, kver, is_rh);
modules->load = origin | load;
- value = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_CORE_ATTACH, EBPF_CFG_ATTACH_TRAMPOLINE);
- netdata_ebpf_program_loaded_t fill_lm = ebpf_convert_core_type(value, modules->mode);
+ char *core_attach = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_CORE_ATTACH, EBPF_CFG_ATTACH_TRAMPOLINE);
+ netdata_ebpf_program_loaded_t fill_lm = ebpf_convert_core_type(core_attach, modules->mode);
ebpf_update_target_with_conf(modules, fill_lm);
value = ebpf_convert_collect_pid_to_string(modules->apps_level);
- value = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_COLLECT_PID, value);
- modules->apps_level = ebpf_convert_string_to_apps_level(value);
+ char *collect_pid = appconfig_get(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_COLLECT_PID, value);
+ modules->apps_level = ebpf_convert_string_to_apps_level(collect_pid);
+
+ modules->maps_per_core = appconfig_get_boolean(modules->cfg, EBPF_GLOBAL_SECTION, EBPF_CFG_MAPS_PER_CORE,
+ modules->maps_per_core);
+ if (kver < NETDATA_EBPF_KERNEL_4_06)
+ modules->maps_per_core = CONFIG_BOOLEAN_NO;
+
+#ifdef NETDATA_DEV_MODE
+ info("The thread %s was configured with: mode = %s; update every = %d; apps = %s; cgroup = %s; ebpf type format = %s; ebpf co-re tracing = %s; collect pid = %s; maps per core = %s",
+ modules->thread_name,
+ load_mode,
+ modules->update_every,
+ (modules->apps_charts)?"enabled":"disabled",
+ (modules->cgroup_charts)?"enabled":"disabled",
+ type_format,
+ core_attach,
+ collect_pid,
+ (modules->maps_per_core)?"enabled":"disabled"
+ );
+#endif
}
/**
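With LIBBPF_MAJOR_VERSION available, the hash tables above can now be switched to BPF_MAP_TYPE_PERCPU_HASH / BPF_MAP_TYPE_PERCPU_ARRAY. A per-CPU map returns one value slot per possible CPU on every lookup, so user space has to aggregate the slots itself. A minimal sketch of that aggregation, assuming libbpf is available (the helper name sum_percpu_value is hypothetical):

    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>
    #include <stdint.h>

    // Sketch only: sum a u64 counter stored in a BPF_MAP_TYPE_PERCPU_HASH map.
    static uint64_t sum_percpu_value(int map_fd, uint32_t key)
    {
        int nr_cpus = libbpf_num_possible_cpus();  // one slot per possible CPU
        uint64_t values[nr_cpus];                  // VLA, for illustration only
        uint64_t total = 0;

        if (bpf_map_lookup_elem(map_fd, &key, values) != 0)
            return 0;                              // key not present

        for (int i = 0; i < nr_cpus; i++)
            total += values[i];                    // aggregate per-CPU counters

        return total;
    }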
diff --git a/libnetdata/ebpf/ebpf.h b/libnetdata/ebpf/ebpf.h
index bf5fdc33d..e82aaedd4 100644
--- a/libnetdata/ebpf/ebpf.h
+++ b/libnetdata/ebpf/ebpf.h
@@ -40,6 +40,8 @@
#define EBPF_CFG_PROGRAM_PATH "btf path"
+#define EBPF_CFG_MAPS_PER_CORE "maps per core"
+
#define EBPF_CFG_UPDATE_EVERY "update every"
#define EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT 10
#define EBPF_CFG_PID_SIZE "pid table size"
@@ -77,6 +79,7 @@
*
*/
enum netdata_ebpf_kernel_versions {
+ NETDATA_EBPF_KERNEL_4_06 = 263680, // 263680 = 4 * 65536 + 6 * 256
NETDATA_EBPF_KERNEL_4_11 = 264960, // 264960 = 4 * 65536 + 11 * 256
NETDATA_EBPF_KERNEL_4_14 = 265728, // 265728 = 4 * 65536 + 14 * 256
NETDATA_EBPF_KERNEL_4_15 = 265984, // 265984 = 4 * 65536 + 15 * 256
@@ -196,6 +199,9 @@ typedef struct ebpf_local_maps {
uint32_t user_input;
uint32_t type;
int map_fd;
+#ifdef LIBBPF_MAJOR_VERSION
+ enum bpf_map_type map_type;
+#endif
} ebpf_local_maps_t;
typedef struct ebpf_specify_name {
@@ -243,6 +249,9 @@ typedef struct ebpf_plugin_stats {
uint64_t memlock_kern; // The same information reported by bpftool, but it is not accurate
// https://lore.kernel.org/linux-mm/20230112155326.26902-5-laoar.shao@gmail.com/T/
uint32_t hash_tables; // Number of hash tables used on the system.
+
+ uint32_t hash_percpu; // Number of threads using per-CPU maps.
+ uint32_t hash_unique; // Number of threads using a single map shared by all cores.
} ebpf_plugin_stats_t;
typedef enum ebpf_stats_action {
@@ -296,6 +305,7 @@ typedef struct ebpf_module {
// charts
char memory_usage[NETDATA_EBPF_CHART_MEM_LENGTH];
char memory_allocations[NETDATA_EBPF_CHART_MEM_LENGTH];
+ int maps_per_core;
} ebpf_module_t;
int ebpf_get_kernel_version();
@@ -348,6 +358,7 @@ typedef struct ebpf_filesystem_partitions {
ebpf_addresses_t addresses;
uint64_t kernels;
+ ebpf_local_maps_t *fs_maps;
} ebpf_filesystem_partitions_t;
typedef struct ebpf_sync_syscalls {
@@ -365,6 +376,7 @@ typedef struct ebpf_sync_syscalls {
#else
void *sync_obj;
#endif
+ ebpf_local_maps_t *sync_maps;
} ebpf_sync_syscalls_t;
void ebpf_histogram_dimension_cleanup(char **ptr, size_t length);
@@ -391,6 +403,8 @@ void ebpf_adjust_thread_load(ebpf_module_t *mod, struct btf *file);
struct btf *ebpf_parse_btf_file(const char *filename);
struct btf *ebpf_load_btf_file(char *path, char *filename);
int ebpf_is_function_inside_btf(struct btf *file, char *function);
+void ebpf_update_map_type(struct bpf_map *map, ebpf_local_maps_t *w);
+void ebpf_define_map_type(ebpf_local_maps_t *maps, int maps_per_core, int kver);
#endif
void ebpf_update_kernel_memory_with_vector(ebpf_plugin_stats_t *report, ebpf_local_maps_t *maps);
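The enum above encodes kernel versions as major * 65536 + minor * 256 + patch, the same packing as the kernel's KERNEL_VERSION() macro. A small illustration (the NETDATA_EBPF_KVER macro below is hypothetical, shown only to make the encoding explicit):

    #define NETDATA_EBPF_KVER(major, minor, patch) \
        ((major) * 65536 + (minor) * 256 + (patch))

    // NETDATA_EBPF_KVER(4, 6, 0)  == 263680  -> NETDATA_EBPF_KERNEL_4_06
    // NETDATA_EBPF_KVER(4, 11, 0) == 264960  -> NETDATA_EBPF_KERNEL_4_11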
diff --git a/libnetdata/gorilla/benchmark.sh b/libnetdata/gorilla/benchmark.sh
new file mode 100755
index 000000000..a5d111435
--- /dev/null
+++ b/libnetdata/gorilla/benchmark.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+set -exu -o pipefail
+
+clang++ \
+ -std=c++11 -Wall -Wextra \
+ -DENABLE_BENCHMARK -O2 -g \
+ -lbenchmark -lbenchmark_main \
+ -o gorilla_benchmark gorilla.cc
+
+./gorilla_benchmark
diff --git a/libnetdata/gorilla/fuzzer.sh b/libnetdata/gorilla/fuzzer.sh
new file mode 100755
index 000000000..9dfdec055
--- /dev/null
+++ b/libnetdata/gorilla/fuzzer.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+#
+# SPDX-License-Identifier: GPL-3.0-or-later
+#
+
+set -exu -o pipefail
+
+clang++ \
+ -std=c++11 -Wall -Wextra \
+ -DENABLE_FUZZER -O2 -g \
+ -fsanitize=fuzzer \
+ -o gorilla_fuzzer gorilla.cc
+
+./gorilla_fuzzer -workers=8 -jobs=8
diff --git a/libnetdata/gorilla/gorilla.cc b/libnetdata/gorilla/gorilla.cc
new file mode 100644
index 000000000..af2f74007
--- /dev/null
+++ b/libnetdata/gorilla/gorilla.cc
@@ -0,0 +1,620 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "gorilla.h"
+
+#include <cassert>
+#include <climits>
+#include <cstdio>
+#include <cstring>
+
+using std::size_t;
+
+template <typename T>
+static constexpr size_t bit_size() noexcept
+{
+ static_assert((sizeof(T) * CHAR_BIT) == 32 || (sizeof(T) * CHAR_BIT) == 64,
+ "Word size should be 32 or 64 bits.");
+ return (sizeof(T) * CHAR_BIT);
+}
+
+/*
+ * Low-level bitstream operations, allowing us to read/write individual bits.
+*/
+
+template<typename Word>
+struct bit_stream_t {
+ Word *buffer;
+ size_t capacity;
+ size_t position;
+};
+
+template<typename Word>
+static bit_stream_t<Word> bit_stream_new(Word *buffer, Word capacity) {
+ bit_stream_t<Word> bs;
+
+ bs.buffer = buffer;
+ bs.capacity = capacity * bit_size<Word>();
+ bs.position = 0;
+
+ return bs;
+}
+
+template<typename Word>
+static bool bit_stream_write(bit_stream_t<Word> *bs, Word value, size_t nbits) {
+ assert(nbits > 0 && nbits <= bit_size<Word>());
+ assert(bs->capacity >= (bs->position + nbits));
+
+ if (bs->position + nbits > bs->capacity) {
+ return false;
+ }
+
+ const size_t index = bs->position / bit_size<Word>();
+ const size_t offset = bs->position % bit_size<Word>();
+ bs->position += nbits;
+
+ if (offset == 0) {
+ bs->buffer[index] = value;
+ } else {
+ const size_t remaining_bits = bit_size<Word>() - offset;
+
+ // write the lower part of the value
+ const Word low_bits_mask = ((Word) 1 << remaining_bits) - 1;
+ const Word lowest_bits_in_value = value & low_bits_mask;
+ bs->buffer[index] |= (lowest_bits_in_value << offset);
+
+ if (nbits > remaining_bits) {
+ // write the upper part of the value
+ const Word high_bits_mask = ~low_bits_mask;
+ const Word highest_bits_in_value = (value & high_bits_mask) >> (remaining_bits);
+ bs->buffer[index + 1] = highest_bits_in_value;
+ }
+ }
+
+ return true;
+}
+
+template<typename Word>
+static bool bit_stream_read(bit_stream_t<Word> *bs, Word *value, size_t nbits) {
+ assert(nbits > 0 && nbits <= bit_size<Word>());
+ assert(bs->capacity >= (bs->position + nbits));
+
+ if (bs->position + nbits > bs->capacity) {
+ return false;
+ }
+
+ const size_t index = bs->position / bit_size<Word>();
+ const size_t offset = bs->position % bit_size<Word>();
+ bs->position += nbits;
+
+ if (offset == 0) {
+ *value = (nbits == bit_size<Word>()) ?
+ bs->buffer[index] :
+ bs->buffer[index] & (((Word) 1 << nbits) - 1);
+ } else {
+ const size_t remaining_bits = bit_size<Word>() - offset;
+
+ // extract the lower part of the value
+ if (nbits < remaining_bits) {
+ *value = (bs->buffer[index] >> offset) & (((Word) 1 << nbits) - 1);
+ } else {
+ *value = (bs->buffer[index] >> offset) & (((Word) 1 << remaining_bits) - 1);
+ nbits -= remaining_bits;
+ *value |= (bs->buffer[index + 1] & (((Word) 1 << nbits) - 1)) << remaining_bits;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * High-level Gorilla codec implementation
+*/
+
+template<typename Word>
+struct bit_code_t {
+ bit_stream_t<Word> bs;
+ Word entries;
+ Word prev_number;
+ Word prev_xor;
+ Word prev_xor_lzc;
+};
+
+template<typename Word>
+static void bit_code_init(bit_code_t<Word> *bc, Word *buffer, Word capacity) {
+ bc->bs = bit_stream_new(buffer, capacity);
+
+ bc->entries = 0;
+ bc->prev_number = 0;
+ bc->prev_xor = 0;
+ bc->prev_xor_lzc = 0;
+
+ // reserved two words:
+ // Buffer[0] -> number of entries written
+ // Buffer[1] -> number of bits written
+
+ bc->bs.position += 2 * bit_size<Word>();
+}
+
+template<typename Word>
+static bool bit_code_read(bit_code_t<Word> *bc, Word *number) {
+ bit_stream_t<Word> *bs = &bc->bs;
+
+ bc->entries++;
+
+ // read the first number
+ if (bc->entries == 1) {
+ bool ok = bit_stream_read(bs, number, bit_size<Word>());
+ bc->prev_number = *number;
+ return ok;
+ }
+
+ // process same-number bit
+ Word is_same_number;
+ if (!bit_stream_read(bs, &is_same_number, 1)) {
+ return false;
+ }
+
+ if (is_same_number) {
+ *number = bc->prev_number;
+ return true;
+ }
+
+ // process the same-xor-lzc bit
+ Word xor_lzc = bc->prev_xor_lzc;
+
+ Word same_xor_lzc;
+ if (!bit_stream_read(bs, &same_xor_lzc, 1)) {
+ return false;
+ }
+
+ if (!same_xor_lzc) {
+ if (!bit_stream_read(bs, &xor_lzc, (bit_size<Word>() == 32) ? 5 : 6)) {
+ return false;
+ }
+ }
+
+ // process the non-lzc suffix
+ Word xor_value = 0;
+ if (!bit_stream_read(bs, &xor_value, bit_size<Word>() - xor_lzc)) {
+ return false;
+ }
+
+ *number = (bc->prev_number ^ xor_value);
+
+ bc->prev_number = *number;
+ bc->prev_xor_lzc = xor_lzc;
+ bc->prev_xor = xor_value;
+
+ return true;
+}
+
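+/*
+ * Format summary (derived from bit_code_write() below):
+ *
+ * - the first value is stored verbatim, as a full word;
+ * - for every following value, 1 bit: set when it equals the previous value
+ *   (nothing else is stored for it);
+ * - otherwise, 1 bit: set when the XOR with the previous value has the same
+ *   leading-zero count (LZC) as the previous XOR; when clear, the LZC itself
+ *   follows, in 5 bits (32-bit words) or 6 bits (64-bit words);
+ * - finally, the low (word size - LZC) bits of the XOR value are stored.
+ */
+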
+template<typename Word>
+static bool bit_code_write(bit_code_t<Word> *bc, const Word number) {
+ bit_stream_t<Word> *bs = &bc->bs;
+ Word position = bs->position;
+
+ bc->entries++;
+
+ // this is the first number we are writing
+ if (bc->entries == 1) {
+ bc->prev_number = number;
+ return bit_stream_write(bs, number, bit_size<Word>());
+ }
+
+ // write true/false based on whether we got the same number or not.
+ if (number == bc->prev_number) {
+ return bit_stream_write(bs, static_cast<Word>(1), 1);
+ } else {
+ if (bit_stream_write(bs, static_cast<Word>(0), 1) == false) {
+ return false;
+ }
+ }
+
+ // otherwise:
+ // - compute the non-zero xor
+ // - find its leading-zero count
+
+ Word xor_value = bc->prev_number ^ number;
+ // FIXME: Use SFINAE
+ Word xor_lzc = (bit_size<Word>() == 32) ? __builtin_clz(xor_value) : __builtin_clzll(xor_value);
+ Word is_xor_lzc_same = (xor_lzc == bc->prev_xor_lzc) ? 1 : 0;
+
+ if (is_xor_lzc_same) {
+ // xor-lzc is same
+ if (bit_stream_write(bs, static_cast<Word>(1), 1) == false) {
+ goto RET_FALSE;
+ }
+ } else {
+ // xor-lzc is different
+ if (bit_stream_write(bs, static_cast<Word>(0), 1) == false) {
+ goto RET_FALSE;
+ }
+
+ if (bit_stream_write(bs, xor_lzc, (bit_size<Word>() == 32) ? 5 : 6) == false) {
+ goto RET_FALSE;
+ }
+ }
+
+ // write the bits of the XOR value without the LZC prefix
+ if (bit_stream_write(bs, xor_value, bit_size<Word>() - xor_lzc) == false) {
+ goto RET_FALSE;
+ }
+
+ bc->prev_number = number;
+ bc->prev_xor_lzc = xor_lzc;
+ return true;
+
+RET_FALSE:
+ bc->bs.position = position;
+ return false;
+}
+
+// only valid for writers
+template<typename Word>
+static bool bit_code_flush(bit_code_t<Word> *bc) {
+ bit_stream_t<Word> *bs = &bc->bs;
+
+ Word num_entries_written = bc->entries;
+ Word num_bits_written = bs->position;
+
+ // we want to write these at the beginning
+ bs->position = 0;
+
+ if (!bit_stream_write(bs, num_entries_written, bit_size<Word>())) {
+ return false;
+ }
+
+ if (!bit_stream_write(bs, num_bits_written, bit_size<Word>())) {
+ return false;
+ }
+
+ bs->position = num_bits_written;
+ return true;
+}
+
+// only valid for readers
+template<typename Word>
+static bool bit_code_info(bit_code_t<Word> *bc, Word *num_entries_written,
+ Word *num_bits_written) {
+ bit_stream_t<Word> *bs = &bc->bs;
+
+ assert(bs->position == 2 * bit_size<Word>());
+ if (bs->capacity < (2 * bit_size<Word>())) {
+ return false;
+ }
+
+ if (num_entries_written) {
+ *num_entries_written = bs->buffer[0];
+ }
+ if (num_bits_written) {
+ *num_bits_written = bs->buffer[1];
+ }
+
+ return true;
+}
+
+template<typename Word>
+static size_t gorilla_encode(Word *dst, Word dst_len, const Word *src, Word src_len) {
+ bit_code_t<Word> bcw;
+
+ bit_code_init(&bcw, dst, dst_len);
+
+ for (size_t i = 0; i != src_len; i++) {
+ if (!bit_code_write(&bcw, src[i]))
+ return 0;
+ }
+
+ if (!bit_code_flush(&bcw))
+ return 0;
+
+ return src_len;
+}
+
+template<typename Word>
+static size_t gorilla_decode(Word *dst, Word dst_len, const Word *src, Word src_len) {
+ bit_code_t<Word> bcr;
+
+ bit_code_init(&bcr, (Word *) src, src_len);
+
+ Word num_entries;
+ if (!bit_code_info(&bcr, &num_entries, (Word *) NULL)) {
+ return 0;
+ }
+ if (num_entries > dst_len) {
+ return 0;
+ }
+
+ for (size_t i = 0; i != num_entries; i++) {
+ if (!bit_code_read(&bcr, &dst[i]))
+ return 0;
+ }
+
+ return num_entries;
+}
+
+/*
+ * Low-level public API
+*/
+
+// 32-bit API
+
+void bit_code_writer_u32_init(bit_code_writer_u32_t *bcw, uint32_t *buffer, uint32_t capacity) {
+ bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcw;
+ bit_code_init(bc, buffer, capacity);
+}
+
+bool bit_code_writer_u32_write(bit_code_writer_u32_t *bcw, const uint32_t number) {
+ bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcw;
+ return bit_code_write(bc, number);
+}
+
+bool bit_code_writer_u32_flush(bit_code_writer_u32_t *bcw) {
+ bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcw;
+ return bit_code_flush(bc);
+}
+
+void bit_code_reader_u32_init(bit_code_reader_u32_t *bcr, uint32_t *buffer, uint32_t capacity) {
+ bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcr;
+ bit_code_init(bc, buffer, capacity);
+}
+
+bool bit_code_reader_u32_read(bit_code_reader_u32_t *bcr, uint32_t *number) {
+ bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcr;
+ return bit_code_read(bc, number);
+}
+
+bool bit_code_reader_u32_info(bit_code_reader_u32_t *bcr, uint32_t *num_entries_written,
+ uint32_t *num_bits_written) {
+ bit_code_t<uint32_t> *bc = (bit_code_t<uint32_t> *) bcr;
+ return bit_code_info(bc, num_entries_written, num_bits_written);
+}
+
+// 64-bit API
+
+void bit_code_writer_u64_init(bit_code_writer_u64_t *bcw, uint64_t *buffer, uint64_t capacity) {
+ bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcw;
+ bit_code_init(bc, buffer, capacity);
+}
+
+bool bit_code_writer_u64_write(bit_code_writer_u64_t *bcw, const uint64_t number) {
+ bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcw;
+ return bit_code_write(bc, number);
+}
+
+bool bit_code_writer_u64_flush(bit_code_writer_u64_t *bcw) {
+ bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcw;
+ return bit_code_flush(bc);
+}
+
+void bit_code_reader_u64_init(bit_code_reader_u64_t *bcr, uint64_t *buffer, uint64_t capacity) {
+ bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcr;
+ bit_code_init(bc, buffer, capacity);
+}
+
+bool bit_code_reader_u64_read(bit_code_reader_u64_t *bcr, uint64_t *number) {
+ bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcr;
+ return bit_code_read(bc, number);
+}
+
+bool bit_code_reader_u64_info(bit_code_reader_u64_t *bcr, uint64_t *num_entries_written,
+ uint64_t *num_bits_written) {
+ bit_code_t<uint64_t> *bc = (bit_code_t<uint64_t> *) bcr;
+ return bit_code_info(bc, num_entries_written, num_bits_written);
+}
+
+/*
+ * High-level public API
+*/
+
+// 32-bit API
+
+size_t gorilla_encode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len) {
+ return gorilla_encode(dst, (uint32_t) dst_len, src, (uint32_t) src_len);
+}
+
+size_t gorilla_decode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len) {
+ return gorilla_decode(dst, (uint32_t) dst_len, src, (uint32_t) src_len);
+}
+
+// 64-bit API
+
+size_t gorilla_encode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len) {
+ return gorilla_encode(dst, (uint64_t) dst_len, src, (uint64_t) src_len);
+}
+
+size_t gorilla_decode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len) {
+ return gorilla_decode(dst, (uint64_t) dst_len, src, (uint64_t) src_len);
+}
+
+/*
+ * Internal code used for fuzzing the library
+*/
+
+#ifdef ENABLE_FUZZER
+
+#include <vector>
+
+template<typename Word>
+static std::vector<Word> random_vector(const uint8_t *data, size_t size) {
+ std::vector<Word> V;
+
+ V.reserve(1024);
+
+ while (size >= sizeof(Word)) {
+ size -= sizeof(Word);
+
+ Word w;
+ memcpy(&w, &data[size], sizeof(Word));
+ V.push_back(w);
+ }
+
+ return V;
+}
+
+template<typename Word>
+static void check_equal_buffers(Word *lhs, Word lhs_size, Word *rhs, Word rhs_size) {
+ assert((lhs_size == rhs_size) && "Buffers have different size.");
+
+ for (size_t i = 0; i != lhs_size; i++) {
+ assert((lhs[i] == rhs[i]) && "Buffers differ");
+ }
+}
+
+extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+ // 32-bit tests
+ {
+ if (Size < 4)
+ return 0;
+
+ std::vector<uint32_t> RandomData = random_vector<uint32_t>(Data, Size);
+ std::vector<uint32_t> EncodedData(10 * RandomData.capacity(), 0);
+ std::vector<uint32_t> DecodedData(10 * RandomData.capacity(), 0);
+
+ size_t num_entries_written = gorilla_encode_u32(EncodedData.data(), EncodedData.size(),
+ RandomData.data(), RandomData.size());
+ size_t num_entries_read = gorilla_decode_u32(DecodedData.data(), DecodedData.size(),
+ EncodedData.data(), EncodedData.size());
+
+ assert(num_entries_written == num_entries_read);
+ check_equal_buffers(RandomData.data(), (uint32_t) RandomData.size(),
+ DecodedData.data(), (uint32_t) RandomData.size());
+ }
+
+ // 64-bit tests
+ {
+ if (Size < 8)
+ return 0;
+
+ std::vector<uint64_t> RandomData = random_vector<uint64_t>(Data, Size);
+ std::vector<uint64_t> EncodedData(10 * RandomData.capacity(), 0);
+ std::vector<uint64_t> DecodedData(10 * RandomData.capacity(), 0);
+
+ size_t num_entries_written = gorilla_encode_u64(EncodedData.data(), EncodedData.size(),
+ RandomData.data(), RandomData.size());
+ size_t num_entries_read = gorilla_decode_u64(DecodedData.data(), DecodedData.size(),
+ EncodedData.data(), EncodedData.size());
+
+ assert(num_entries_written == num_entries_read);
+ check_equal_buffers(RandomData.data(), (uint64_t) RandomData.size(),
+ DecodedData.data(), (uint64_t) RandomData.size());
+ }
+
+ return 0;
+}
+
+#endif /* ENABLE_FUZZER */
+
+#ifdef ENABLE_BENCHMARK
+
+#include <benchmark/benchmark.h>
+#include <random>
+
+static size_t NumItems = 1024;
+
+static void BM_EncodeU32Numbers(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 mt(rd());
+ std::uniform_int_distribution<uint32_t> dist(0x0, 0x0000FFFF);
+
+ std::vector<uint32_t> RandomData;
+ for (size_t idx = 0; idx != NumItems; idx++) {
+ RandomData.push_back(dist(mt));
+ }
+ std::vector<uint32_t> EncodedData(10 * RandomData.capacity(), 0);
+
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(
+ gorilla_encode_u32(EncodedData.data(), EncodedData.size(),
+ RandomData.data(), RandomData.size())
+ );
+ benchmark::ClobberMemory();
+ }
+
+ state.SetItemsProcessed(NumItems * state.iterations());
+ state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint32_t));
+}
+BENCHMARK(BM_EncodeU32Numbers);
+
+static void BM_DecodeU32Numbers(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 mt(rd());
+ std::uniform_int_distribution<uint32_t> dist(0x0, 0xFFFFFFFF);
+
+ std::vector<uint32_t> RandomData;
+ for (size_t idx = 0; idx != NumItems; idx++) {
+ RandomData.push_back(dist(mt));
+ }
+ std::vector<uint32_t> EncodedData(10 * RandomData.capacity(), 0);
+ std::vector<uint32_t> DecodedData(10 * RandomData.capacity(), 0);
+
+ gorilla_encode_u32(EncodedData.data(), EncodedData.size(),
+ RandomData.data(), RandomData.size());
+
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(
+ gorilla_decode_u32(DecodedData.data(), DecodedData.size(),
+ EncodedData.data(), EncodedData.size())
+ );
+ benchmark::ClobberMemory();
+ }
+
+ state.SetItemsProcessed(NumItems * state.iterations());
+ state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint32_t));
+}
+// Register the function as a benchmark
+BENCHMARK(BM_DecodeU32Numbers);
+
+static void BM_EncodeU64Numbers(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 mt(rd());
+ std::uniform_int_distribution<uint64_t> dist(0x0, 0x0000FFFF);
+
+ std::vector<uint64_t> RandomData;
+ for (size_t idx = 0; idx != NumItems; idx++) {
+ RandomData.push_back(dist(mt));
+ }
+ std::vector<uint64_t> EncodedData(10 * RandomData.capacity(), 0);
+
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(
+ gorilla_encode_u64(EncodedData.data(), EncodedData.size(),
+ RandomData.data(), RandomData.size())
+ );
+ benchmark::ClobberMemory();
+ }
+
+ state.SetItemsProcessed(NumItems * state.iterations());
+ state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint64_t));
+}
+BENCHMARK(BM_EncodeU64Numbers);
+
+static void BM_DecodeU64Numbers(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 mt(rd());
+ std::uniform_int_distribution<uint64_t> dist(0x0, 0xFFFFFFFF);
+
+ std::vector<uint64_t> RandomData;
+ for (size_t idx = 0; idx != NumItems; idx++) {
+ RandomData.push_back(dist(mt));
+ }
+ std::vector<uint64_t> EncodedData(10 * RandomData.capacity(), 0);
+ std::vector<uint64_t> DecodedData(10 * RandomData.capacity(), 0);
+
+ gorilla_encode_u64(EncodedData.data(), EncodedData.size(),
+ RandomData.data(), RandomData.size());
+
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(
+ gorilla_decode_u64(DecodedData.data(), DecodedData.size(),
+ EncodedData.data(), EncodedData.size())
+ );
+ benchmark::ClobberMemory();
+ }
+
+ state.SetItemsProcessed(NumItems * state.iterations());
+ state.SetBytesProcessed(NumItems * state.iterations() * sizeof(uint64_t));
+}
+// Register the function as a benchmark
+BENCHMARK(BM_DecodeU64Numbers);
+
+#endif /* ENABLE_BENCHMARK */
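A minimal standalone usage sketch of the high-level API (not part of the patch); it assumes only gorilla.h as added below:

    #include "gorilla.h"
    #include <stdio.h>

    int main(void) {
        uint32_t src[4] = { 100, 100, 105, 230 };
        uint32_t encoded[64] = { 0 };  // generous; words 0-1 hold entry/bit counts
        uint32_t decoded[4] = { 0 };

        size_t written = gorilla_encode_u32(encoded, 64, src, 4);  // returns 4 on success
        size_t read = gorilla_decode_u32(decoded, 4, encoded, 64); // returns entries decoded

        printf("encoded %zu entries, decoded %zu entries\n", written, read);
        return (written == read) ? 0 : 1;
    }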
diff --git a/libnetdata/gorilla/gorilla.h b/libnetdata/gorilla/gorilla.h
new file mode 100644
index 000000000..12bec42c0
--- /dev/null
+++ b/libnetdata/gorilla/gorilla.h
@@ -0,0 +1,60 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef GORILLA_H
+#define GORILLA_H
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Low-level public API
+*/
+
+// 32-bit API
+
+typedef struct bit_code_writer_u32 bit_code_writer_u32_t;
+typedef struct bit_code_reader_u32 bit_code_reader_u32_t;
+
+void bit_code_writer_u32_init(bit_code_writer_u32_t *bcw, uint32_t *buffer, uint32_t capacity);
+bool bit_code_writer_u32_write(bit_code_writer_u32_t *bcw, const uint32_t number);
+bool bit_code_writer_u32_flush(bit_code_writer_u32_t *bcw);
+
+void bit_code_reader_u32_init(bit_code_reader_u32_t *bcr, uint32_t *buffer, uint32_t capacity);
+bool bit_code_reader_u32_read(bit_code_reader_u32_t *bcr, uint32_t *number);
+bool bit_code_reader_u32_info(bit_code_reader_u32_t *bcr, uint32_t *num_entries_written,
+ uint32_t *num_bits_written);
+
+// 64-bit API
+
+typedef struct bit_code_writer_u64 bit_code_writer_u64_t;
+typedef struct bit_code_reader_u64 bit_code_reader_u64_t;
+
+void bit_code_writer_u64_init(bit_code_writer_u64_t *bcw, uint64_t *buffer, uint64_t capacity);
+bool bit_code_writer_u64_write(bit_code_writer_u64_t *bcw, const uint64_t number);
+bool bit_code_writer_u64_flush(bit_code_writer_u64_t *bcw);
+
+void bit_code_reader_u64_init(bit_code_reader_u64_t *bcr, uint64_t *buffer, uint64_t capacity);
+bool bit_code_reader_u64_read(bit_code_reader_u64_t *bcr, uint64_t *number);
+bool bit_code_reader_u64_info(bit_code_reader_u64_t *bcr, uint64_t *num_entries_written,
+ uint64_t *num_bits_written);
+
+/*
+ * High-level public API
+*/
+
+size_t gorilla_encode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len);
+size_t gorilla_decode_u32(uint32_t *dst, size_t dst_len, const uint32_t *src, size_t src_len);
+
+size_t gorilla_encode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len);
+size_t gorilla_decode_u64(uint64_t *dst, size_t dst_len, const uint64_t *src, size_t src_len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* GORILLA_H */
diff --git a/libnetdata/http/http_defs.h b/libnetdata/http/http_defs.h
new file mode 100644
index 000000000..774ea0b71
--- /dev/null
+++ b/libnetdata/http/http_defs.h
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_HTTP_DEFS_H
+#define NETDATA_HTTP_DEFS_H
+
+// HTTP_CODES 2XX Success
+#define HTTP_RESP_OK 200
+
+// HTTP_CODES 3XX Redirections
+#define HTTP_RESP_MOVED_PERM 301
+#define HTTP_RESP_REDIR_TEMP 307
+#define HTTP_RESP_REDIR_PERM 308
+
+// HTTP_CODES 4XX Client Errors
+#define HTTP_RESP_BAD_REQUEST 400
+#define HTTP_RESP_UNAUTHORIZED 401
+#define HTTP_RESP_FORBIDDEN 403
+#define HTTP_RESP_NOT_FOUND 404
+#define HTTP_RESP_CONFLICT 409
+#define HTTP_RESP_PRECOND_FAIL 412
+#define HTTP_RESP_CONTENT_TOO_LONG 413
+
+// HTTP_CODES 5XX Server Errors
+#define HTTP_RESP_INTERNAL_SERVER_ERROR 500
+#define HTTP_RESP_BACKEND_FETCH_FAILED 503 // 503 is intentional (same code as SERVICE_UNAVAILABLE)
+#define HTTP_RESP_SERVICE_UNAVAILABLE 503 // 503 is intentional (same code as BACKEND_FETCH_FAILED)
+#define HTTP_RESP_GATEWAY_TIMEOUT 504
+#define HTTP_RESP_BACKEND_RESPONSE_INVALID 591
+
+#endif /* NETDATA_HTTP_DEFS_H */
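Typical use is symbolic comparison against response codes; a hypothetical helper (not part of the header) mapping a code to its reason phrase:

    #include "http_defs.h"

    static const char *http_resp_reason(int code) {
        switch (code) {
            case HTTP_RESP_OK:                    return "OK";
            case HTTP_RESP_BAD_REQUEST:           return "Bad Request";
            case HTTP_RESP_NOT_FOUND:             return "Not Found";
            case HTTP_RESP_INTERNAL_SERVER_ERROR: return "Internal Server Error";
            case HTTP_RESP_SERVICE_UNAVAILABLE:   return "Service Unavailable";
            default:                              return "Unknown";
        }
    }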
diff --git a/libnetdata/libjudy/src/JudyL/JudyLTables.c b/libnetdata/libjudy/src/JudyL/JudyLTables.c
new file mode 100644
index 000000000..21c974986
--- /dev/null
+++ b/libnetdata/libjudy/src/JudyL/JudyLTables.c
@@ -0,0 +1,338 @@
+// @(#) From generation tool: $Revision: 4.37 $ $Source: /judy/src/JudyCommon/JudyTables.c $
+// Pregenerated and modified by hand. Do not overwrite!
+
+#include "JudyL.h"
+// Leave the malloc() sizes readable in the binary (via strings(1)):
+#ifdef JU_64BIT
+const char * JudyLMallocSizes = "JudyLMallocSizes = 3, 5, 7, 11, 15, 23, 32, 47, 64, Leaf1 = 13";
+#else // JU_32BIT
+const char * JudyLMallocSizes = "JudyLMallocSizes = 3, 5, 7, 11, 15, 23, 32, 47, 64, Leaf1 = 25";
+#endif // JU_64BIT
+
+#ifdef JU_64BIT
+// object uses 64 words
+// cJU_BITSPERSUBEXPB = 32
+const uint8_t
+j__L_BranchBJPPopToWords[cJU_BITSPERSUBEXPB + 1] =
+{
+ 0,
+ 3, 5, 7, 11, 11, 15, 15, 23,
+ 23, 23, 23, 32, 32, 32, 32, 32,
+ 47, 47, 47, 47, 47, 47, 47, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64
+};
+
+// object uses 15 words
+// cJL_LEAF1_MAXPOP1 = 13
+const uint8_t
+j__L_Leaf1PopToWords[cJL_LEAF1_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 3, 5, 5, 7, 7, 11, 11,
+ 11, 15, 15, 15, 15
+};
+const uint8_t
+j__L_Leaf1Offset[cJL_LEAF1_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 1, 1, 1, 1, 1, 2, 2,
+ 2, 2, 2, 2, 2
+};
+
+// object uses 64 words
+// cJL_LEAF2_MAXPOP1 = 51
+const uint8_t
+j__L_Leaf2PopToWords[cJL_LEAF2_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 3, 5, 5, 7, 11, 11, 11,
+ 15, 15, 15, 15, 23, 23, 23, 23,
+ 23, 23, 32, 32, 32, 32, 32, 32,
+ 32, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64
+};
+const uint8_t
+j__L_Leaf2Offset[cJL_LEAF2_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 1, 1, 1, 2, 3, 3, 3,
+ 3, 3, 3, 3, 5, 5, 5, 5,
+ 5, 5, 7, 7, 7, 7, 7, 7,
+ 7, 10, 10, 10, 10, 10, 10, 10,
+ 10, 10, 10, 10, 10, 13, 13, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 13
+};
+
+// object uses 64 words
+// cJL_LEAF3_MAXPOP1 = 46
+const uint8_t
+j__L_Leaf3PopToWords[cJL_LEAF3_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 3, 5, 7, 7, 11, 11, 11,
+ 15, 15, 23, 23, 23, 23, 23, 23,
+ 32, 32, 32, 32, 32, 32, 32, 47,
+ 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64
+};
+const uint8_t
+j__L_Leaf3Offset[cJL_LEAF3_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 1, 2, 2, 2, 3, 3, 3,
+ 4, 4, 6, 6, 6, 6, 6, 6,
+ 9, 9, 9, 9, 9, 9, 9, 13,
+ 13, 13, 13, 13, 13, 13, 13, 13,
+ 13, 13, 18, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 18, 18
+};
+
+// object uses 63 words
+// cJL_LEAF4_MAXPOP1 = 42
+const uint8_t
+j__L_Leaf4PopToWords[cJL_LEAF4_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 3, 5, 7, 11, 11, 11, 15,
+ 15, 15, 23, 23, 23, 23, 23, 32,
+ 32, 32, 32, 32, 32, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 63,
+ 63, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63
+};
+const uint8_t
+j__L_Leaf4Offset[cJL_LEAF4_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 1, 2, 2, 4, 4, 4, 5,
+ 5, 5, 8, 8, 8, 8, 8, 11,
+ 11, 11, 11, 11, 11, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21
+};
+
+// object uses 64 words
+// cJL_LEAF5_MAXPOP1 = 39
+const uint8_t
+j__L_Leaf5PopToWords[cJL_LEAF5_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 5, 5, 7, 11, 11, 15, 15,
+ 15, 23, 23, 23, 23, 23, 32, 32,
+ 32, 32, 32, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64
+};
+const uint8_t
+j__L_Leaf5Offset[cJL_LEAF5_MAXPOP1 + 1] =
+{
+ 0,
+ 2, 2, 2, 3, 4, 4, 6, 6,
+ 6, 9, 9, 9, 9, 9, 12, 12,
+ 12, 12, 12, 18, 18, 18, 18, 18,
+ 18, 18, 18, 18, 25, 25, 25, 25,
+ 25, 25, 25, 25, 25, 25, 25
+};
+
+// object uses 63 words
+// cJL_LEAF6_MAXPOP1 = 36
+const uint8_t
+j__L_Leaf6PopToWords[cJL_LEAF6_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 5, 7, 7, 11, 11, 15, 15,
+ 23, 23, 23, 23, 23, 32, 32, 32,
+ 32, 32, 47, 47, 47, 47, 47, 47,
+ 47, 47, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63
+};
+const uint8_t
+j__L_Leaf6Offset[cJL_LEAF6_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 3, 3, 3, 5, 5, 6, 6,
+ 10, 10, 10, 10, 10, 14, 14, 14,
+ 14, 14, 20, 20, 20, 20, 20, 20,
+ 20, 20, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27
+};
+
+// object uses 64 words
+// cJL_LEAF7_MAXPOP1 = 34
+const uint8_t
+j__L_Leaf7PopToWords[cJL_LEAF7_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 5, 7, 11, 11, 15, 15, 15,
+ 23, 23, 23, 23, 32, 32, 32, 32,
+ 32, 47, 47, 47, 47, 47, 47, 47,
+ 47, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64
+};
+const uint8_t
+j__L_Leaf7Offset[cJL_LEAF7_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 3, 3, 5, 5, 7, 7, 7,
+ 11, 11, 11, 11, 15, 15, 15, 15,
+ 15, 22, 22, 22, 22, 22, 22, 22,
+ 22, 30, 30, 30, 30, 30, 30, 30,
+ 30, 30
+};
+
+// object uses 63 words
+// cJL_LEAFW_MAXPOP1 = 31
+const uint8_t
+j__L_LeafWPopToWords[cJL_LEAFW_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 5, 7, 11, 11, 15, 15, 23,
+ 23, 23, 23, 32, 32, 32, 32, 47,
+ 47, 47, 47, 47, 47, 47, 47, 63,
+ 63, 63, 63, 63, 63, 63, 63
+};
+const uint8_t
+j__L_LeafWOffset[cJL_LEAFW_MAXPOP1 + 1] =
+{
+ 0,
+ 2, 3, 4, 6, 6, 8, 8, 12,
+ 12, 12, 12, 16, 16, 16, 16, 24,
+ 24, 24, 24, 24, 24, 24, 24, 32,
+ 32, 32, 32, 32, 32, 32, 32
+};
+
+// object uses 64 words
+// cJU_BITSPERSUBEXPL = 64
+const uint8_t
+j__L_LeafVPopToWords[cJU_BITSPERSUBEXPL + 1] =
+{
+ 0,
+ 3, 3, 3, 5, 5, 7, 7, 11,
+ 11, 11, 11, 15, 15, 15, 15, 23,
+ 23, 23, 23, 23, 23, 23, 23, 32,
+ 32, 32, 32, 32, 32, 32, 32, 32,
+ 47, 47, 47, 47, 47, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64
+};
+#else // JU_32BIT
+// object uses 64 words
+// cJU_BITSPERSUBEXPB = 32
+const uint8_t
+j__L_BranchBJPPopToWords[cJU_BITSPERSUBEXPB + 1] =
+{
+ 0,
+ 3, 5, 7, 11, 11, 15, 15, 23,
+ 23, 23, 23, 32, 32, 32, 32, 32,
+ 47, 47, 47, 47, 47, 47, 47, 64,
+ 64, 64, 64, 64, 64, 64, 64, 64
+};
+
+// object uses 32 words
+// cJL_LEAF1_MAXPOP1 = 25
+const uint8_t
+j__L_Leaf1PopToWords[cJL_LEAF1_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 3, 5, 5, 7, 11, 11, 11,
+ 15, 15, 15, 15, 23, 23, 23, 23,
+ 23, 23, 32, 32, 32, 32, 32, 32,
+ 32
+};
+const uint8_t
+j__L_Leaf1Offset[cJL_LEAF1_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 1, 1, 1, 2, 3, 3, 3,
+ 3, 3, 3, 3, 5, 5, 5, 5,
+ 5, 5, 7, 7, 7, 7, 7, 7,
+ 7
+};
+
+// object uses 63 words
+// cJL_LEAF2_MAXPOP1 = 42
+const uint8_t
+j__L_Leaf2PopToWords[cJL_LEAF2_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 3, 5, 7, 11, 11, 11, 15,
+ 15, 15, 23, 23, 23, 23, 23, 32,
+ 32, 32, 32, 32, 32, 47, 47, 47,
+ 47, 47, 47, 47, 47, 47, 47, 63,
+ 63, 63, 63, 63, 63, 63, 63, 63,
+ 63, 63
+};
+const uint8_t
+j__L_Leaf2Offset[cJL_LEAF2_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 1, 2, 2, 4, 4, 4, 5,
+ 5, 5, 8, 8, 8, 8, 8, 11,
+ 11, 11, 11, 11, 11, 16, 16, 16,
+ 16, 16, 16, 16, 16, 16, 16, 21,
+ 21, 21, 21, 21, 21, 21, 21, 21,
+ 21, 21
+};
+
+// object uses 63 words
+// cJL_LEAF3_MAXPOP1 = 36
+const uint8_t
+j__L_Leaf3PopToWords[cJL_LEAF3_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 5, 7, 7, 11, 11, 15, 15,
+ 23, 23, 23, 23, 23, 32, 32, 32,
+ 32, 32, 47, 47, 47, 47, 47, 47,
+ 47, 47, 63, 63, 63, 63, 63, 63,
+ 63, 63, 63, 63
+};
+const uint8_t
+j__L_Leaf3Offset[cJL_LEAF3_MAXPOP1 + 1] =
+{
+ 0,
+ 1, 3, 3, 3, 5, 5, 6, 6,
+ 10, 10, 10, 10, 10, 14, 14, 14,
+ 14, 14, 20, 20, 20, 20, 20, 20,
+ 20, 20, 27, 27, 27, 27, 27, 27,
+ 27, 27, 27, 27
+};
+
+// object uses 63 words
+// cJL_LEAFW_MAXPOP1 = 31
+const uint8_t
+j__L_LeafWPopToWords[cJL_LEAFW_MAXPOP1 + 1] =
+{
+ 0,
+ 3, 5, 7, 11, 11, 15, 15, 23,
+ 23, 23, 23, 32, 32, 32, 32, 47,
+ 47, 47, 47, 47, 47, 47, 47, 63,
+ 63, 63, 63, 63, 63, 63, 63
+};
+const uint8_t
+j__L_LeafWOffset[cJL_LEAFW_MAXPOP1 + 1] =
+{
+ 0,
+ 2, 3, 4, 6, 6, 8, 8, 12,
+ 12, 12, 12, 16, 16, 16, 16, 24,
+ 24, 24, 24, 24, 24, 24, 24, 32,
+ 32, 32, 32, 32, 32, 32, 32
+};
+
+// object uses 32 words
+// cJU_BITSPERSUBEXPL = 32
+const uint8_t
+j__L_LeafVPopToWords[cJU_BITSPERSUBEXPL + 1] =
+{
+ 0,
+ 3, 3, 3, 5, 5, 7, 7, 11,
+ 11, 11, 11, 15, 15, 15, 15, 23,
+ 23, 23, 23, 23, 23, 23, 23, 32,
+ 32, 32, 32, 32, 32, 32, 32, 32
+};
+#endif // JU_64BIT
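These tables map a leaf population (pop1) to the number of words to request from the allocator, and to the offset of the value area inside that allocation. A hedged sketch of a lookup (the helper is hypothetical; it assumes the array declarations from JudyL.h):

    #include "JudyL.h"

    // For a JudyL Leaf2 holding pop1 indexes:
    static unsigned leaf2_alloc_words(unsigned pop1) {
        unsigned words = j__L_Leaf2PopToWords[pop1];    // words to allocate
        unsigned value_offset = j__L_Leaf2Offset[pop1]; // first word of value area
        (void) value_offset;
        return words;
    }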
diff --git a/libnetdata/libjudy/src/JudyL/JudyLTablesGen.c b/libnetdata/libjudy/src/JudyL/JudyLTablesGen.c
deleted file mode 100644
index ce4b37153..000000000
--- a/libnetdata/libjudy/src/JudyL/JudyLTablesGen.c
+++ /dev/null
@@ -1,301 +0,0 @@
-// Copyright (C) 2000 - 2002 Hewlett-Packard Company
-//
-// This program is free software; you can redistribute it and/or modify it
-// under the term of the GNU Lesser General Public License as published by the
-// Free Software Foundation; either version 2 of the License, or (at your
-// option) any later version.
-//
-// This program is distributed in the hope that it will be useful, but WITHOUT
-// ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-// FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
-// for more details.
-//
-// You should have received a copy of the GNU Lesser General Public License
-// along with this program; if not, write to the Free Software Foundation,
-// Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-// _________________
-
-// @(#) $Revision: 4.37.1-netdata $ $Source: JudyTables.c $
-
-#ifndef JU_WIN
-#include <unistd.h> // unavailable on win_*.
-#endif
-
-#include <stdlib.h>
-#include <stdio.h>
-
-#if (! (defined(JUDY1) || defined(JUDYL)))
-#error: One of -DJUDY1 or -DJUDYL must be specified.
-#endif
-
-#define TERMINATOR 999 // terminator for Alloc tables
-
-// define bytes per word
-#ifdef JU_64BIT
-#define BPW 8UL
-#else
-#define BPW 4UL
-#endif
-
-#ifdef JUDY1
-#include "Judy1.h"
-#else
-#include "JudyL.h"
-#endif
-
-FILE *fd;
-
-// Definitions come from header files Judy1.h and JudyL.h:
-
-int AllocSizes[] = ALLOCSIZES;
-
-#define ROUNDUP(BYTES,BPW,OFFSETW) \
- ((((BYTES) + (BPW) - 1) / (BPW)) + (OFFSETW))
-
-
-// ****************************************************************************
-// G E N T A B L E
-//
-// Note: "const" is required for newer compilers.
-
-FUNCTION void GenTable(
- const char * TableName, // name of table string
- const char * TableSize, // dimentioned size string
- int IndexBytes, // bytes per Index
- int LeafSize, // number elements in object
- int ValueBytes, // bytes per Value
- int OffsetWords) // 1 for LEAFW
-{
- int * PAllocSizes = AllocSizes;
- int OWord;
- int CurWord;
- int IWord;
- int ii;
- int BytesOfIndex;
- int BytesOfObject;
- int Index;
- int LastWords;
- int Words [1000] = { 0 };
- int Offset[1000] = { 0 };
- int MaxWords;
-
- MaxWords = ROUNDUP((IndexBytes + ValueBytes) * LeafSize, BPW, OffsetWords);
- Words[0] = 0;
- Offset[0] = 0;
- CurWord = TERMINATOR;
-
-// Walk through all number of Indexes in table:
-
- for (Index = 1; /* null */; ++Index)
- {
-
-// Calculate byte required for next size:
-
- BytesOfIndex = IndexBytes * Index;
- BytesOfObject = (IndexBytes + ValueBytes) * Index;
-
-// Round up and calculate words required for next size:
-
- OWord = ROUNDUP(BytesOfObject, BPW, OffsetWords);
- IWord = ROUNDUP(BytesOfIndex, BPW, OffsetWords);
-
-// Root-level leaves of population of 1 and 2 do not have the 1 word offset:
-
-// Save minimum value of offset:
-
- Offset[Index] = IWord;
-
-// Round up to next available size of words:
-
- while (OWord > *PAllocSizes) PAllocSizes++;
-
- if (Index == LeafSize)
- {
- CurWord = Words[Index] = OWord;
- break;
- }
-// end of available sizes ?
-
- if (*PAllocSizes == TERMINATOR)
- {
- fprintf(stderr, "BUG, in %sPopToWords, sizes not big enough for object\n", TableName);
- exit(1);
- }
-
-// Save words required and last word:
-
- if (*PAllocSizes < MaxWords) { CurWord = Words[Index] = *PAllocSizes; }
- else { CurWord = Words[Index] = MaxWords; }
-
- } // for each index
-
- LastWords = TERMINATOR;
-
-// Round up to largest size in each group of malloc sizes:
-
- for (ii = LeafSize; ii > 0; ii--)
- {
- if (LastWords > (Words[ii] - ii)) LastWords = Offset[ii];
- else Offset[ii] = LastWords;
- }
-
-// Print the PopToWords[] table:
-
- fprintf(fd,"\n//\tobject uses %d words\n", CurWord);
- fprintf(fd,"//\t%s = %d\n", TableSize, LeafSize);
-
- fprintf(fd,"const uint8_t\n");
- fprintf(fd,"%sPopToWords[%s + 1] =\n", TableName, TableSize);
- fprintf(fd,"{\n\t 0,");
-
- for (ii = 1; ii <= LeafSize; ii++)
- {
-
-// 8 columns per line, starting with 1:
-
- if ((ii % 8) == 1) fprintf(fd,"\n\t");
-
- fprintf(fd,"%2d", Words[ii]);
-
-// If not last number place comma:
-
- if (ii != LeafSize) fprintf(fd,", ");
- }
- fprintf(fd,"\n};\n");
-
-// Print the Offset table if needed:
-
- if (! ValueBytes) return;
-
- fprintf(fd,"const uint8_t\n");
- fprintf(fd,"%sOffset[%s + 1] =\n", TableName, TableSize);
- fprintf(fd,"{\n");
- fprintf(fd,"\t 0,");
-
- for (ii = 1; ii <= LeafSize; ii++)
- {
- if ((ii % 8) == 1) fprintf(fd,"\n\t");
-
- fprintf(fd,"%2d", Offset[ii]);
-
- if (ii != LeafSize) fprintf(fd,", ");
- }
- fprintf(fd,"\n};\n");
-
-} // GenTable()
-
-
-// ****************************************************************************
-// M A I N
-
-FUNCTION int main()
-{
- int ii;
-
-#ifdef JUDY1
- char *fname = "Judy1Tables.c";
-#else
- char *fname = "JudyLTables.c";
-#endif
-
- if ((fd = fopen(fname, "w")) == NULL){
- perror("FATAL ERROR: could not write to Judy[1L]Tables.c file\n");
- return (-1);
- }
-
-
- fprintf(fd,"// @(#) From generation tool: $Revision: 4.37.1-netdata $ $Source: JudyTables.c $\n");
- fprintf(fd,"//\n\n");
-
-
-// ================================ Judy1 =================================
-#ifdef JUDY1
-
- fprintf(fd,"#include \"Judy1.h\"\n");
-
- fprintf(fd,"// Leave the malloc() sizes readable in the binary (via "
- "strings(1)):\n");
- fprintf(fd,"const char * Judy1MallocSizes = \"Judy1MallocSizes =");
-
- for (ii = 0; AllocSizes[ii] != TERMINATOR; ii++)
- fprintf(fd," %d,", AllocSizes[ii]);
-
-#ifndef JU_64BIT
- fprintf(fd," Leaf1 = %d\";\n\n", cJ1_LEAF1_MAXPOP1);
-#else
- fprintf(fd,"\";\n\n"); // no Leaf1 in this case.
-#endif
-
-// ================================ 32 bit ================================
-#ifndef JU_64BIT
-
- GenTable("j__1_BranchBJP","cJU_BITSPERSUBEXPB", 8, cJU_BITSPERSUBEXPB,0,0);
-
- GenTable("j__1_Leaf1", "cJ1_LEAF1_MAXPOP1", 1, cJ1_LEAF1_MAXPOP1, 0, 0);
- GenTable("j__1_Leaf2", "cJ1_LEAF2_MAXPOP1", 2, cJ1_LEAF2_MAXPOP1, 0, 0);
- GenTable("j__1_Leaf3", "cJ1_LEAF3_MAXPOP1", 3, cJ1_LEAF3_MAXPOP1, 0, 0);
- GenTable("j__1_LeafW", "cJ1_LEAFW_MAXPOP1", 4, cJ1_LEAFW_MAXPOP1, 0, 1);
-
-#endif
-
-// ================================ 64 bit ================================
-#ifdef JU_64BIT
- GenTable("j__1_BranchBJP","cJU_BITSPERSUBEXPB",16, cJU_BITSPERSUBEXPB,0,0);
-
- GenTable("j__1_Leaf2", "cJ1_LEAF2_MAXPOP1", 2, cJ1_LEAF2_MAXPOP1, 0, 0);
- GenTable("j__1_Leaf3", "cJ1_LEAF3_MAXPOP1", 3, cJ1_LEAF3_MAXPOP1, 0, 0);
- GenTable("j__1_Leaf4", "cJ1_LEAF4_MAXPOP1", 4, cJ1_LEAF4_MAXPOP1, 0, 0);
- GenTable("j__1_Leaf5", "cJ1_LEAF5_MAXPOP1", 5, cJ1_LEAF5_MAXPOP1, 0, 0);
- GenTable("j__1_Leaf6", "cJ1_LEAF6_MAXPOP1", 6, cJ1_LEAF6_MAXPOP1, 0, 0);
- GenTable("j__1_Leaf7", "cJ1_LEAF7_MAXPOP1", 7, cJ1_LEAF7_MAXPOP1, 0, 0);
- GenTable("j__1_LeafW", "cJ1_LEAFW_MAXPOP1", 8, cJ1_LEAFW_MAXPOP1, 0, 1);
-#endif
-#endif // JUDY1
-
-
-// ================================ JudyL =================================
-#ifdef JUDYL
-
- fprintf(fd,"#include \"JudyL.h\"\n");
-
- fprintf(fd,"// Leave the malloc() sizes readable in the binary (via "
- "strings(1)):\n");
- fprintf(fd,"const char * JudyLMallocSizes = \"JudyLMallocSizes =");
-
- for (ii = 0; AllocSizes[ii] != TERMINATOR; ii++)
- fprintf(fd," %d,", AllocSizes[ii]);
-
- fprintf(fd," Leaf1 = %ld\";\n\n", (Word_t)cJL_LEAF1_MAXPOP1);
-
-#ifndef JU_64BIT
-// ================================ 32 bit ================================
- GenTable("j__L_BranchBJP","cJU_BITSPERSUBEXPB", 8, cJU_BITSPERSUBEXPB, 0,0);
-
- GenTable("j__L_Leaf1", "cJL_LEAF1_MAXPOP1", 1, cJL_LEAF1_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf2", "cJL_LEAF2_MAXPOP1", 2, cJL_LEAF2_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf3", "cJL_LEAF3_MAXPOP1", 3, cJL_LEAF3_MAXPOP1, BPW,0);
- GenTable("j__L_LeafW", "cJL_LEAFW_MAXPOP1", 4, cJL_LEAFW_MAXPOP1, BPW,1);
- GenTable("j__L_LeafV", "cJU_BITSPERSUBEXPL", 4, cJU_BITSPERSUBEXPL, 0,0);
-#endif // 32 BIT
-
-#ifdef JU_64BIT
-// ================================ 64 bit ================================
- GenTable("j__L_BranchBJP","cJU_BITSPERSUBEXPB",16, cJU_BITSPERSUBEXPB, 0,0);
-
- GenTable("j__L_Leaf1", "cJL_LEAF1_MAXPOP1", 1, cJL_LEAF1_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf2", "cJL_LEAF2_MAXPOP1", 2, cJL_LEAF2_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf3", "cJL_LEAF3_MAXPOP1", 3, cJL_LEAF3_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf4", "cJL_LEAF4_MAXPOP1", 4, cJL_LEAF4_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf5", "cJL_LEAF5_MAXPOP1", 5, cJL_LEAF5_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf6", "cJL_LEAF6_MAXPOP1", 6, cJL_LEAF6_MAXPOP1, BPW,0);
- GenTable("j__L_Leaf7", "cJL_LEAF7_MAXPOP1", 7, cJL_LEAF7_MAXPOP1, BPW,0);
- GenTable("j__L_LeafW", "cJL_LEAFW_MAXPOP1", 8, cJL_LEAFW_MAXPOP1, BPW,1);
- GenTable("j__L_LeafV", "cJU_BITSPERSUBEXPL", 8, cJU_BITSPERSUBEXPL, 0,0);
-#endif // 64 BIT
-
-#endif // JUDYL
- fclose(fd);
-
- return(0);
-
-} // main()
diff --git a/libnetdata/libnetdata.h b/libnetdata/libnetdata.h
index c24494930..062d8c6fa 100644
--- a/libnetdata/libnetdata.h
+++ b/libnetdata/libnetdata.h
@@ -176,9 +176,7 @@ extern "C" {
#include <stdint.h>
#endif
-#ifdef NETDATA_WITH_ZLIB
#include <zlib.h>
-#endif
#ifdef HAVE_CAPABILITY
#include <sys/capability.h>
@@ -667,6 +665,8 @@ extern char *netdata_configured_host_prefix;
#include "worker_utilization/worker_utilization.h"
#include "parser/parser.h"
#include "yaml.h"
+#include "http/http_defs.h"
+#include "gorilla/gorilla.h"
// BEWARE: this exists in alarm-notify.sh
#define DEFAULT_CLOUD_BASE_URL "https://app.netdata.cloud"
diff --git a/libnetdata/parser/parser.c b/libnetdata/parser/parser.c
index c3eebcd16..80c9a2639 100644
--- a/libnetdata/parser/parser.c
+++ b/libnetdata/parser/parser.c
@@ -1,4 +1,6 @@
// SPDX-License-Identifier: GPL-3.0-or-later
+#include <poll.h>
+#include <stdio.h>
#include "parser.h"
#include "collectors/plugins.d/pluginsd_parser.h"
@@ -124,26 +126,77 @@ void parser_destroy(PARSER *parser)
*
*/
-int parser_next(PARSER *parser, char *buffer, size_t buffer_size)
-{
- char *tmp = fgets(buffer, (int)buffer_size, (FILE *)parser->fp_input);
+typedef enum {
+ PARSER_FGETS_RESULT_OK,
+ PARSER_FGETS_RESULT_TIMEOUT,
+ PARSER_FGETS_RESULT_ERROR,
+ PARSER_FGETS_RESULT_EOF,
+} PARSER_FGETS_RESULT;
+
+static inline PARSER_FGETS_RESULT parser_fgets(char *s, int size, FILE *stream) {
+ errno = 0;
+
+ struct pollfd fds[1];
+ int timeout_msecs = 2 * 60 * MSEC_PER_SEC;
+
+ fds[0].fd = fileno(stream);
+ fds[0].events = POLLIN;
+
+ int ret = poll(fds, 1, timeout_msecs);
- if (unlikely(!tmp)) {
- if (feof((FILE *)parser->fp_input))
- error("PARSER: read failed: end of file");
+ if (ret > 0) {
+ /* There is data to read */
+ if (fds[0].revents & POLLIN) {
+ char *tmp = fgets(s, size, stream);
- else if (ferror((FILE *)parser->fp_input))
- error("PARSER: read failed: input error");
+ if(unlikely(!tmp)) {
+ if (feof(stream)) {
+ error("PARSER: read failed: end of file.");
+ return PARSER_FGETS_RESULT_EOF;
+ }
- else
- error("PARSER: read failed: unknown error");
+ else if (ferror(stream)) {
+ error("PARSER: read failed: input error.");
+ return PARSER_FGETS_RESULT_ERROR;
+ }
- return 1;
+ error("PARSER: read failed: unknown error.");
+ return PARSER_FGETS_RESULT_ERROR;
+ }
+
+ return PARSER_FGETS_RESULT_OK;
+ }
+ else if(fds[0].revents & POLLERR) {
+ error("PARSER: read failed: POLLERR.");
+ return PARSER_FGETS_RESULT_ERROR;
+ }
+ else if(fds[0].revents & POLLHUP) {
+ error("PARSER: read failed: POLLHUP.");
+ return PARSER_FGETS_RESULT_ERROR;
+ }
+ else if(fds[0].revents & POLLNVAL) {
+ error("PARSER: read failed: POLLNVAL.");
+ return PARSER_FGETS_RESULT_ERROR;
+ }
+
+ error("PARSER: poll() returned positive number, but POLLIN|POLLERR|POLLHUP|POLLNVAL are not set.");
+ return PARSER_FGETS_RESULT_ERROR;
+ }
+ else if (ret == 0) {
+ error("PARSER: timeout while waiting for data.");
+ return PARSER_FGETS_RESULT_TIMEOUT;
}
- return 0;
+ error("PARSER: poll() failed with code %d.", ret);
+ return PARSER_FGETS_RESULT_ERROR;
}
+int parser_next(PARSER *parser, char *buffer, size_t buffer_size) {
+ if(likely(parser_fgets(buffer, (int)buffer_size, (FILE *)parser->fp_input) == PARSER_FGETS_RESULT_OK))
+ return 0;
+
+ return 1;
+}
/*
* Takes an initialized parser object that has an unprocessed entry (by calling parser_next)
@@ -202,7 +255,6 @@ inline int parser_action(PARSER *parser, char *input)
else
rc = PARSER_RC_ERROR;
-#ifdef NETDATA_INTERNAL_CHECKS
if(rc == PARSER_RC_ERROR) {
BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
for(size_t i = 0; i < num_words ;i++) {
@@ -214,12 +266,11 @@ inline int parser_action(PARSER *parser, char *input)
buffer_fast_strcat(wb, "\"", 1);
}
- internal_error(true, "PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)",
+ error("PLUGINSD: parser_action('%s') failed on line %zu: { %s } (quotes added to show parsing)",
command, parser->line, buffer_tostring(wb));
buffer_free(wb);
}
-#endif
return (rc == PARSER_RC_ERROR || rc == PARSER_RC_STOP);
}
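The change above replaces a blocking fgets() with poll() plus a 2-minute timeout, so a stalled plugin cannot hang the parser forever. The same pattern, reduced to a standalone sketch:

    #include <poll.h>
    #include <stdio.h>

    // Read one line from stdin, giving up after two minutes of silence.
    static int read_line_with_timeout(char *buf, int size) {
        struct pollfd pfd = { .fd = fileno(stdin), .events = POLLIN };

        int ret = poll(&pfd, 1, 2 * 60 * 1000);   // timeout in milliseconds
        if (ret <= 0)
            return -1;                            // timeout (0) or poll error (<0)

        if (!(pfd.revents & POLLIN))
            return -1;                            // POLLERR / POLLHUP / POLLNVAL

        return fgets(buf, size, stdin) ? 0 : -1;  // EOF or read error -> -1
    }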
diff --git a/libnetdata/parser/parser.h b/libnetdata/parser/parser.h
index 9e0d3480d..c21cbaf7e 100644
--- a/libnetdata/parser/parser.h
+++ b/libnetdata/parser/parser.h
@@ -44,7 +44,7 @@ typedef struct parser {
FILE *fp_input; // Input source e.g. stream
FILE *fp_output; // Stream to send commands to plugin
#ifdef ENABLE_HTTPS
- struct netdata_ssl *ssl_output;
+ NETDATA_SSL *ssl_output;
#endif
void *user; // User defined structure to hold extra state between calls
uint32_t flags;
diff --git a/libnetdata/popen/popen.c b/libnetdata/popen/popen.c
index 5ed74ae95..783c74a51 100644
--- a/libnetdata/popen/popen.c
+++ b/libnetdata/popen/popen.c
@@ -5,11 +5,13 @@
// ----------------------------------------------------------------------------
// popen with tracking
-static pthread_mutex_t netdata_popen_tracking_mutex;
-static bool netdata_popen_tracking_enabled = false;
+static pthread_mutex_t netdata_popen_tracking_mutex = NETDATA_MUTEX_INITIALIZER;
struct netdata_popen {
pid_t pid;
+ bool reaped;
+ siginfo_t infop;
+ int waitid_ret;
struct netdata_popen *next;
struct netdata_popen *prev;
};
@@ -18,29 +20,20 @@ static struct netdata_popen *netdata_popen_root = NULL;
// myp_add_lock takes the lock if we're tracking.
static void netdata_popen_tracking_lock(void) {
- if(!netdata_popen_tracking_enabled)
- return;
-
netdata_mutex_lock(&netdata_popen_tracking_mutex);
}
// myp_add_unlock release the lock if we're tracking.
static void netdata_popen_tracking_unlock(void) {
- if(!netdata_popen_tracking_enabled)
- return;
-
netdata_mutex_unlock(&netdata_popen_tracking_mutex);
}
// myp_add_locked adds pid if we're tracking.
// myp_add_lock must have been called previously.
static void netdata_popen_tracking_add_pid_unsafe(pid_t pid) {
- if(!netdata_popen_tracking_enabled)
- return;
-
struct netdata_popen *mp;
- mp = mallocz(sizeof(struct netdata_popen));
+ mp = callocz(1, sizeof(struct netdata_popen));
mp->pid = pid;
DOUBLE_LINKED_LIST_PREPEND_ITEM_UNSAFE(netdata_popen_root, mp, prev, next);
@@ -48,12 +41,9 @@ static void netdata_popen_tracking_add_pid_unsafe(pid_t pid) {
// myp_del deletes pid if we're tracking.
static void netdata_popen_tracking_del_pid(pid_t pid) {
- if(!netdata_popen_tracking_enabled)
- return;
-
struct netdata_popen *mp;
- netdata_mutex_lock(&netdata_popen_tracking_mutex);
+ netdata_popen_tracking_lock();
DOUBLE_LINKED_LIST_FOREACH_FORWARD(netdata_popen_root, mp, prev, next) {
if(unlikely(mp->pid == pid))
@@ -65,34 +55,15 @@ static void netdata_popen_tracking_del_pid(pid_t pid) {
freez(mp);
}
else
- error("Cannot find pid %d.", pid);
-
- netdata_mutex_unlock(&netdata_popen_tracking_mutex);
-}
+ error("POPEN: Cannot find pid %d.", pid);
-// netdata_popen_tracking_init() should be called by apps which act as init
-// (pid 1) so that processes created by mypopen and mypopene
-// are tracked. This enables the reaper to ignore processes
-// which will be handled internally, by calling myp_reap, to
-// avoid issues with already reaped processes during wait calls.
-//
-// Callers should call myp_free() to clean up resources.
-void netdata_popen_tracking_init(void) {
- info("process tracking enabled.");
- netdata_popen_tracking_enabled = true;
-
- if (netdata_mutex_init(&netdata_popen_tracking_mutex) != 0)
- fatal("netdata_popen_tracking_init() mutex init failed.");
+ netdata_popen_tracking_unlock();
}
// myp_free cleans up any resources allocated for process
// tracking.
void netdata_popen_tracking_cleanup(void) {
- if(!netdata_popen_tracking_enabled)
- return;
-
- netdata_mutex_lock(&netdata_popen_tracking_mutex);
- netdata_popen_tracking_enabled = false;
+ netdata_popen_tracking_lock();
while(netdata_popen_root) {
struct netdata_popen *mp = netdata_popen_root;
@@ -100,26 +71,45 @@ void netdata_popen_tracking_cleanup(void) {
freez(mp);
}
- netdata_mutex_unlock(&netdata_popen_tracking_mutex);
+ netdata_popen_tracking_unlock();
}
-// myp_reap returns 1 if pid should be reaped, 0 otherwise.
-int netdata_popen_tracking_pid_shoud_be_reaped(pid_t pid) {
- if(!netdata_popen_tracking_enabled)
- return 0;
+int netdata_waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options) {
+ struct netdata_popen *mp = NULL;
- netdata_mutex_lock(&netdata_popen_tracking_mutex);
+ if(idtype == P_PID && id != 0) {
+ // the caller is asking to waitid() for a specific child pid
- int ret = 1;
- struct netdata_popen *mp;
- DOUBLE_LINKED_LIST_FOREACH_FORWARD(netdata_popen_root, mp, prev, next) {
- if(unlikely(mp->pid == pid)) {
- ret = 0;
- break;
+ netdata_popen_tracking_lock();
+ DOUBLE_LINKED_LIST_FOREACH_FORWARD(netdata_popen_root, mp, prev, next) {
+ if(unlikely(mp->pid == (pid_t)id))
+ break;
}
+
+ if(!mp)
+ netdata_popen_tracking_unlock();
}
- netdata_mutex_unlock(&netdata_popen_tracking_mutex);
+ int ret;
+ if(mp && mp->reaped) {
+ // we have already reaped this child
+ ret = mp->waitid_ret;
+ *infop = mp->infop;
+ }
+ else {
+ // we haven't reaped this child yet
+ ret = waitid(idtype, id, infop, options);
+
+ if(mp && !mp->reaped) {
+ mp->reaped = true;
+ mp->infop = *infop;
+ mp->waitid_ret = ret;
+ }
+ }
+
+ if(mp)
+ netdata_popen_tracking_unlock();
+
return ret;
}
@@ -404,7 +394,7 @@ int netdata_pclose(FILE *fp_child_input, FILE *fp_child_output, pid_t pid) {
errno = 0;
- ret = waitid(P_PID, (id_t) pid, &info, WEXITED);
+ ret = netdata_waitid(P_PID, (id_t) pid, &info, WEXITED);
netdata_popen_tracking_del_pid(pid);
if (ret != -1) {
@@ -415,8 +405,12 @@ int netdata_pclose(FILE *fp_child_input, FILE *fp_child_output, pid_t pid) {
return(info.si_status);
case CLD_KILLED:
- if(info.si_status == 15) {
- info("child pid %d killed by signal %d.", info.si_pid, info.si_status);
+ if(info.si_status == SIGTERM) {
+ info("child pid %d killed by SIGTERM", info.si_pid);
+ return(0);
+ }
+ else if(info.si_status == SIGPIPE) {
+ info("child pid %d killed by SIGPIPE.", info.si_pid);
return(0);
}
else {
@@ -450,7 +444,3 @@ int netdata_pclose(FILE *fp_child_input, FILE *fp_child_output, pid_t pid) {
return 0;
}
-
-int netdata_spawn_waitpid(pid_t pid) {
- return netdata_pclose(NULL, NULL, pid);
-}
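netdata_waitid() caches the first siginfo_t per tracked pid because a child can be reaped only once; a second plain waitid() on the same pid fails with ECHILD. A standalone illustration of that hazard (not netdata code):

    #include <sys/wait.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void) {
        pid_t pid = fork();
        if (pid == 0)
            _exit(0);                                           // child exits at once

        siginfo_t info;
        int first = waitid(P_PID, (id_t) pid, &info, WEXITED);  // reaps the child
        int second = waitid(P_PID, (id_t) pid, &info, WEXITED); // fails: ECHILD

        printf("first=%d second=%d\n", first, second);          // first=0 second=-1
        return 0;
    }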
diff --git a/libnetdata/popen/popen.h b/libnetdata/popen/popen.h
index c57a35a4e..4f86158bc 100644
--- a/libnetdata/popen/popen.h
+++ b/libnetdata/popen/popen.h
@@ -28,13 +28,6 @@ int netdata_popene_variadic_internal_dont_use_directly(volatile pid_t *pidptr, c
int netdata_pclose(FILE *fp_child_input, FILE *fp_child_output, pid_t pid);
int netdata_spawn(const char *command, volatile pid_t *pidptr);
-int netdata_spawn_waitpid(pid_t pid);
-
-void netdata_popen_tracking_init(void);
-void netdata_popen_tracking_cleanup(void);
-int netdata_popen_tracking_pid_shoud_be_reaped(pid_t pid);
-
-void signals_unblock(void);
-void signals_reset(void);
+int netdata_waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options);
#endif /* NETDATA_POPEN_H */
diff --git a/libnetdata/simple_pattern/README.md b/libnetdata/simple_pattern/README.md
index e00006d37..5f56a3af7 100644
--- a/libnetdata/simple_pattern/README.md
+++ b/libnetdata/simple_pattern/README.md
@@ -16,7 +16,7 @@ to use, write and understand.
So, Netdata supports **simple patterns**.
Simple patterns are a space separated list of words, that can have `*`
-as a wildcard. Each world may use any number of `*`. Simple patterns
+as a wildcard. Each word may use any number of `*`. Simple patterns
allow **negative** matches by prefixing a word with `!`.
So, `pattern = !*bad* *` will match anything, except all those that
diff --git a/libnetdata/socket/security.c b/libnetdata/socket/security.c
index 7c5092150..d1181ad5f 100644
--- a/libnetdata/socket/security.c
+++ b/libnetdata/socket/security.c
@@ -3,13 +3,389 @@
#ifdef ENABLE_HTTPS
SSL_CTX *netdata_ssl_exporting_ctx =NULL;
-SSL_CTX *netdata_ssl_client_ctx =NULL;
-SSL_CTX *netdata_ssl_srv_ctx =NULL;
+SSL_CTX *netdata_ssl_streaming_sender_ctx =NULL;
+SSL_CTX *netdata_ssl_web_server_ctx =NULL;
const char *netdata_ssl_security_key =NULL;
const char *netdata_ssl_security_cert =NULL;
const char *tls_version=NULL;
const char *tls_ciphers=NULL;
-int netdata_ssl_validate_server = NETDATA_SSL_VALID_CERTIFICATE;
+bool netdata_ssl_validate_certificate = true;
+bool netdata_ssl_validate_certificate_sender = true;
+
+static SOCKET_PEERS netdata_ssl_peers(NETDATA_SSL *ssl) {
+ int sock_fd;
+
+ if(unlikely(!ssl->conn))
+ sock_fd = -1;
+ else
+ sock_fd = SSL_get_rfd(ssl->conn);
+
+ return socket_peers(sock_fd);
+}
+
+bool netdata_ssl_open(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd) {
+ errno = 0;
+ ssl->ssl_errno = 0;
+
+ if(ssl->conn) {
+ if(!ctx || SSL_get_SSL_CTX(ssl->conn) != ctx) {
+ SSL_free(ssl->conn);
+ ssl->conn = NULL;
+ }
+ else if (SSL_clear(ssl->conn) == 0) {
+ netdata_ssl_log_error_queue("SSL_clear", ssl);
+ SSL_free(ssl->conn);
+ ssl->conn = NULL;
+ }
+ }
+
+ if(!ssl->conn) {
+ if(!ctx) {
+ internal_error(true, "SSL: not CTX given");
+ ssl->state = NETDATA_SSL_STATE_FAILED;
+ return false;
+ }
+
+ ssl->conn = SSL_new(ctx);
+ if (!ssl->conn) {
+ netdata_ssl_log_error_queue("SSL_new", ssl);
+ ssl->state = NETDATA_SSL_STATE_FAILED;
+ return false;
+ }
+ }
+
+ if(SSL_set_fd(ssl->conn, fd) != 1) {
+ netdata_ssl_log_error_queue("SSL_set_fd", ssl);
+ ssl->state = NETDATA_SSL_STATE_FAILED;
+ return false;
+ }
+
+ ssl->state = NETDATA_SSL_STATE_INIT;
+
+ ERR_clear_error();
+
+ return true;
+}
+
+void netdata_ssl_close(NETDATA_SSL *ssl) {
+ errno = 0;
+ ssl->ssl_errno = 0;
+
+ if(ssl->conn) {
+ if(SSL_connection(ssl)) {
+ int ret = SSL_shutdown(ssl->conn);
+ if(ret == 0)
+ SSL_shutdown(ssl->conn);
+ }
+
+ SSL_free(ssl->conn);
+
+ ERR_clear_error();
+ }
+
+ *ssl = NETDATA_SSL_UNSET_CONNECTION;
+}
+
+void netdata_ssl_log_error_queue(const char *call, NETDATA_SSL *ssl) {
+ error_limit_static_thread_var(erl, 1, 0);
+ unsigned long err;
+ while((err = ERR_get_error())) {
+ char *code;
+
+ switch (err) {
+ case SSL_ERROR_NONE:
+ code = "SSL_ERROR_NONE";
+ break;
+
+ case SSL_ERROR_SSL:
+ code = "SSL_ERROR_SSL";
+ ssl->state = NETDATA_SSL_STATE_FAILED;
+ break;
+
+ case SSL_ERROR_WANT_READ:
+ code = "SSL_ERROR_WANT_READ";
+ break;
+
+ case SSL_ERROR_WANT_WRITE:
+ code = "SSL_ERROR_WANT_WRITE";
+ break;
+
+ case SSL_ERROR_WANT_X509_LOOKUP:
+ code = "SSL_ERROR_WANT_X509_LOOKUP";
+ break;
+
+ case SSL_ERROR_SYSCALL:
+ code = "SSL_ERROR_SYSCALL";
+ ssl->state = NETDATA_SSL_STATE_FAILED;
+ break;
+
+ case SSL_ERROR_ZERO_RETURN:
+ code = "SSL_ERROR_ZERO_RETURN";
+ break;
+
+ case SSL_ERROR_WANT_CONNECT:
+ code = "SSL_ERROR_WANT_CONNECT";
+ break;
+
+ case SSL_ERROR_WANT_ACCEPT:
+ code = "SSL_ERROR_WANT_ACCEPT";
+ break;
+
+#ifdef SSL_ERROR_WANT_ASYNC
+ case SSL_ERROR_WANT_ASYNC:
+ code = "SSL_ERROR_WANT_ASYNC";
+ break;
+#endif
+
+#ifdef SSL_ERROR_WANT_ASYNC_JOB
+ case SSL_ERROR_WANT_ASYNC_JOB:
+ code = "SSL_ERROR_WANT_ASYNC_JOB";
+ break;
+#endif
+
+#ifdef SSL_ERROR_WANT_CLIENT_HELLO_CB
+ case SSL_ERROR_WANT_CLIENT_HELLO_CB:
+ code = "SSL_ERROR_WANT_CLIENT_HELLO_CB";
+ break;
+#endif
+
+#ifdef SSL_ERROR_WANT_RETRY_VERIFY
+ case SSL_ERROR_WANT_RETRY_VERIFY:
+ code = "SSL_ERROR_WANT_RETRY_VERIFY";
+ break;
+#endif
+
+ default:
+ code = "SSL_ERROR_UNKNOWN";
+ break;
+ }
+
+ char str[1024 + 1];
+ ERR_error_string_n(err, str, 1024);
+ str[1024] = '\0';
+ SOCKET_PEERS peers = netdata_ssl_peers(ssl);
+ error_limit(&erl, "SSL: %s() on socket local [[%s]:%d] <-> remote [[%s]:%d], returned error %lu (%s): %s",
+ call, peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, err, code, str);
+ }
+}
+
+static inline bool is_handshake_complete(NETDATA_SSL *ssl, const char *op) {
+ error_limit_static_thread_var(erl, 1, 0);
+
+ if(unlikely(!ssl->conn)) {
+ internal_error(true, "SSL: trying to %s on a NULL connection", op);
+ return false;
+ }
+
+ switch(ssl->state) {
+ case NETDATA_SSL_STATE_NOT_SSL: {
+ SOCKET_PEERS peers = netdata_ssl_peers(ssl);
+ error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a non-SSL connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ return false;
+ }
+
+ case NETDATA_SSL_STATE_INIT: {
+ SOCKET_PEERS peers = netdata_ssl_peers(ssl);
+ error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on an incomplete connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ return false;
+ }
+
+ case NETDATA_SSL_STATE_FAILED: {
+ SOCKET_PEERS peers = netdata_ssl_peers(ssl);
+ error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a failed connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ return false;
+ }
+
+ case NETDATA_SSL_STATE_COMPLETE: {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/*
+ * netdata_ssl_read() should return the same as read():
+ *
+ * Positive value: The read() function succeeded and read some bytes. The exact number of bytes read is returned.
+ *
+ * Zero: For files and sockets, a return value of zero signifies end-of-file (EOF), meaning no more data is available
+ * for reading. For sockets, this usually means the other side has closed the connection.
+ *
+ * -1: An error occurred. The specific error can be found by examining the errno variable.
+ * EAGAIN or EWOULDBLOCK: The file descriptor is in non-blocking mode, and the read operation would block.
+ * (These are often the same value, but can be different on some systems.)
+ */
+
+ssize_t netdata_ssl_read(NETDATA_SSL *ssl, void *buf, size_t num) {
+ errno = 0;
+ ssl->ssl_errno = 0;
+
+ if(unlikely(!is_handshake_complete(ssl, "read")))
+ return -1;
+
+ int bytes = SSL_read(ssl->conn, buf, (int)num);
+
+ if(unlikely(bytes <= 0)) {
+ int err = SSL_get_error(ssl->conn, bytes);
+ netdata_ssl_log_error_queue("SSL_read", ssl);
+ if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {
+ ssl->ssl_errno = err;
+ errno = EWOULDBLOCK;
+ }
+
+ bytes = -1; // according to read() or recv()
+ }
+
+ return bytes;
+}
+
+/*
+ * netdata_ssl_write() should return the same as write():
+ *
+ * Positive value: The write() function succeeded and wrote some bytes. The exact number of bytes written is returned.
+ *
+ * Zero: It's technically possible for write() to return zero, indicating that zero bytes were written. However, for a
+ * socket, this generally does not happen unless the size of the data to be written is zero.
+ *
+ * -1: An error occurred. The specific error can be found by examining the errno variable.
+ * EAGAIN or EWOULDBLOCK: The file descriptor is in non-blocking mode, and the write operation would block.
+ * (These are often the same value, but can be different on some systems.)
+ */
+
+ssize_t netdata_ssl_write(NETDATA_SSL *ssl, const void *buf, size_t num) {
+ errno = 0;
+ ssl->ssl_errno = 0;
+
+ if(unlikely(!is_handshake_complete(ssl, "write")))
+ return -1;
+
+ int bytes = SSL_write(ssl->conn, (uint8_t *)buf, (int)num);
+
+ if(unlikely(bytes <= 0)) {
+ int err = SSL_get_error(ssl->conn, bytes);
+ netdata_ssl_log_error_queue("SSL_write", ssl);
+ if (err == SSL_ERROR_WANT_READ || err == SSL_ERROR_WANT_WRITE) {
+ ssl->ssl_errno = err;
+ errno = EWOULDBLOCK;
+ }
+
+ bytes = -1; // according to write() or send()
+ }
+
+ return bytes;
+}
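
The comments above spell out read()/write()-compatible semantics, including the mapping of SSL_ERROR_WANT_READ/WANT_WRITE to EWOULDBLOCK. A minimal caller sketch honoring that contract; everything except netdata_ssl_read() and the errno handling is hypothetical:

    // Loop until data, EOF, or a hard error; a real non-blocking caller
    // would poll() instead of spinning on EWOULDBLOCK.
    static ssize_t ssl_read_blocking(NETDATA_SSL *ssl, void *buf, size_t len) {
        for (;;) {
            ssize_t rc = netdata_ssl_read(ssl, buf, len);
            if (rc >= 0)
                return rc;                    // data (> 0) or EOF (0)

            if (errno == EWOULDBLOCK || errno == EAGAIN)
                continue;                     // WANT_READ/WANT_WRITE mapped here

            return -1;                        // hard error
        }
    }
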
+
+static inline bool is_handshake_initialized(NETDATA_SSL *ssl, const char *op) {
+ error_limit_static_thread_var(erl, 1, 0);
+
+ if(unlikely(!ssl->conn)) {
+ internal_error(true, "SSL: trying to %s on a NULL connection", op);
+ return false;
+ }
+
+ switch(ssl->state) {
+ case NETDATA_SSL_STATE_NOT_SSL: {
+ SOCKET_PEERS peers = netdata_ssl_peers(ssl);
+ error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a non-SSL connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ return false;
+ }
+
+ case NETDATA_SSL_STATE_INIT: {
+ return true;
+ }
+
+ case NETDATA_SSL_STATE_FAILED: {
+ SOCKET_PEERS peers = netdata_ssl_peers(ssl);
+ error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on a failed connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ return false;
+ }
+
+ case NETDATA_SSL_STATE_COMPLETE: {
+ SOCKET_PEERS peers = netdata_ssl_peers(ssl);
+ error_limit(&erl, "SSL: on socket local [[%s]:%d] <-> remote [[%s]:%d], attempt to %s on an already complete connection",
+ peers.local.ip, peers.local.port, peers.peer.ip, peers.peer.port, op);
+ return false;
+ }
+ }
+
+ return false;
+}
+
+#define WANT_READ_WRITE_TIMEOUT_MS 10
+
+static inline bool want_read_write_should_retry(NETDATA_SSL *ssl, int err) {
+ int ssl_errno = SSL_get_error(ssl->conn, err);
+ if(ssl_errno == SSL_ERROR_WANT_READ || ssl_errno == SSL_ERROR_WANT_WRITE) {
+ struct pollfd pfds[1] = { [0] = {
+ .fd = SSL_get_rfd(ssl->conn),
+ .events = (short)(((ssl_errno == SSL_ERROR_WANT_READ ) ? POLLIN : 0) |
+ ((ssl_errno == SSL_ERROR_WANT_WRITE) ? POLLOUT : 0)),
+ }};
+
+ if(poll(pfds, 1, WANT_READ_WRITE_TIMEOUT_MS) <= 0)
+ return false; // timeout (0) or error (<0)
+
+ return true; // we have activity, so we should retry
+ }
+
+ return false; // an unknown error
+}
+
+bool netdata_ssl_connect(NETDATA_SSL *ssl) {
+ errno = 0;
+ ssl->ssl_errno = 0;
+
+ if(unlikely(!is_handshake_initialized(ssl, "connect")))
+ return false;
+
+ SSL_set_connect_state(ssl->conn);
+
+ int err;
+ while ((err = SSL_connect(ssl->conn)) != 1) {
+ if(!want_read_write_should_retry(ssl, err))
+ break;
+ }
+
+ if (err != 1) {
+ netdata_ssl_log_error_queue("SSL_connect", ssl);
+ ssl->state = NETDATA_SSL_STATE_FAILED;
+ return false;
+ }
+
+ ssl->state = NETDATA_SSL_STATE_COMPLETE;
+ return true;
+}
+
+bool netdata_ssl_accept(NETDATA_SSL *ssl) {
+ errno = 0;
+ ssl->ssl_errno = 0;
+
+ if(unlikely(!is_handshake_initialized(ssl, "accept")))
+ return false;
+
+ SSL_set_accept_state(ssl->conn);
+
+ int err;
+ while ((err = SSL_accept(ssl->conn)) != 1) {
+ if(!want_read_write_should_retry(ssl, err))
+ break;
+ }
+
+ if (err != 1) {
+ netdata_ssl_log_error_queue("SSL_accept", ssl);
+ ssl->state = NETDATA_SSL_STATE_FAILED;
+ return false;
+ }
+
+ ssl->state = NETDATA_SSL_STATE_COMPLETE;
+ return true;
+}
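
The new API splits the connection lifecycle: netdata_ssl_open() binds an SSL object to a file descriptor, netdata_ssl_accept()/netdata_ssl_connect() drive the handshake, and netdata_ssl_read()/write()/close() handle the rest. A hedged server-side lifecycle sketch using only functions added in this diff; fd is an already-accepted TCP socket and error handling is abbreviated:

    static void serve_tls(int fd) {
        NETDATA_SSL ssl = NETDATA_SSL_UNSET_CONNECTION;

        if (netdata_ssl_open(&ssl, netdata_ssl_web_server_ctx, fd) &&
            netdata_ssl_accept(&ssl)) {
            char buf[4096];
            ssize_t n = netdata_ssl_read(&ssl, buf, sizeof(buf));
            if (n > 0)
                netdata_ssl_write(&ssl, buf, (size_t)n);   // echo back
        }

        netdata_ssl_close(&ssl);   // safe after failure too: frees conn, resets state
    }
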
/**
* Info Callback
@@ -20,7 +396,7 @@ int netdata_ssl_validate_server = NETDATA_SSL_VALID_CERTIFICATE;
* @param where the variable with the flags set.
* @param ret the return of the caller
*/
-static void security_info_callback(const SSL *ssl, int where, int ret __maybe_unused) {
+static void netdata_ssl_info_callback(const SSL *ssl, int where, int ret __maybe_unused) {
(void)ssl;
if (where & SSL_CB_ALERT) {
debug(D_WEB_CLIENT,"SSL INFO CALLBACK %s %s", SSL_alert_type_string(ret), SSL_alert_desc_string_long(ret));
@@ -32,8 +408,8 @@ static void security_info_callback(const SSL *ssl, int where, int ret __maybe_un
*
* Starts the openssl library for the Netdata.
*/
-void security_openssl_library()
-{
+void netdata_ssl_initialize_openssl() {
+
#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
# if (SSLEAY_VERSION_NUMBER >= OPENSSL_VERSION_097)
OPENSSL_config(NULL);
@@ -42,10 +418,13 @@ void security_openssl_library()
SSL_load_error_strings();
SSL_library_init();
+
#else
+
if (OPENSSL_init_ssl(OPENSSL_INIT_LOAD_CONFIG, NULL) != 1) {
error("SSL library cannot be initialized.");
}
+
#endif
}
@@ -59,7 +438,7 @@ void security_openssl_library()
*
* @return it returns the version number.
*/
-int tls_select_version(const char *lversion) {
+static int netdata_ssl_select_tls_version(const char *lversion) {
if (!strcmp(lversion, "1") || !strcmp(lversion, "1.0"))
return TLS1_VERSION;
else if (!strcmp(lversion, "1.1"))
@@ -80,43 +459,13 @@ int tls_select_version(const char *lversion) {
#endif
/**
- * OpenSSL common options
- *
- * Clients and SERVER have common options, this function is responsible to set them in the context.
- *
- * @param ctx the initialized SSL context.
- * @param side 0 means server, and 1 client.
- */
-void security_openssl_common_options(SSL_CTX *ctx, int side) {
-#if OPENSSL_VERSION_NUMBER >= OPENSSL_VERSION_110
- if (!side) {
- int version = tls_select_version(tls_version) ;
-#endif
-#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
- SSL_CTX_set_options (ctx,SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3|SSL_OP_NO_COMPRESSION);
-#else
- SSL_CTX_set_min_proto_version(ctx, TLS1_VERSION);
- SSL_CTX_set_max_proto_version(ctx, version);
-
- if(tls_ciphers && strcmp(tls_ciphers, "none") != 0) {
- if (!SSL_CTX_set_cipher_list(ctx, tls_ciphers)) {
- error("SSL error. cannot set the cipher list");
- }
- }
- }
-#endif
-
- SSL_CTX_set_mode(ctx, SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
-}
-
-/**
* Initialize Openssl Client
*
* Starts the client context with TLS 1.2.
*
* @return It returns the context on success or NULL otherwise
*/
-SSL_CTX * security_initialize_openssl_client() {
+SSL_CTX * netdata_ssl_create_client_ctx(unsigned long mode) {
SSL_CTX *ctx;
#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
ctx = SSL_CTX_new(SSLv23_client_method());
@@ -138,6 +487,9 @@ SSL_CTX * security_initialize_openssl_client() {
#endif
}
+ if(mode)
+ SSL_CTX_set_mode(ctx, mode);
+
return ctx;
}
@@ -148,7 +500,7 @@ SSL_CTX * security_initialize_openssl_client() {
*
* @return It returns the context on success or NULL otherwise
*/
-static SSL_CTX * security_initialize_openssl_server() {
+static SSL_CTX * netdata_ssl_create_server_ctx(unsigned long mode) {
SSL_CTX *ctx;
char lerror[512];
static int netdata_id_context = 1;
@@ -171,7 +523,19 @@ static SSL_CTX * security_initialize_openssl_server() {
SSL_CTX_use_certificate_chain_file(ctx, netdata_ssl_security_cert);
#endif
- security_openssl_common_options(ctx, 0);
+
+#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
+ SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv2|SSL_OP_NO_SSLv3|SSL_OP_NO_COMPRESSION);
+#else
+ SSL_CTX_set_min_proto_version(ctx, TLS1_VERSION);
+ SSL_CTX_set_max_proto_version(ctx, netdata_ssl_select_tls_version(tls_version));
+
+ if(tls_ciphers && strcmp(tls_ciphers, "none") != 0) {
+ if (!SSL_CTX_set_cipher_list(ctx, tls_ciphers)) {
+ error("SSL error: cannot set the cipher list");
+ }
+ }
+#endif
SSL_CTX_use_PrivateKey_file(ctx, netdata_ssl_security_key,SSL_FILETYPE_PEM);
@@ -183,13 +547,15 @@ static SSL_CTX * security_initialize_openssl_server() {
}
SSL_CTX_set_session_id_context(ctx,(void*)&netdata_id_context,(unsigned int)sizeof(netdata_id_context));
- SSL_CTX_set_info_callback(ctx,security_info_callback);
+ SSL_CTX_set_info_callback(ctx, netdata_ssl_info_callback);
#if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_095)
SSL_CTX_set_verify_depth(ctx,1);
#endif
debug(D_WEB_CLIENT,"SSL GLOBAL CONTEXT STARTED\n");
+ SSL_CTX_set_mode(ctx, mode);
+
return ctx;
}
@@ -203,39 +569,54 @@ static SSL_CTX * security_initialize_openssl_server() {
* NETDATA_SSL_CONTEXT_STREAMING - Starts the streaming context.
* NETDATA_SSL_CONTEXT_EXPORTING - Starts the OpenTSDB context
*/
-void security_start_ssl(int selector) {
+void netdata_ssl_initialize_ctx(int selector) {
static SPINLOCK sp = NETDATA_SPINLOCK_INITIALIZER;
netdata_spinlock_lock(&sp);
switch (selector) {
- case NETDATA_SSL_CONTEXT_SERVER: {
- if(!netdata_ssl_srv_ctx) {
+ case NETDATA_SSL_WEB_SERVER_CTX: {
+ if(!netdata_ssl_web_server_ctx) {
struct stat statbuf;
if (stat(netdata_ssl_security_key, &statbuf) || stat(netdata_ssl_security_cert, &statbuf))
info("To use encryption it is necessary to set \"ssl certificate\" and \"ssl key\" in [web] !\n");
else {
- netdata_ssl_srv_ctx = security_initialize_openssl_server();
- SSL_CTX_set_mode(netdata_ssl_srv_ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
+ netdata_ssl_web_server_ctx = netdata_ssl_create_server_ctx(
+ SSL_MODE_ENABLE_PARTIAL_WRITE |
+ SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
+ // SSL_MODE_AUTO_RETRY |
+ 0);
+
+ if(netdata_ssl_web_server_ctx && !netdata_ssl_validate_certificate)
+ SSL_CTX_set_verify(netdata_ssl_web_server_ctx, SSL_VERIFY_NONE, NULL);
}
}
break;
}
- case NETDATA_SSL_CONTEXT_STREAMING: {
- if(!netdata_ssl_client_ctx) {
- netdata_ssl_client_ctx = security_initialize_openssl_client();
+ case NETDATA_SSL_STREAMING_SENDER_CTX: {
+ if(!netdata_ssl_streaming_sender_ctx) {
//This is necessary for the stream, because it sometimes works with a non-blocking socket.
//It returns the bitmask after the change; the documentation does not describe any errors.
- SSL_CTX_set_mode(netdata_ssl_client_ctx,
- SSL_MODE_ENABLE_PARTIAL_WRITE | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
- SSL_MODE_AUTO_RETRY);
+ netdata_ssl_streaming_sender_ctx = netdata_ssl_create_client_ctx(
+ SSL_MODE_ENABLE_PARTIAL_WRITE |
+ SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER |
+ // SSL_MODE_AUTO_RETRY |
+ 0
+ );
+
+ if(netdata_ssl_streaming_sender_ctx && !netdata_ssl_validate_certificate_sender)
+ SSL_CTX_set_verify(netdata_ssl_streaming_sender_ctx, SSL_VERIFY_NONE, NULL);
}
break;
}
- case NETDATA_SSL_CONTEXT_EXPORTING: {
- if(!netdata_ssl_exporting_ctx)
- netdata_ssl_exporting_ctx = security_initialize_openssl_client();
+ case NETDATA_SSL_EXPORTING_CTX: {
+ if(!netdata_ssl_exporting_ctx) {
+ netdata_ssl_exporting_ctx = netdata_ssl_create_client_ctx(0);
+
+ if(netdata_ssl_exporting_ctx && !netdata_ssl_validate_certificate)
+ SSL_CTX_set_verify(netdata_ssl_exporting_ctx, SSL_VERIFY_NONE, NULL);
+ }
break;
}
}
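
Assumed startup order, based on the functions renamed in this diff: initialize the library once, then create one context per role, and free them all at shutdown. A short sketch:

    netdata_ssl_initialize_openssl();
    netdata_ssl_initialize_ctx(NETDATA_SSL_WEB_SERVER_CTX);
    netdata_ssl_initialize_ctx(NETDATA_SSL_STREAMING_SENDER_CTX);
    netdata_ssl_initialize_ctx(NETDATA_SSL_EXPORTING_CTX);
    // ... agent runs ...
    netdata_ssl_cleanup();   // frees all three contexts
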
@@ -248,18 +629,21 @@ void security_start_ssl(int selector) {
*
* Clean all the allocated contexts from netdata.
*/
-void security_clean_openssl()
+void netdata_ssl_cleanup()
{
- if (netdata_ssl_srv_ctx) {
- SSL_CTX_free(netdata_ssl_srv_ctx);
+ if (netdata_ssl_web_server_ctx) {
+ SSL_CTX_free(netdata_ssl_web_server_ctx);
+ netdata_ssl_web_server_ctx = NULL;
}
- if (netdata_ssl_client_ctx) {
- SSL_CTX_free(netdata_ssl_client_ctx);
+ if (netdata_ssl_streaming_sender_ctx) {
+ SSL_CTX_free(netdata_ssl_streaming_sender_ctx);
+ netdata_ssl_streaming_sender_ctx = NULL;
}
if (netdata_ssl_exporting_ctx) {
SSL_CTX_free(netdata_ssl_exporting_ctx);
+ netdata_ssl_exporting_ctx = NULL;
}
#if OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110
@@ -268,64 +652,6 @@ void security_clean_openssl()
}
/**
- * Process accept
- *
- * Process the SSL handshake with the client case it is necessary.
- *
- * @param ssl is a pointer for the SSL structure
- * @param msg is a copy of the first 8 bytes of the initial message received
- *
- * @return it returns 0 case it performs the handshake, 8 case it is clean connection
- * and another integer power of 2 otherwise.
- */
-int security_process_accept(SSL *ssl,int msg) {
- int sock = SSL_get_fd(ssl);
- int test;
- if (msg > 0x17)
- {
- return NETDATA_SSL_NO_HANDSHAKE;
- }
-
- ERR_clear_error();
- if ((test = SSL_accept(ssl)) <= 0) {
- int sslerrno = SSL_get_error(ssl, test);
- switch(sslerrno) {
- case SSL_ERROR_WANT_READ:
- {
- error("SSL handshake did not finish and it wanna read on socket %d!", sock);
- return NETDATA_SSL_WANT_READ;
- }
- case SSL_ERROR_WANT_WRITE:
- {
- error("SSL handshake did not finish and it wanna read on socket %d!", sock);
- return NETDATA_SSL_WANT_WRITE;
- }
- case SSL_ERROR_NONE:
- case SSL_ERROR_SSL:
- case SSL_ERROR_SYSCALL:
- default:
- {
- u_long err;
- char buf[256];
- int counter = 0;
- while ((err = ERR_get_error()) != 0) {
- ERR_error_string_n(err, buf, sizeof(buf));
- error("%d SSL Handshake error (%s) on socket %d", counter++, ERR_error_string((long)SSL_get_error(ssl, test), NULL), sock);
- }
- return NETDATA_SSL_NO_HANDSHAKE;
- }
- }
- }
-
- if (SSL_is_init_finished(ssl))
- {
- debug(D_WEB_CLIENT_ACCESS,"SSL Handshake finished %s errno %d on socket fd %d", ERR_error_string((long)SSL_get_error(ssl, test), NULL), errno, sock);
- }
-
- return NETDATA_SSL_HANDSHAKE_COMPLETE;
-}
-
-/**
* Test Certificate
*
* Check the certificate of Netdata parent
diff --git a/libnetdata/socket/security.h b/libnetdata/socket/security.h
index ae7c595e3..c83b60ad1 100644
--- a/libnetdata/socket/security.h
+++ b/libnetdata/socket/security.h
@@ -1,20 +1,16 @@
#ifndef NETDATA_SECURITY_H
# define NETDATA_SECURITY_H
-# define NETDATA_SSL_HANDSHAKE_COMPLETE 0 //All the steps were successful
-# define NETDATA_SSL_START 1 //Starting handshake, conn variable is NULL
-# define NETDATA_SSL_WANT_READ 2 //The connection wanna read from socket
-# define NETDATA_SSL_WANT_WRITE 4 //The connection wanna write on socket
-# define NETDATA_SSL_NO_HANDSHAKE 8 //Continue without encrypt connection.
-# define NETDATA_SSL_OPTIONAL 16 //Flag to define the HTTP request
-# define NETDATA_SSL_FORCE 32 //We only accepts HTTPS request
-# define NETDATA_SSL_INVALID_CERTIFICATE 64 //Accepts invalid certificate
-# define NETDATA_SSL_VALID_CERTIFICATE 128 //Accepts invalid certificate
-# define NETDATA_SSL_PROXY_HTTPS 256 //Proxy is using HTTPS
-
-#define NETDATA_SSL_CONTEXT_SERVER 0
-#define NETDATA_SSL_CONTEXT_STREAMING 1
-#define NETDATA_SSL_CONTEXT_EXPORTING 2
+typedef enum __attribute__((packed)) {
+ NETDATA_SSL_STATE_NOT_SSL = 1, // This connection is not SSL
+ NETDATA_SSL_STATE_INIT, // SSL handshake is initialized
+ NETDATA_SSL_STATE_FAILED, // SSL handshake failed
+ NETDATA_SSL_STATE_COMPLETE, // SSL handshake successful
+} NETDATA_SSL_STATE;
+
+#define NETDATA_SSL_WEB_SERVER_CTX 0
+#define NETDATA_SSL_STREAMING_SENDER_CTX 1
+#define NETDATA_SSL_EXPORTING_CTX 2
# ifdef ENABLE_HTTPS
@@ -37,27 +33,42 @@
#include <openssl/decoder.h>
#endif
-struct netdata_ssl {
- SSL *conn; //SSL connection
- uint32_t flags; //The flags for SSL connection
-};
+typedef struct netdata_ssl {
+ SSL *conn; // SSL connection
+ NETDATA_SSL_STATE state; // The state for SSL connection
+ unsigned long ssl_errno; // The SSL errno of the last SSL call
+} NETDATA_SSL;
+
+#define NETDATA_SSL_UNSET_CONNECTION (NETDATA_SSL){ .conn = NULL, .state = NETDATA_SSL_STATE_NOT_SSL }
+
+#define SSL_connection(ssl) ((ssl)->conn && (ssl)->state != NETDATA_SSL_STATE_NOT_SSL)
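
The typedef replaces the old bit-flag field with an explicit state machine. A small sketch of the transitions implied by this header (an assumption drawn from the function names, not stated in the source): NETDATA_SSL_UNSET_CONNECTION starts at NOT_SSL with conn == NULL, netdata_ssl_open() moves to INIT, and the handshake functions end at COMPLETE or FAILED.

    NETDATA_SSL ssl = NETDATA_SSL_UNSET_CONNECTION;

    if (!SSL_connection(&ssl)) {
        // plain TCP path: conn is NULL or state is NETDATA_SSL_STATE_NOT_SSL
    }
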
extern SSL_CTX *netdata_ssl_exporting_ctx;
-extern SSL_CTX *netdata_ssl_client_ctx;
-extern SSL_CTX *netdata_ssl_srv_ctx;
+extern SSL_CTX *netdata_ssl_streaming_sender_ctx;
+extern SSL_CTX *netdata_ssl_web_server_ctx;
extern const char *netdata_ssl_security_key;
extern const char *netdata_ssl_security_cert;
extern const char *tls_version;
extern const char *tls_ciphers;
-extern int netdata_ssl_validate_server;
+extern bool netdata_ssl_validate_certificate;
+extern bool netdata_ssl_validate_certificate_sender;
int ssl_security_location_for_context(SSL_CTX *ctx,char *file,char *path);
-void security_openssl_library();
-void security_clean_openssl();
-void security_start_ssl(int selector);
-int security_process_accept(SSL *ssl,int msg);
+void netdata_ssl_initialize_openssl();
+void netdata_ssl_cleanup();
+void netdata_ssl_initialize_ctx(int selector);
int security_test_certificate(SSL *ssl);
-SSL_CTX * security_initialize_openssl_client();
+SSL_CTX * netdata_ssl_create_client_ctx(unsigned long mode);
+
+bool netdata_ssl_connect(NETDATA_SSL *ssl);
+bool netdata_ssl_accept(NETDATA_SSL *ssl);
+
+bool netdata_ssl_open(NETDATA_SSL *ssl, SSL_CTX *ctx, int fd);
+void netdata_ssl_close(NETDATA_SSL *ssl);
+void netdata_ssl_log_error_queue(const char *call, NETDATA_SSL *ssl);
+
+ssize_t netdata_ssl_read(NETDATA_SSL *ssl, void *buf, size_t num);
+ssize_t netdata_ssl_write(NETDATA_SSL *ssl, const void *buf, size_t num);
# endif //ENABLE_HTTPS
#endif //NETDATA_SECURITY_H
diff --git a/libnetdata/socket/socket.c b/libnetdata/socket/socket.c
index 7eb212b33..220db7601 100644
--- a/libnetdata/socket/socket.c
+++ b/libnetdata/socket/socket.c
@@ -10,6 +10,63 @@
#include "../libnetdata.h"
+
+SOCKET_PEERS socket_peers(int sock_fd) {
+ SOCKET_PEERS peers;
+
+ if(sock_fd < 0) {
+ strncpyz(peers.peer.ip, "unknown", sizeof(peers.peer.ip) - 1);
+ peers.peer.port = 0;
+
+ strncpyz(peers.local.ip, "unknown", sizeof(peers.local.ip) - 1);
+ peers.local.port = 0;
+
+ return peers;
+ }
+
+ struct sockaddr_storage addr;
+ socklen_t addr_len = sizeof(addr);
+
+ // Get peer info
+ if (getpeername(sock_fd, (struct sockaddr *)&addr, &addr_len) == 0) {
+ if (addr.ss_family == AF_INET) { // IPv4
+ struct sockaddr_in *s = (struct sockaddr_in *)&addr;
+ inet_ntop(AF_INET, &s->sin_addr, peers.peer.ip, sizeof(peers.peer.ip));
+ peers.peer.port = ntohs(s->sin_port);
+ }
+ else { // IPv6
+ struct sockaddr_in6 *s = (struct sockaddr_in6 *)&addr;
+ inet_ntop(AF_INET6, &s->sin6_addr, peers.peer.ip, sizeof(peers.peer.ip));
+ peers.peer.port = ntohs(s->sin6_port);
+ }
+ }
+ else {
+ strncpyz(peers.peer.ip, "unknown", sizeof(peers.peer.ip) - 1);
+ peers.peer.port = 0;
+ }
+
+ // Get local info
+ addr_len = sizeof(addr);
+ if (getsockname(sock_fd, (struct sockaddr *)&addr, &addr_len) == 0) {
+ if (addr.ss_family == AF_INET) { // IPv4
+ struct sockaddr_in *s = (struct sockaddr_in *) &addr;
+ inet_ntop(AF_INET, &s->sin_addr, peers.local.ip, sizeof(peers.local.ip));
+ peers.local.port = ntohs(s->sin_port);
+ } else { // IPv6
+ struct sockaddr_in6 *s = (struct sockaddr_in6 *) &addr;
+ inet_ntop(AF_INET6, &s->sin6_addr, peers.local.ip, sizeof(peers.local.ip));
+ peers.local.port = ntohs(s->sin6_port);
+ }
+ }
+ else {
+ strncpyz(peers.local.ip, "unknown", sizeof(peers.local.ip) - 1);
+ peers.local.port = 0;
+ }
+
+ return peers;
+}
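
A usage sketch: socket_peers() never fails; unknown endpoints come back as "unknown" with port 0, so the result can be logged unconditionally. The log_peers() wrapper is hypothetical:

    static void log_peers(int sock_fd) {
        SOCKET_PEERS p = socket_peers(sock_fd);
        error("socket local [%s]:%d <-> remote [%s]:%d",
              p.local.ip, p.local.port, p.peer.ip, p.peer.port);
    }
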
+
+
// --------------------------------------------------------------------------------------------------------------------
// various library calls
@@ -967,49 +1024,11 @@ int connect_to_one_of_urls(const char *destination, int default_port, struct tim
}
-#ifdef ENABLE_HTTPS
-ssize_t netdata_ssl_read(SSL *ssl, void *buf, size_t num) {
- error_limit_static_thread_var(erl, 1, 0);
-
- int bytes, err;
-
- bytes = SSL_read(ssl, buf, (int)num);
- err = SSL_get_error(ssl, bytes);
-
- if(unlikely(bytes <= 0)) {
- if (err == SSL_ERROR_WANT_WRITE || err == SSL_ERROR_WANT_READ) {
- bytes = 0;
- } else
- error_limit(&erl, "SSL_write() returned %d bytes, SSL error %d", bytes, err);
- }
-
- return bytes;
-}
-
-ssize_t netdata_ssl_write(SSL *ssl, const void *buf, size_t num) {
- error_limit_static_thread_var(erl, 1, 0);
-
- int bytes, err;
-
- bytes = SSL_write(ssl, (uint8_t *)buf, (int)num);
- err = SSL_get_error(ssl, bytes);
-
- if(unlikely(bytes <= 0)) {
- if (err == SSL_ERROR_WANT_WRITE || err == SSL_ERROR_WANT_READ) {
- bytes = 0;
- } else
- error_limit(&erl, "SSL_write() returned %d bytes, SSL error %d", bytes, err);
- }
-
- return bytes;
-}
-#endif
-
// --------------------------------------------------------------------------------------------------------------------
// helpers to send/receive data in one call, in blocking mode, with a timeout
#ifdef ENABLE_HTTPS
-ssize_t recv_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout) {
+ssize_t recv_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, size_t len, int flags, int timeout) {
#else
ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
#endif
@@ -1033,24 +1052,24 @@ ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout)
return -1;
}
- if(!retval) {
+ if(!retval)
// timeout
return 0;
- }
- if(fd.events & POLLIN) break;
+ if(fd.revents & POLLIN)
+ break;
}
#ifdef ENABLE_HTTPS
- if (ssl->conn && ssl->flags == NETDATA_SSL_HANDSHAKE_COMPLETE)
- return netdata_ssl_read(ssl->conn, buf, len);
+ if (SSL_connection(ssl))
+ return netdata_ssl_read(ssl, buf, len);
#endif
return recv(sockfd, buf, len, flags);
}
#ifdef ENABLE_HTTPS
-ssize_t send_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout) {
+ssize_t send_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, size_t len, int flags, int timeout) {
#else
ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout) {
#endif
@@ -1079,13 +1098,13 @@ ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout)
return 0;
}
- if(fd.events & POLLOUT) break;
+ if(fd.revents & POLLOUT) break;
}
#ifdef ENABLE_HTTPS
if(ssl->conn) {
- if (ssl->flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
- return netdata_ssl_write(ssl->conn, buf, len);
+ if (SSL_connection(ssl)) {
+ return netdata_ssl_write(ssl, buf, len);
}
else {
error("cannot write to SSL connection - connection is not ready.");
diff --git a/libnetdata/socket/socket.h b/libnetdata/socket/socket.h
index 110063014..0e29711e0 100644
--- a/libnetdata/socket/socket.h
+++ b/libnetdata/socket/socket.h
@@ -68,10 +68,8 @@ int connect_to_one_of_urls(const char *destination, int default_port, struct tim
#ifdef ENABLE_HTTPS
-ssize_t recv_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout);
-ssize_t send_timeout(struct netdata_ssl *ssl,int sockfd, void *buf, size_t len, int flags, int timeout);
-ssize_t netdata_ssl_read(SSL *ssl, void *buf, size_t num);
-ssize_t netdata_ssl_write(SSL *ssl, const void *buf, size_t num);
+ssize_t recv_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, size_t len, int flags, int timeout);
+ssize_t send_timeout(NETDATA_SSL *ssl,int sockfd, void *buf, size_t len, int flags, int timeout);
#else
ssize_t recv_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
ssize_t send_timeout(int sockfd, void *buf, size_t len, int flags, int timeout);
@@ -219,4 +217,22 @@ void poll_events(LISTEN_SOCKETS *sockets
, size_t max_tcp_sockets
);
+#ifndef INET6_ADDRSTRLEN
+#define INET6_ADDRSTRLEN 46
+#endif
+
+typedef struct socket_peers {
+ struct {
+ char ip[INET6_ADDRSTRLEN];
+ int port;
+ } local;
+
+ struct {
+ char ip[INET6_ADDRSTRLEN];
+ int port;
+ } peer;
+} SOCKET_PEERS;
+
+SOCKET_PEERS socket_peers(int sock_fd);
+
#endif //NETDATA_SOCKET_H
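
A hedged sketch of the updated helper signatures (HTTPS build): the NETDATA_SSL pointer replaces the old struct netdata_ssl, and recv_timeout() falls back to plain recv() whenever SSL_connection(ssl) is false. The wrapper name is hypothetical:

    static ssize_t read_with_timeout(NETDATA_SSL *ssl, int sockfd, int timeout) {
        char buf[4096];
        ssize_t n = recv_timeout(ssl, sockfd, buf, sizeof(buf), 0, timeout);
        // n > 0: data (TLS or plain), n == 0: timeout, n < 0: error
        return n;
    }
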
diff --git a/ml/Config.cc b/ml/Config.cc
index c5129c49d..c00d2e8ee 100644
--- a/ml/Config.cc
+++ b/ml/Config.cc
@@ -25,10 +25,10 @@ void ml_config_load(ml_config_t *cfg) {
* Read values
*/
- unsigned max_train_samples = config_get_number(config_section_ml, "maximum num samples to train", 4 * 3600);
+ unsigned max_train_samples = config_get_number(config_section_ml, "maximum num samples to train", 6 * 3600);
unsigned min_train_samples = config_get_number(config_section_ml, "minimum num samples to train", 1 * 900);
- unsigned train_every = config_get_number(config_section_ml, "train every", 1 * 3600);
- unsigned num_models_to_use = config_get_number(config_section_ml, "number of models per dimension", 2);
+ unsigned train_every = config_get_number(config_section_ml, "train every", 3 * 3600);
+ unsigned num_models_to_use = config_get_number(config_section_ml, "number of models per dimension", 9);
unsigned diff_n = config_get_number(config_section_ml, "num samples to diff", 1);
unsigned smooth_n = config_get_number(config_section_ml, "num samples to smooth", 3);
@@ -86,7 +86,7 @@ void ml_config_load(ml_config_t *cfg) {
error("invalid min/max train samples found (%u >= %u)", min_train_samples, max_train_samples);
min_train_samples = 1 * 3600;
- max_train_samples = 4 * 3600;
+ max_train_samples = 6 * 3600;
}
/*
diff --git a/ml/README.md b/ml/README.md
index 60f38f22e..06baf509b 100644
--- a/ml/README.md
+++ b/ml/README.md
@@ -127,10 +127,10 @@ Below is a list of all the available configuration params and their default valu
```
[ml]
# enabled = yes
- # maximum num samples to train = 14400
- # minimum num samples to train = 3600
- # train every = 3600
- # number of models per dimension = 2
+ # maximum num samples to train = 21600
+ # minimum num samples to train = 900
+ # train every = 10800
+ # number of models per dimension = 9
# dbengine anomaly rate every = 30
# num samples to diff = 1
# num samples to smooth = 3
@@ -186,10 +186,10 @@ This example assumes 3 child nodes [streaming](https://github.com/netdata/netdat
### Descriptions (min/max)
- `enabled`: `yes` to enable, `no` to disable.
-- `maximum num samples to train`: (`3600`/`86400`) This is the maximum amount of time you would like to train each model on. For example, the default of `14400` trains on the preceding 4 hours of data, assuming an `update every` of 1 second.
+- `maximum num samples to train`: (`3600`/`86400`) This is the maximum amount of time you would like to train each model on. For example, the default of `21600` trains on the preceding 6 hours of data, assuming an `update every` of 1 second.
- `minimum num samples to train`: (`900`/`21600`) This is the minimum amount of data required to be able to train a model. For example, the default of `900` implies that once at least 15 minutes of data is available for training, a model is trained, otherwise it is skipped and checked again at the next training run.
-- `train every`: (`1800`/`21600`) This is how often each model will be retrained. For example, the default of `3600` means that each model is retrained every hour. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period.
-- `number of models per dimension`: (`1`/`168`) This is the number of trained models that will be used for scoring. For example the default `number of models per dimension = 2` means that the two most recently trained models (covering up to the most recent `maximum num samples to train` of training data) for the dimension will be used to determine the corresponding anomaly bit. Alternatively, if you have `train every = 3600` and `number of models per dimension = 24` this means that netdata will store and use the last 24 trained models for each dimension when determining the anomaly bit, this means that for the latest feature vector in this configuration to be considered anomalous it would need to look anomalous across _all_ the models trained for that dimension in the last 24 hours. As such, increasing `number of models per dimension` may reduce some false positives since it will result in more models (covering a wider time frame of training) being used during scoring.
+- `train every`: (`1800`/`21600`) This is how often each model will be retrained. For example, the default of `10800` means that each model is retrained every 3 hours. Note: The training of all models is spread out across the `train every` period for efficiency, so in reality, it means that each model will be trained in a staggered manner within each `train every` period.
+- `number of models per dimension`: (`1`/`168`) This is the number of trained models used for scoring. The default `number of models per dimension = 9` means the 9 most recently trained models for a dimension are used to determine its anomaly bit. Under the default settings of `maximum num samples to train = 21600`, `train every = 10800` and `number of models per dimension = 9`, netdata stores and uses the last 9 trained models per dimension, so for the latest feature vector to be flagged anomalous it must look anomalous across _all_ models trained for that dimension in the last 9*(10800/3600) = 27 hours (see the arithmetic sketch after this list). As such, increasing `number of models per dimension` may reduce some false positives, since more models (covering a wider time frame of training) are used during scoring.
- `dbengine anomaly rate every`: (`30`/`900`) This is how often netdata will aggregate all the anomaly bits into a single chart (`anomaly_detection.anomaly_rates`). The aggregation into a single chart allows enabling anomaly rate ranking over _all_ metrics with one API call as opposed to a call per chart.
- `num samples to diff`: (`0`/`1`) This is a `0` or `1` to determine if you want the model to operate on differences of the raw data or just the raw data. For example, the default of `1` means that we take differences of the raw values. Using differences is more general and works on dimensions that might naturally tend to have some trends or cycles in them that is normal behavior to which we don't want to be too sensitive.
- `num samples to smooth`: (`0`/`5`) This is a small integer that controls the amount of smoothing applied as part of the feature processing used by the model. For example, the default of `3` means that the rolling average of the last 3 values is used. Smoothing like this helps the model be a little more robust to spiky types of dimensions that naturally "jump" up or down as part of their normal behavior.
diff --git a/ml/ml.cc b/ml/ml.cc
index 34f2b93bd..a5f0fa062 100644
--- a/ml/ml.cc
+++ b/ml/ml.cc
@@ -1565,7 +1565,7 @@ void ml_init()
for (size_t idx = 0; idx != Cfg.num_training_threads; idx++) {
ml_training_thread_t *training_thread = &Cfg.training_threads[idx];
- size_t max_elements_needed_for_training = Cfg.max_train_samples * (Cfg.lag_n + 1);
+ size_t max_elements_needed_for_training = (size_t) Cfg.max_train_samples * (size_t) (Cfg.lag_n + 1);
training_thread->training_cns = new calculated_number_t[max_elements_needed_for_training]();
training_thread->scratch_training_cns = new calculated_number_t[max_elements_needed_for_training]();
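
The casts added above guard the multiplication itself: C multiplies two `unsigned` operands in 32-bit arithmetic and widens only the result, so a large enough product wraps before the assignment to size_t. The config maxima here are far smaller, so the change is defensive; the values below are hypothetical, chosen to show the wrap:

    unsigned a = 3000000000u;                 // e.g. a very large sample count
    unsigned b = 4;                           // e.g. lag_n + 1
    size_t wrong = a * b;                     // wraps to 3410065408 in 32-bit unsigned
    size_t right = (size_t)a * (size_t)b;     // 12000000000 on 64-bit platforms
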
diff --git a/netdata-installer.sh b/netdata-installer.sh
index c145c2800..c6272647f 100755
--- a/netdata-installer.sh
+++ b/netdata-installer.sh
@@ -816,17 +816,6 @@ detect_libc() {
return 0
}
-rename_libbpf_packaging() {
- if [ "$(get_kernel_version)" -ge "004014000" ]; then
- cp packaging/current_libbpf.checksums packaging/libbpf.checksums
- cp packaging/current_libbpf.version packaging/libbpf.version
- else
- cp packaging/libbpf_0_0_9.checksums packaging/libbpf.checksums
- cp packaging/libbpf_0_0_9.version packaging/libbpf.version
- fi
-}
-
-
build_libbpf() {
cd "${1}/src" > /dev/null || return 1
mkdir root build
@@ -866,16 +855,20 @@ bundle_libbpf() {
[ -n "${GITHUB_ACTIONS}" ] && echo "::group::Bundling libbpf."
- rename_libbpf_packaging
-
progress "Prepare libbpf"
- LIBBPF_PACKAGE_VERSION="$(cat packaging/libbpf.version)"
+ if [ "$(get_kernel_version)" -ge "004014000" ]; then
+ LIBBPF_PACKAGE_VERSION="$(cat packaging/current_libbpf.version)"
+ LIBBPF_PACKAGE_COMPONENT="current_libbpf"
+ else
+ LIBBPF_PACKAGE_VERSION="$(cat packaging/libbpf_0_0_9.version)"
+ LIBBPF_PACKAGE_COMPONENT="libbpf_0_0_9"
+ fi
tmp="$(mktemp -d -t netdata-libbpf-XXXXXX)"
LIBBPF_PACKAGE_BASENAME="v${LIBBPF_PACKAGE_VERSION}.tar.gz"
- if fetch_and_verify "libbpf" \
+ if fetch_and_verify "${LIBBPF_PACKAGE_COMPONENT}" \
"https://github.com/netdata/libbpf/archive/${LIBBPF_PACKAGE_BASENAME}" \
"${LIBBPF_PACKAGE_BASENAME}" \
"${tmp}" \
@@ -1057,64 +1050,6 @@ fi
[ -n "${GITHUB_ACTIONS}" ] && echo "::group::Installing Netdata."
# -----------------------------------------------------------------------------
-
-# shellcheck disable=SC2230
-md5sum="$(command -v md5sum 2> /dev/null || command -v md5 2> /dev/null)"
-
-deleted_stock_configs=0
-if [ ! -f "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done" ]; then
-
- progress "Backup existing netdata configuration before installing it"
-
- config_signature_matches() {
- md5="${1}"
- file="${2}"
-
- if [ -f "configs.signatures" ]; then
- grep "\['${md5}'\]='${file}'" "configs.signatures" > /dev/null
- return $?
- fi
-
- return 1
- }
-
- # clean up stock config files from the user configuration directory
- (find -L "${NETDATA_PREFIX}/etc/netdata" -type f -not -path '*/\.*' -not -path "${NETDATA_PREFIX}/etc/netdata/orig/*" \( -name '*.conf.old' -o -name '*.conf' -o -name '*.conf.orig' -o -name '*.conf.installer_backup.*' \)) | while IFS= read -r x; do
- if [ -f "${x}" ]; then
- # find it relative filename
- p="$(echo "${NETDATA_PREFIX}/etc/netdata" | sed -e 's/\//\\\//')"
- f="$(echo "${x}" | sed -e "s/${p}//")"
-
- # find the stock filename
- t="$(echo "${f}" | sed -e 's/\.conf\.installer_backup\..*/\.conf/')"
- t="$(echo "${t}" | sed -e 's/\.conf\.old/\.conf/')"
- t="$(echo "${t}" | sed -e 's/\.conf\.orig/\.conf/')"
- t="$(echo "${t}" | sed -e 's/orig//')"
-
- if [ -z "${md5sum}" ] || [ ! -x "${md5sum}" ]; then
- # we don't have md5sum - keep it
- echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED}is not known to distribution${TPUT_RESET}. Keeping it."
- else
- # find its checksum
- md5="$(${md5sum} < "${x}" | cut -d ' ' -f 1)"
-
- if config_signature_matches "${md5}" "${t}"; then
- # it is a stock version - remove it
- echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' is stock version of '${t}'."
- run rm -f "${x}"
- # shellcheck disable=SC2030
- deleted_stock_configs=$((deleted_stock_configs + 1))
- else
- # edited by user - keep it
- echo >&2 "File '${TPUT_CYAN}${x}${TPUT_RESET}' ${TPUT_RED} does not match stock of${TPUT_RESET} ${TPUT_CYAN}'${t}'${TPUT_RESET}. Keeping it."
- fi
- fi
- fi
- done
-fi
-touch "${NETDATA_PREFIX}/etc/netdata/.installer-cleanup-of-stock-configs-done"
-
-# -----------------------------------------------------------------------------
progress "Install netdata"
if ! run $make install; then
@@ -1212,16 +1147,8 @@ fi
# --- stock conf dir ----
[ ! -d "${NETDATA_STOCK_CONFIG_DIR}" ] && mkdir -p "${NETDATA_STOCK_CONFIG_DIR}"
-
-helplink="000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES"
-# shellcheck disable=SC2031
-[ ${deleted_stock_configs} -eq 0 ] && helplink=""
-for link in "orig" "${helplink}"; do
- if [ -n "${link}" ]; then
- [ -L "${NETDATA_USER_CONFIG_DIR}/${link}" ] && run rm -f "${NETDATA_USER_CONFIG_DIR}/${link}"
- run ln -s "${NETDATA_STOCK_CONFIG_DIR}" "${NETDATA_USER_CONFIG_DIR}/${link}"
- fi
-done
+[ -L "${NETDATA_USER_CONFIG_DIR}/orig" ] && run rm -f "${NETDATA_USER_CONFIG_DIR}/orig"
+run ln -s "${NETDATA_STOCK_CONFIG_DIR}" "${NETDATA_USER_CONFIG_DIR}/orig"
# --- web dir ----
@@ -1295,6 +1222,23 @@ if [ "$(id -u)" -eq 0 ]; then
fi
fi
+ if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/debugfs.plugin" ]; then
+ run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/debugfs.plugin"
+ capabilities=0
+ if ! iscontainer && command -v setcap 1> /dev/null 2>&1; then
+ run chmod 0750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/debugfs.plugin"
+ if run setcap cap_dac_read_search+ep "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/debugfs.plugin"; then
+ # if we managed to setcap, but we fail to execute debugfs.plugin setuid to root
+ "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/debugfs.plugin" -t > /dev/null 2>&1 && capabilities=1 || capabilities=0
+ fi
+ fi
+
+ if [ $capabilities -eq 0 ]; then
+ # fix debugfs.plugin to be setuid to root
+ run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/debugfs.plugin"
+ fi
+ fi
+
if [ -f "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin" ]; then
run chown "root:${NETDATA_GROUP}" "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin"
run chmod 4750 "${NETDATA_PREFIX}/usr/libexec/netdata/plugins.d/freeipmi.plugin"
diff --git a/netdata.spec.in b/netdata.spec.in
index c2fa7dcab..baf690660 100644
--- a/netdata.spec.in
+++ b/netdata.spec.in
@@ -11,7 +11,7 @@
AutoReqProv: yes
# This is temporary and should eventually be resolved. This bypasses
-# the default rhel __os_install_post which throws a python compile
-# the default rhel __os_install_post which throws a python compile
# error.
%global __os_install_post %{nil}
@@ -27,20 +27,6 @@ AutoReqProv: yes
%global _have_ebpf 0
%endif
-# Disable FreeIPMI on Amazon Linux
-%if 0%{?amzn}
-%global _have_freeipmi 0
-%else
-%global _have_freeipmi 1
-%endif
-
-# Disable the NFACCT plugin on Amazon Linux
-%if 0%{?amzn}
-%global _have_nfacct 0
-%else
-%global _have_nfacct 1
-%endif
-
# Mitigate the cross-distro mayhem by strictly defining the libexec destination
%define _prefix /usr
%define _sysconfdir /etc
@@ -54,13 +40,18 @@ AutoReqProv: yes
# Redefine centos_ver to standardize on a single macro
%{?rhel:%global centos_ver %rhel}
-#
-# Conditional build:
-%bcond_without netns # build with netns support (cgroup-network)
+# Disable FreeIPMI on Amazon Linux 2023 and newer
+%if 0%{?amzn} >= 2023
+%global _have_freeipmi 0
+%else
+%global _have_freeipmi 1
+%endif
-%if 0%{?fedora} || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1140
+# Disable NFACCT for RHEL equivalents and Amazon Linux
+%if 0%{?centos_ver} || 0%{?amzn}
+%global _have_nfacct 0
%else
-%undefine with_netns
+%global _have_nfacct 1
%endif
Summary: Real-time performance monitoring, done right!
@@ -150,33 +141,50 @@ Requires(pre): /usr/sbin/groupadd
Requires(pre): /usr/sbin/useradd
# #####################################################################
-# Functionality-dependent package dependencies
+# External plugin package dependencies
# #####################################################################
-# Note: Some or all of the Packages may be found in the EPEL repo,
-# rather than the standard ones
-
-# epbf dependencies
+# CentOS prior to CentOS 8 does not have a new enough version of RPM
+# to support weak dependencies. Explicitly requiring our default plugins
+# makes it impossible to properly test the packages prior to upload,
+# so we just skip depending on them on CentOS 7.
%if 0%{?_have_ebpf}
-%if 0%{?suse_version}
-BuildRequires: libelf-devel
-%else
-BuildRequires: elfutils-libelf-devel
+Requires: netdata-plugin-ebpf
+%endif
+Requires: netdata-plugin-apps
+Requires: netdata-plugin-pythond
+Requires: netdata-plugin-go
+Requires: netdata-plugin-debugfs
+Requires: netdata-plugin-chartsd
+Requires: netdata-plugin-slabinfo
+Requires: netdata-plugin-perf
+%if 0%{?_have_nfacct}
+Requires: netdata-plugin-nfacct
+%endif
+%if 0%{?_have_freeipmi} && 0%{?centos_ver} != 6 && 0%{?centos_ver} != 7 && 0%{?amazon_linux} != 2
+Suggests: netdata-plugin-freeipmi
%endif
+%if 0%{?centos_ver} != 6 && 0%{?centos_ver} != 7 && 0%{?amazon_linux} != 2
+Suggests: netdata-plugin-cups
%endif
-# end - ebpf dependencies
+
+
+# #####################################################################
+# Functionality-dependent package dependencies
+# #####################################################################
+# Note: Some or all of the Packages may be found in the EPEL repo,
+# rather than the standard ones
# nfacct plugin dependencies
-%if %{_have_nfacct}
+
+%if 0%{?_have_nfacct}
BuildRequires: libmnl-devel
-%if 0%{?fedora} || 0%{?suse_version} >= 1140
BuildRequires: libnetfilter_acct-devel
%endif
-%endif
# end nfacct plugin dependencies
# freeipmi plugin dependencies
-%if %{_have_freeipmi}
+%if 0%{?_have_freeipmi}
BuildRequires: freeipmi-devel
%endif
# end - freeipmi plugin dependencies
@@ -234,6 +242,12 @@ autoreconf -ivf
%if 0%{!?_have_ebpf}
--disable-ebpf
%endif
+ %if 0%{!?_have_freeipmi}
+ --disable-plugin-freeipmi
+ %endif
+ %if 0%{!?_have_nfacct}
+ --disable-plugin-nfacct
+ %endif
%if 0%{?centos_ver:1}
%if %{centos_ver} < 8
--with-bundled-protobuf \
@@ -272,7 +286,7 @@ install -m 644 -p system/logrotate/netdata "${RPM_BUILD_ROOT}%{_sysconfdir}/logr
# ###########################################################
# Install freeipmi
-%if %{_have_freeipmi}
+%if 0%{?_have_freeipmi}
install -m 4750 -p freeipmi.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/freeipmi.plugin"
%endif
@@ -281,6 +295,10 @@ install -m 4750 -p freeipmi.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plug
install -m 4750 -p apps.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/apps.plugin"
# ###########################################################
+# Install debugfs.plugin
+install -m 0750 -p debugfs.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/debugfs.plugin"
+
+# ###########################################################
# Install perf.plugin
install -m 4750 -p perf.plugin "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/plugins.d/perf.plugin"
@@ -310,6 +328,11 @@ install -m 755 -d "${RPM_BUILD_ROOT}%{_localstatedir}/log/%{name}"
install -m 755 -d "${RPM_BUILD_ROOT}%{_localstatedir}/lib/%{name}/registry"
# ###########################################################
+# Install uninstaller script
+install -m 750 -p packaging/installer/netdata-uninstaller.sh \
+ "${RPM_BUILD_ROOT}%{_libexecdir}/%{name}/netdata-uninstaller.sh"
+
+# ###########################################################
# Install netdata service
install -m 755 -d "${RPM_BUILD_ROOT}%{_unitdir}"
install -m 644 -p system/systemd/netdata.service "${RPM_BUILD_ROOT}%{_unitdir}/netdata.service"
@@ -457,14 +480,13 @@ rm -rf "${RPM_BUILD_ROOT}"
%files
%doc README.md
-%{_sysconfdir}/%{name}
%config(noreplace) %{_sysconfdir}/%{name}/netdata.conf
+%attr(0755,root,netdata) %{_sysconfdir}/%{name}/edit-config
+%attr(0644,root,netdata) %{_sysconfdir}/%{name}/.install-type
+%dir %{_sysconfdir}/%{name}/health.d
+%dir %{_sysconfdir}/%{name}/statsd.d
%config(noreplace) %{_sysconfdir}/logrotate.d/%{name}
-%dir %{_libdir}/%{name}
-%dir %{_datadir}/%{name}
%{_libdir}/%{name}
-%{_libdir}/%{name}/conf.d/
-%{_libexecdir}/%{name}
%{_sbindir}/%{name}
%{_sbindir}/netdatacli
%{_sbindir}/netdata-claim.sh
@@ -472,40 +494,33 @@ rm -rf "${RPM_BUILD_ROOT}"
%{_unitdir}/netdata.service
%{_presetdir}/50-netdata.preset
-%defattr(0750,root,netdata,0750)
-
-%dir %{_libexecdir}/%{name}/python.d
-%dir %{_libexecdir}/%{name}/charts.d
+%dir %{_libexecdir}/%{name}
%dir %{_libexecdir}/%{name}/plugins.d
+%defattr(0750,root,netdata,0750)
+%{_libexecdir}/%{name}/install-service.sh
+%{_libexecdir}/%{name}/netdata-updater.sh
+%{_libexecdir}/%{name}/netdata-uninstaller.sh
+%{_libexecdir}/%{name}/plugins.d/acl.sh
+%{_libexecdir}/%{name}/plugins.d/alarm.sh
+%{_libexecdir}/%{name}/plugins.d/alarm-email.sh
+%{_libexecdir}/%{name}/plugins.d/alarm-notify.sh
+%{_libexecdir}/%{name}/plugins.d/alarm-test.sh
+%{_libexecdir}/%{name}/plugins.d/anonymous-statistics.sh
+%{_libexecdir}/%{name}/plugins.d/cgroup-name.sh
+%{_libexecdir}/%{name}/plugins.d/get-kubernetes-labels.sh
+%{_libexecdir}/%{name}/plugins.d/health-cmdapi-test.sh
+%{_libexecdir}/%{name}/plugins.d/ioping.plugin
+%{_libexecdir}/%{name}/plugins.d/loopsleepms.sh.inc
+%{_libexecdir}/%{name}/plugins.d/request.sh
+%{_libexecdir}/%{name}/plugins.d/system-info.sh
+%{_libexecdir}/%{name}/plugins.d/tc-qos-helper.sh
+%{_libexecdir}/%{name}/plugins.d/template_dim.sh
-%{_libexecdir}/%{name}/python.d
-%{_libexecdir}/%{name}/plugins.d
-
-%caps(cap_dac_read_search,cap_sys_ptrace=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/apps.plugin
-
-%if %{with netns}
# cgroup-network detects the network interfaces of CGROUPs
# it must be able to use setns() and run cgroup-network-helper.sh as root
# the helper script reads /proc/PID/fdinfo/* files, runs virsh, etc.
%attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/cgroup-network
%attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/cgroup-network-helper.sh
-%endif
-
-# ebpf plugin
-%if 0%{?_have_ebpf}
-%attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/ebpf.plugin
-%endif
-
-# perf plugin
-# This should be CAP_PERFMON once RPM finally learns about it, but needs to be CAP_SYS_ADMIN for now.
-# %caps(cap_perfmon=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/perf.plugin
-%caps(cap_sys_admin=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/perf.plugin
-
-# perf plugin
-%caps(cap_dac_read_search=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/slabinfo.plugin
-
-# go.d.plugin (the capability required for wireguard module)
-%caps(cap_net_admin,cap_net_raw=eip) %{_libexecdir}/%{name}/plugins.d/go.d.plugin
# Enforce 0644 for files and 0755 for directories
# for the netdata web directory
@@ -521,45 +536,355 @@ rm -rf "${RPM_BUILD_ROOT}"
%attr(0770,netdata,netdata) %dir %{_localstatedir}/lib/%{name}/registry
# Free IPMI belongs to a different sub-package
-%if %{_have_freeipmi}
+%if 0%{?_have_freeipmi}
%exclude %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin
%endif
+# NFACCT belongs to a different sub-package
+%if 0%{?_have_nfacct}
+%exclude %{_libexecdir}/%{name}/plugins.d/nfacct.plugin
+%endif
+
+# Charts.d belongs to a different sub-package
+%exclude %{_libexecdir}/%{name}/plugins.d/charts.d.plugin
+%exclude %{_libexecdir}/%{name}/plugins.d/charts.d.dryrun-helper.sh
+%exclude %{_libexecdir}/%{name}/charts.d/
+%exclude %{_libdir}/%{name}/conf.d/charts.d.conf
+%exclude %{_libdir}/%{name}/conf.d/charts.d/
+
+# eBPF belongs to a different sub-package
+%if 0%{?_have_ebpf}
+%exclude %{_libexecdir}/%{name}/plugins.d/ebpf.plugin
+%exclude %{_libdir}/%{name}/conf.d/ebpf.d.conf
+%exclude %{_libdir}/%{name}/conf.d/ebpf.d
+%exclude %{_libexecdir}/%{name}/plugins.d/ebpf.d
+%endif
+
+# Python.d belongs to a different sub-package
+%exclude %{_libexecdir}/%{name}/plugins.d/python.d.plugin
+%exclude %{_libexecdir}/%{name}/python.d
+%exclude %{_libdir}/%{name}/conf.d/python.d.conf
+%exclude %{_libdir}/%{name}/conf.d/python.d
+
+# Go.d belongs to a different sub-package
+%exclude %{_libexecdir}/%{name}/plugins.d/go.d.plugin
+%exclude %{_libdir}/%{name}/conf.d/go.d.conf
+%exclude %{_libdir}/%{name}/conf.d/go.d
+
+# apps belongs to a different sub-package
+%exclude %{_libexecdir}/%{name}/plugins.d/apps.plugin
+%exclude %{_libdir}/%{name}/conf.d/apps_groups.conf
+
+# slabinfo belongs to a different sub-package
+%exclude %{_libexecdir}/%{name}/plugins.d/slabinfo.plugin
+
+# perf belongs to a different sub-package
+%exclude %{_libexecdir}/%{name}/plugins.d/perf.plugin
+
# CUPS belongs to a different sub package
%if 0%{?centos_ver} != 6 && 0%{?centos_ver} != 7
%exclude %{_libexecdir}/%{name}/plugins.d/cups.plugin
%package plugin-cups
-Summary: The Common Unix Printing System plugin for netdata
+Summary: The CUPS metrics collection plugin for the Netdata Agent
Group: Applications/System
Requires: cups >= 1.7
Requires: netdata = %{version}
%description plugin-cups
- This is the Common Unix Printing System plugin for the netdata daemon.
-Use this plugin to enable metrics collection from cupsd, the daemon running when CUPS is enabled on the system
+ This plugin allows the Netdata Agent to collect metrics from the Common UNIX Printing System.
+
+%pre plugin-cups
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
%files plugin-cups
%attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/cups.plugin
%endif
-%if %{_have_freeipmi}
+%if 0%{?_have_freeipmi}
%package plugin-freeipmi
-Summary: FreeIPMI - The Intelligent Platform Management System
+Summary: The FreeIPMI metrics collection plugin for the Netdata Agent
Group: Applications/System
Requires: freeipmi
Requires: netdata = %{version}
%description plugin-freeipmi
- The IPMI specification defines a set of interfaces for platform management.
-It is implemented by a number vendors for system management. The features of IPMI that most users will be interested in
-are sensor monitoring, system event monitoring, power control, and serial-over-LAN (SOL).
+ This plugin allows the Netdata Agent to collect metrics from hardware using FreeIPMI.
+
+%pre plugin-freeipmi
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
%files plugin-freeipmi
%attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/freeipmi.plugin
%endif
+%if 0%{?_have_nfacct}
+%package plugin-nfacct
+Summary: The NFACCT metrics collection plugin for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+
+%description plugin-nfacct
+ This plugin allows the Netdata Agent to collect metrics from the firewall using NFACCT objects.
+
+%pre plugin-nfacct
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-nfacct
+%attr(4750,root,netdata) %{_libexecdir}/%{name}/plugins.d/nfacct.plugin
+%endif
+
+%package plugin-chartsd
+Summary: The charts.d metrics collection plugin for the Netdata Agent
+Group: Applications/System
+Requires: bash
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+%if 0%{?centos_ver} != 7
+Suggests: nut
+Suggests: apcupsd
+Suggests: iw
+Suggests: sudo
+%endif
+
+%description plugin-chartsd
+ This plugin adds a selection of additional collectors written in shell script to the Netdata Agent.
+It includes collectors for NUT, APCUPSD, LibreSWAN, OpenSIPS, and wireless access point statistics.
+
+%pre plugin-chartsd
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-chartsd
+%defattr(0750,root,netdata,0750)
+%{_libexecdir}/%{name}/plugins.d/charts.d.plugin
+%{_libexecdir}/%{name}/plugins.d/charts.d.dryrun-helper.sh
+%{_libexecdir}/%{name}/charts.d/
+%defattr(0644,root,netdata,0644)
+%{_libdir}/%{name}/conf.d/charts.d.conf
+%{_libdir}/%{name}/conf.d/charts.d/
+
+%if 0%{?_have_ebpf}
+%package plugin-ebpf
+Summary: The eBPF metrics collection plugin for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+%if 0%{?centos_ver} != 7
+Recommends: netdata-plugin-apps = %{version}
+Recommends: netdata-ebpf-legacy-code >= %{version}
+%else
+Requires: netdata-plugin-apps = %{version}
+Requires: netdata-ebpf-legacy-code >= %{version}
+%endif
+
+%description plugin-ebpf
+ This plugin allows the Netdata Agent to use eBPF code to collect more detailed kernel-level metrics for the system.
+
+%pre plugin-ebpf
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-ebpf
+%defattr(4750,root,netdata,4750)
+%{_libexecdir}/%{name}/plugins.d/ebpf.plugin
+%defattr(0644,root,netdata,0644)
+%{_libdir}/%{name}/conf.d/ebpf.d.conf
+%{_libdir}/%{name}/conf.d/ebpf.d
+
+%package ebpf-legacy-code
+Summary: Compiled eBPF legacy code for the Netdata eBPF plugin
+Group: Applications/System
+Requires: netdata-plugin-ebpf = %{version}
+Conflicts: netdata < %{version}
+
+%description ebpf-legacy-code
+ This package provides the pre-compiled eBPF legacy code for use by the Netdata eBPF plugin.
+ This code is only needed when using the eBPF plugin with kernel versions before 5.10.
+
+%pre ebpf-legacy-code
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files ebpf-legacy-code
+%defattr(0640,root,netdata,0640)
+%{_libexecdir}/%{name}/plugins.d/ebpf.d/*.o
+
+%endif
+
+%package plugin-pythond
+Summary: The python.d metrics collection plugin for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+%if 0%{?centos_ver} == 7 || 0%{?centos_ver} == 6
+Requires: python
+%else
+%if 0%{?centos_ver} == 8
+Requires: python38
+%else
+Requires: python3
+%endif
+%endif
+%if 0%{?centos_ver} != 7
+Suggests: sudo
+%endif
+
+%description plugin-pythond
+ This plugin adds a selection of additional collectors written in Python to the Netdata Agent.
+Many of the collectors provided by this package are also available in netdata-plugin-go. In most cases, you probably
+want to use those versions instead of the Python versions.
+
+%pre plugin-pythond
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-pythond
+%defattr(0750,root,netdata,0750)
+%{_libexecdir}/%{name}/plugins.d/python.d.plugin
+%{_libexecdir}/%{name}/python.d
+%defattr(0640,root,netdata,0640)
+%{_libdir}/%{name}/conf.d/python.d.conf
+%{_libdir}/%{name}/conf.d/python.d
+
+%package plugin-go
+Summary: The go.d metrics collection plugin for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+%if 0%{?centos_ver} != 7
+Suggests: nvme-cli
+Suggests: sudo
+%endif
+
+%description plugin-go
+ This plugin adds a selection of additional collectors written in Go to the Netdata Agent.
+A significant percentage of the application-specific collectors provided by Netdata are part of this plugin,
+so most users will want it installed.
+
+%pre plugin-go
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-go
+%defattr(0750,root,netdata,0750)
+# CAP_NET_ADMIN needed for WireGuard collector
+# CAP_NET_RAW needed for ping collector
+%caps(cap_net_admin,cap_net_raw=eip) %{_libexecdir}/%{name}/plugins.d/go.d.plugin
+%defattr(0644,root,netdata,0644)
+%{_libdir}/%{name}/conf.d/go.d.conf
+%{_libdir}/%{name}/conf.d/go.d
+
+%package plugin-apps
+Summary: The per-application metrics collection plugin for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+
+%description plugin-apps
+ This plugin allows the Netdata Agent to collect per-application and per-user metrics without using cgroups.
+
+%pre plugin-apps
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-apps
+%defattr(0750,root,netdata,0750)
+# CAP_DAC_READ_SEARCH and CAP_SYS_PTRACE needed for data collection by the plugin.
+%caps(cap_dac_read_search,cap_sys_ptrace=ep) %{_libexecdir}/%{name}/plugins.d/apps.plugin
+%defattr(0644,root,netdata,0644)
+%{_libdir}/%{name}/conf.d/apps_groups.conf
+
+%package plugin-slabinfo
+Summary: The slabinfo metrics collector for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+
+%description plugin-slabinfo
+ This plugin allows the Netdata Agent to collect performance and utilization metrics for the Linux kernel’s SLAB allocator.
+
+%pre plugin-slabinfo
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-slabinfo
+%defattr(0750,root,netdata,0750)
+# CAP_DAC_READ_SEARCH needed to access the files the plugin reads to collect data.
+%caps(cap_dac_read_search=ep) %{_libexecdir}/%{name}/plugins.d/slabinfo.plugin
+
+%package plugin-perf
+Summary: The perf metrics collector for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+
+%description plugin-perf
+ This plugin allows the Netdata Agent to collect metrics from the Linux perf subsystem.
+
+%pre plugin-perf
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-perf
+%defattr(0750,root,netdata,0750)
+# Either CAP_SYS_ADMIN or CAP_PERFMON needed for data collection
+# PERFMON is newer, so only try to use it on platforms which support it.
+%if 0%{?centos_ver} >= 9 || 0%{?fedora} >= 36
+%caps(cap_perfmon=ep) %{_libexecdir}/%{name}/plugins.d/perf.plugin
+%else
+%caps(cap_sys_admin=ep) %{_libexecdir}/%{name}/plugins.d/perf.plugin
+%endif
+
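The %caps directives in these %files sections apply file capabilities at install time instead of a blanket setuid bit. As a sanity check after installing a package, the capability set on disk can be read back with libcap's cap_get_file()/cap_to_text(); the sketch below is illustrative only, and the plugin path is just an example:

    #include <stdio.h>
    #include <sys/capability.h>   /* libcap; build with -lcap */

    int main(int argc, char **argv) {
        /* Example path; pass any installed plugin as argv[1]. */
        const char *path = argc > 1 ? argv[1]
            : "/usr/libexec/netdata/plugins.d/perf.plugin";

        /* NULL with errno ENODATA when the file has no capabilities set. */
        cap_t caps = cap_get_file(path);
        if (!caps) { perror("cap_get_file"); return 1; }

        char *txt = cap_to_text(caps, NULL);   /* e.g. "cap_perfmon=ep" */
        printf("%s: %s\n", path, txt ? txt : "(none)");

        cap_free(txt);
        cap_free(caps);
        return 0;
    }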
+%package plugin-debugfs
+Summary: The debugfs metrics collector for the Netdata Agent
+Group: Applications/System
+Requires: netdata = %{version}
+Conflicts: netdata < %{version}
+
+%description plugin-debugfs
+ This plugin allows the Netdata Agent to collect Linux kernel metrics exposed through debugfs.
+
+%pre plugin-debugfs
+
+if ! getent group netdata > /dev/null; then
+ groupadd --system netdata
+fi
+
+%files plugin-debugfs
+%defattr(0750,root,netdata,0750)
+# CAP_DAC_READ_SEARCH required for data collection.
+%caps(cap_dac_read_search=ep) %attr(0750,root,netdata) %{_libexecdir}/%{name}/plugins.d/debugfs.plugin
+
%changelog
+* Fri Apr 07 2023 Austin Hemmelgarn <austin@netdata.cloud> 0.0.0-19
+- Split additional plugins out into their own packages.
* Tue Mar 21 2023 Austin Hemmelgarn <austin@netdata.cloud> 0.0.0-18
- Fix systemd handling to follow BCP.
- Drop pre-systemd init support.
@@ -602,8 +927,9 @@ First draft refactor on package dependencies section
* Wed Jan 02 2019 Pawel Krupa <pkrupa@redhat.com> - 0.0.0-3
- Temporary set version statically
- Fix changelog ordering
-- Comment-out node.d configuration directory
+- Comment-out node.d configuration directory
* Wed Jan 02 2019 Pawel Krupa <pkrupa@redhat.com> - 0.0.0-2
- Fix permissions for log files
* Sun Nov 15 2015 Alon Bar-Lev <alonbl@redhat.com> - 0.0.0-1
- Initial add.
+
diff --git a/packaging/PLATFORM_SUPPORT.md b/packaging/PLATFORM_SUPPORT.md
index 0de0c3b16..2236ae846 100644
--- a/packaging/PLATFORM_SUPPORT.md
+++ b/packaging/PLATFORM_SUPPORT.md
@@ -68,8 +68,8 @@ to work on these platforms with minimal user effort.
| Debian | 10.x | x86\_64, i386, ARMv7, AArch64 | |
| Fedora | 38 | x86\_64, AArch64 | |
| Fedora | 37 | x86\_64, AArch64 | |
-| Fedora | 36 | x86\_64, AArch64 | |
| openSUSE | Leap 15.4 | x86\_64, AArch64 | |
+| openSUSE | Leap 15.5 | x86\_64, AArch64 | |
| Oracle Linux | 9.x | x86\_64, AArch64 | |
| Oracle Linux | 8.x | x86\_64, AArch64 | |
| Red Hat Enterprise Linux | 9.x | x86\_64, AArch64 | |
@@ -158,13 +158,11 @@ This is a list of platforms that we have supported in the recent past but no lon
|--------------|-----------|----------------------|
| Alpine Linux | 3.14 | EOL as of 2023-05-01 |
| Alpine Linux | 3.13 | EOL as of 2022-11-01 |
-| Alpine Linux | 3.12 | EOL as of 2022-05-01 |
| Debian | 9.x | EOL as of 2022-06-30 |
+| Fedora | 36 | EOL as of 2023-05-18 |
| Fedora | 35 | EOL as of 2022-12-13 |
-| Fedora | 34 | EOL as of 2022-06-07 |
| openSUSE | Leap 15.3 | EOL as of 2022-12-01 |
| Ubuntu | 21.10 | EOL as of 2022-07-31 |
-| Ubuntu | 21.04 | EOL as of 2022-01-01 |
| Ubuntu | 18.04 | EOL as of 2023-04-02 |
## Static builds
diff --git a/packaging/bundle-ebpf.sh b/packaging/bundle-ebpf.sh
index 3204345b0..0103fc4ed 100755
--- a/packaging/bundle-ebpf.sh
+++ b/packaging/bundle-ebpf.sh
@@ -2,18 +2,19 @@
SRCDIR="${1}"
PLUGINDIR="${2}"
+FORCE="${3}"
EBPF_VERSION="$(cat "${SRCDIR}/packaging/ebpf.version")"
EBPF_TARBALL="netdata-kernel-collector-glibc-${EBPF_VERSION}.tar.xz"
-if [ -x "${PLUGINDIR}/ebpf.plugin" ] ; then
+if [ -x "${PLUGINDIR}/ebpf.plugin" ] || [ "${FORCE}" = "force" ]; then
mkdir -p "${SRCDIR}/tmp/ebpf"
curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/kernel-collector/releases/download/${EBPF_VERSION}/${EBPF_TARBALL}" > "${EBPF_TARBALL}" || exit 1
grep "${EBPF_TARBALL}" "${SRCDIR}/packaging/ebpf.checksums" | sha256sum -c - || exit 1
- tar -xaf "${EBPF_TARBALL}" -C "${SRCDIR}/tmp/ebpf" || exit 1
+ tar -xvaf "${EBPF_TARBALL}" -C "${SRCDIR}/tmp/ebpf" || exit 1
if [ ! -d "${PLUGINDIR}/ebpf.d" ];then
mkdir "${PLUGINDIR}/ebpf.d"
fi
# shellcheck disable=SC2046
- cp -a $(find "${SRCDIR}/tmp/ebpf" -mindepth 1 -maxdepth 1) "${PLUGINDIR}/ebpf.d"
+ cp -r $(find "${SRCDIR}/tmp/ebpf" -mindepth 1 -maxdepth 1) "${PLUGINDIR}/ebpf.d"
fi
diff --git a/packaging/bundle-libbpf.sh b/packaging/bundle-libbpf.sh
index 7e6e22a9e..1c5542765 100755
--- a/packaging/bundle-libbpf.sh
+++ b/packaging/bundle-libbpf.sh
@@ -22,6 +22,6 @@ curl -sSL --connect-timeout 10 --retry 3 "https://github.com/netdata/libbpf/arch
sha256sum -c "${1}/packaging/libbpf.checksums" || exit 1
tar -xzf "${LIBBPF_TARBALL}" -C "${1}/externaldeps/libbpf" || exit 1
make -C "${LIBBPF_BUILD_PATH}/src" BUILD_STATIC_ONLY=1 OBJDIR=build/ DESTDIR=../ install || exit 1
-cp -a "${LIBBPF_BUILD_PATH}/usr/${lib_subdir}/libbpf.a" "${1}/externaldeps/libbpf" || exit 1
-cp -a "${LIBBPF_BUILD_PATH}/usr/include" "${1}/externaldeps/libbpf" || exit 1
-cp -a "${LIBBPF_BUILD_PATH}/include/uapi" "${1}/externaldeps/libbpf/include" || exit 1
+cp -r "${LIBBPF_BUILD_PATH}/usr/${lib_subdir}/libbpf.a" "${1}/externaldeps/libbpf" || exit 1
+cp -r "${LIBBPF_BUILD_PATH}/usr/include" "${1}/externaldeps/libbpf" || exit 1
+cp -r "${LIBBPF_BUILD_PATH}/include/uapi" "${1}/externaldeps/libbpf/include" || exit 1
diff --git a/packaging/current_libbpf.checksums b/packaging/current_libbpf.checksums
index e0b91c0c6..2f0d8a9b8 100644
--- a/packaging/current_libbpf.checksums
+++ b/packaging/current_libbpf.checksums
@@ -1 +1 @@
-f2a8214c967153fcbb7a8f2af59c23a38f6e175384878dd37648649c5d8182c4 v1.1_netdata.tar.gz
+97d0b6d5b86ae473883aadcba4fcecf47f608f5d0eb3dbb75eb2dbde271f0046 v1.2_netdata.tar.gz
diff --git a/packaging/current_libbpf.version b/packaging/current_libbpf.version
index b0797d5a8..eff71eefb 100644
--- a/packaging/current_libbpf.version
+++ b/packaging/current_libbpf.version
@@ -1 +1 @@
-1.1_netdata
+1.2_netdata
diff --git a/packaging/docker/Dockerfile b/packaging/docker/Dockerfile
index fcd9432b5..3a4b9025a 100644
--- a/packaging/docker/Dockerfile
+++ b/packaging/docker/Dockerfile
@@ -104,12 +104,13 @@ RUN chown -R root:root \
chmod 0755 /usr/libexec/netdata/plugins.d/*.plugin && \
chmod 4755 \
/usr/libexec/netdata/plugins.d/cgroup-network \
- /usr/libexec/netdata/plugins.d/apps.plugin && \
+ /usr/libexec/netdata/plugins.d/apps.plugin \
+ /usr/libexec/netdata/plugins.d/debugfs.plugin && \
if [ -f /usr/libexec/netdata/plugins.d/freeipmi.plugin ]; then \
chmod 4755 /usr/libexec/netdata/plugins.d/freeipmi.plugin; \
fi && \
- if [ -f /usr/libexec/netdata/plugins.d/go.d.plugin ] && command -v setcap 1>/dev/null 2>&1; then \
- setcap "cap_net_raw=eip" /usr/libexec/netdata/plugins.d/go.d.plugin 2>/dev/null; \
+ if [ -f /usr/libexec/netdata/plugins.d/go.d.plugin ]; then \
+ chmod 4755 /usr/libexec/netdata/plugins.d/go.d.plugin; \
fi && \
# Group write permissions due to: https://github.com/netdata/netdata/pull/6543
find /var/lib/netdata /var/cache/netdata -type d -exec chmod 0770 {} \; && \
diff --git a/packaging/ebpf-co-re.checksums b/packaging/ebpf-co-re.checksums
index 813e421cc..4d4f585d7 100644
--- a/packaging/ebpf-co-re.checksums
+++ b/packaging/ebpf-co-re.checksums
@@ -1 +1 @@
-a50e649635cc2fe86c21a08334ee73451f08591ebbda8b5d0012c3b8fad2cc1e netdata-ebpf-co-re-glibc-v1.1.2.tar.xz
+2bcbe98689efe6ee364cb3e9161ef549198d7f181845add923c6561bc8fb74d1 netdata-ebpf-co-re-glibc-v1.2.0.tar.xz
diff --git a/packaging/ebpf-co-re.version b/packaging/ebpf-co-re.version
index 0f1acbd56..79127d85a 100644
--- a/packaging/ebpf-co-re.version
+++ b/packaging/ebpf-co-re.version
@@ -1 +1 @@
-v1.1.2
+v1.2.0
diff --git a/packaging/ebpf.checksums b/packaging/ebpf.checksums
index 0d0be4eab..739cc2f3f 100644
--- a/packaging/ebpf.checksums
+++ b/packaging/ebpf.checksums
@@ -1,3 +1,3 @@
-597a20895bbedcf87528b08fa9057426bd3c7638aa1ffac94f8987a90634513d ./netdata-kernel-collector-glibc-v1.1.2.tar.xz
-25db2232b75bdb7fc6e10db870c3a3290f52ecfcdcf546d0e51947f2a4c17ccf ./netdata-kernel-collector-musl-v1.1.2.tar.xz
-1d60425f5e8c6e30b3be86028dfc62c16022d8fe561e4c21c84cf6e8b998cd7d ./netdata-kernel-collector-static-v1.1.2.tar.xz
+a7386ffca8cbe9aa24c01b0b97b2e3553c11d696752037551277f9b1f5feb100 ./netdata-kernel-collector-glibc-v1.2.0.tar.xz
+2b37ce6129dc61fd79e5519c150196099d363b4e57dafc55b210f64f9b40a3ec ./netdata-kernel-collector-musl-v1.2.0.tar.xz
+ad22f11cb545557c09955f3728ba76d9734345c0ab84927086bb0e99a9f88f80 ./netdata-kernel-collector-static-v1.2.0.tar.xz
diff --git a/packaging/ebpf.version b/packaging/ebpf.version
index 0f1acbd56..79127d85a 100644
--- a/packaging/ebpf.version
+++ b/packaging/ebpf.version
@@ -1 +1 @@
-v1.1.2
+v1.2.0
diff --git a/packaging/go.d.checksums b/packaging/go.d.checksums
index 6f338464c..09b86f661 100644
--- a/packaging/go.d.checksums
+++ b/packaging/go.d.checksums
@@ -1,17 +1,17 @@
-dc6cf312bf8211236c141a67aa8571ac58e98f9705dfcb5dc1a3103732a053a0 *config.tar.gz
-2c6d0cee9207d00fe3f7e0845193cd511d40239ce94edcbaeb7319674ec86245 *go.d.plugin-v0.52.2.darwin-amd64.tar.gz
-75bf5ac062bec27856890b12e6d5e5be0ecb931d25e2d0cba8f0e3c72f1255fd *go.d.plugin-v0.52.2.darwin-arm64.tar.gz
-4a8a55c2bed0674019acd280aebc9f02ba958fa4e6f78ac3e88ffddd68254a36 *go.d.plugin-v0.52.2.freebsd-386.tar.gz
-117316e1f9d945cfb6c9a8b7ee4576cf5dd27b9237fd21bae9fbbddc80aa0dc5 *go.d.plugin-v0.52.2.freebsd-amd64.tar.gz
-ecb4ee060f153fb711a112e61eb126f893adb64badbb2dbc8e19c72230fa24b4 *go.d.plugin-v0.52.2.freebsd-arm.tar.gz
-ae3057d396ab133ff19880644897ff9e4a1b34b85262422df3e9e079b72507f8 *go.d.plugin-v0.52.2.freebsd-arm64.tar.gz
-446bbb62858db60b15e50710091186ba00de728b3e349d9f7db77f1475a8891f *go.d.plugin-v0.52.2.linux-386.tar.gz
-fff928e244f87dd0b07734aaad87240957f5ee571e4f4196f4d50300d67ff8ec *go.d.plugin-v0.52.2.linux-amd64.tar.gz
-2beb004ecc2820c76d8eb82a5da5251e21cc93675b0dd6575e393f4762f60d28 *go.d.plugin-v0.52.2.linux-arm.tar.gz
-234cc81cbb7e104a8882aeff03ecd56214cb0aeb923db60c42aa0b6131f34bab *go.d.plugin-v0.52.2.linux-arm64.tar.gz
-1a4c1106c82439a3e488d7a3b42432cefa27577e2425daf73871af7431d14ae8 *go.d.plugin-v0.52.2.linux-mips.tar.gz
-29ebc77c995d4428018cf6f014023639e123f88dcebd026a3e476413261a4981 *go.d.plugin-v0.52.2.linux-mips64.tar.gz
-9e6262de77b2e5f0ba2d0882097eaf68f727a0af0fd9d31bd547a8fa55bdeb04 *go.d.plugin-v0.52.2.linux-mips64le.tar.gz
-05b7bbad67a36aa42f7a2933f0f429689ae7f5d23c9ce54bc26b9b386ddfca4b *go.d.plugin-v0.52.2.linux-mipsle.tar.gz
-9b18de7731d02fd2fc48fe250ea071f4d797b9af26b51d562a4bc39cf5d7f34c *go.d.plugin-v0.52.2.linux-ppc64.tar.gz
-357350165a42aa2c7fc03694a9176608943f6c3e4ce0e40ebad5bd5304b024e6 *go.d.plugin-v0.52.2.linux-ppc64le.tar.gz
+5f35071de109b4f78fe0fa7b8e2e08a0107055ef4a98944d238a91bb79e6d685 *config.tar.gz
+888819289f9342b19b33fcff5360b2624964d4e0659f7ffde22638e7c6bc291d *go.d.plugin-v0.53.2.darwin-amd64.tar.gz
+1a8cb431cbd22264b573d6025e7907eb9189f353c242477fd61cff823653bf54 *go.d.plugin-v0.53.2.darwin-arm64.tar.gz
+d0b7aaad5d914fa60488ad7226ba324bfdcd160577cb6df0cb383eb64fb63913 *go.d.plugin-v0.53.2.freebsd-386.tar.gz
+b3da2b601fead7851db2416c06713a447b1c463f9ed918dfd7174ecc76de9dbb *go.d.plugin-v0.53.2.freebsd-amd64.tar.gz
+bab327edc8d732594b04eb626adfad43e7580c1ad694ca3eba821daefe1bfde4 *go.d.plugin-v0.53.2.freebsd-arm.tar.gz
+47b0df08fb91b321b0c982e2e4006adc2d8c084e1b2e2dd1742c0eb118cc913d *go.d.plugin-v0.53.2.freebsd-arm64.tar.gz
+aa75b321de766046ec2ef95a21b67208a50c182077ba2f1a384a575b4080d540 *go.d.plugin-v0.53.2.linux-386.tar.gz
+c3ad5df378d561c82766ff1ae95fe721e0be68621aa1ecc14140678c9558c0f3 *go.d.plugin-v0.53.2.linux-amd64.tar.gz
+c7b2b45f9554bf2e2581b6eb1671b9206ae1123339feca42b442bdb7c9969b0f *go.d.plugin-v0.53.2.linux-arm.tar.gz
+4dda349bb07b32509a080c9eab747161ef4490bd60c190cfae40872274c41e4c *go.d.plugin-v0.53.2.linux-arm64.tar.gz
+7b8c7c61ccecf514aefd1424be650ad4f1b1026176b8ebfe3e876e60b362ccb7 *go.d.plugin-v0.53.2.linux-mips.tar.gz
+325e65bebb944906debd73be7a3410dad6f4014ab66314f83462cd3c0dcf0987 *go.d.plugin-v0.53.2.linux-mips64.tar.gz
+3c4e3cf67751bd10779fb42ebb34bc9f1649c5ea84a250735f90b1676c228789 *go.d.plugin-v0.53.2.linux-mips64le.tar.gz
+210e8271da577189df98c0e397d3f02a305c2fce10f08c336291f5aa96bffba9 *go.d.plugin-v0.53.2.linux-mipsle.tar.gz
+f6842408660c46cd8a65b1765ef3e3f13ff43a58a0f01765fffaf34b6ee5852c *go.d.plugin-v0.53.2.linux-ppc64.tar.gz
+219e3d2406e9284a28f3a0ae4e5ea48b9f1cff5e389a562ca1a2c3842a44908a *go.d.plugin-v0.53.2.linux-ppc64le.tar.gz
diff --git a/packaging/go.d.version b/packaging/go.d.version
index e831eb735..57bc653ec 100644
--- a/packaging/go.d.version
+++ b/packaging/go.d.version
@@ -1 +1 @@
-v0.52.2
+v0.53.2
diff --git a/packaging/installer/install-required-packages.sh b/packaging/installer/install-required-packages.sh
index 9b1f6518a..ce5ab4044 100755
--- a/packaging/installer/install-required-packages.sh
+++ b/packaging/installer/install-required-packages.sh
@@ -1723,6 +1723,7 @@ install_zypper() {
fi
local opts="--ignore-unknown"
+ local install_opts="--allow-downgrade"
if [ "${NON_INTERACTIVE}" -eq 1 ]; then
echo >&2 "Running in non-interactive mode"
# http://unix.stackexchange.com/questions/82016/how-to-use-zypper-in-bash-scripts-for-someone-coming-from-apt-get
@@ -1730,9 +1731,8 @@ install_zypper() {
fi
read -r -a zypper_opts <<< "$opts"
-
# install the required packages
- run ${sudo} zypper "${zypper_opts[@]}" install "${@}"
+ run ${sudo} zypper "${zypper_opts[@]}" install "${install_opts}" "${@}"
}
# -----------------------------------------------------------------------------
diff --git a/packaging/installer/kickstart.sh b/packaging/installer/kickstart.sh
index 284911194..5c6e39cf7 100755
--- a/packaging/installer/kickstart.sh
+++ b/packaging/installer/kickstart.sh
@@ -26,6 +26,7 @@ KICKSTART_SOURCE="$(
echo "$(pwd -P)/${self##*/}"
)"
PACKAGES_SCRIPT="https://raw.githubusercontent.com/netdata/netdata/master/packaging/installer/install-required-packages.sh"
+DEFAULT_PLUGIN_PACKAGES=""
PATH="${PATH}:/usr/local/bin:/usr/local/sbin"
PUBLIC_CLOUD_URL="https://app.netdata.cloud"
REPOCONFIG_DEB_URL_PREFIX="https://repo.netdata.cloud/repos/repoconfig"
@@ -34,7 +35,7 @@ REPOCONFIG_RPM_URL_PREFIX="https://repo.netdata.cloud/repos/repoconfig"
REPOCONFIG_RPM_VERSION="2-1"
START_TIME="$(date +%s)"
STATIC_INSTALL_ARCHES="x86_64 armv7l aarch64 ppc64le"
-TELEMETRY_URL="https://app.posthog.com/capture/"
+TELEMETRY_URL="https://us-east1-netdata-analytics-bi.cloudfunctions.net/ingest_agent_events"
# ======================================================================
# Defaults for environment variables
@@ -63,7 +64,6 @@ else
fi
NETDATA_TARBALL_BASEURL="${NETDATA_TARBALL_BASEURL:-https://github.com/netdata/netdata-nightlies/releases}"
-TELEMETRY_API_KEY="${NETDATA_POSTHOG_API_KEY:-mqkwGT0JNFqO-zX2t0mW6Tec9yooaVu7xCBlXtHnt5Y}"
if echo "${0}" | grep -q 'kickstart-static64'; then
NETDATA_FORCE_METHOD='static'
@@ -267,7 +267,6 @@ telemetry_event() {
REQ_BODY="$(cat << EOF
{
- "api_key": "${TELEMETRY_API_KEY}",
"event": "${1}",
"properties": {
"distinct_id": "${DISTINCT_ID}",
@@ -720,7 +719,7 @@ confirm_root_support() {
fi
if [ -z "${ROOTCMD}" ]; then
- fatal "We need root privileges to continue, but cannot find a way to gain them (we support sudo, doas, and pkexec). Either re-run this script as root, or set \$ROOTCMD to a command that can be used to gain root privileges." F0201
+ fatal "This script needs root privileges to install Netdata, but cannot find a way to gain them (we support sudo, doas, and pkexec). Either re-run this script as root, or set \$ROOTCMD to a command that can be used to gain root privileges." F0201
fi
fi
}
@@ -746,7 +745,7 @@ confirm() {
update() {
updater="${ndprefix}/usr/libexec/netdata/netdata-updater.sh"
- if [ -x "${updater}" ]; then
+ if run_as_root test -x "${updater}"; then
if [ "${DRY_RUN}" -eq 1 ]; then
progress "Would attempt to update existing installation by running the updater script located at: ${updater}"
return 0
@@ -972,7 +971,7 @@ handle_existing_install() {
claim
ret=$?
elif [ "${ACTION}" = "claim" ]; then
- fatal "User asked to claim, but did not proide a claiming token." F0202
+ fatal "User asked to claim, but did not provide a claiming token." F0202
else
progress "Not attempting to claim existing install at ${ndprefix} (no claiming token provided)."
fi
@@ -1010,7 +1009,7 @@ handle_existing_install() {
trap - EXIT
exit $ret
elif [ "${ACTION}" = "claim" ]; then
- fatal "User asked to claim, but did not proide a claiming token." F0202
+ fatal "User asked to claim, but did not provide a claiming token." F0202
else
fatal "Found an existing netdata install at ${ndprefix}, but the install type is '${INSTALL_TYPE}', which is not supported by this script, refusing to proceed." F0103
fi
@@ -1120,7 +1119,6 @@ claim() {
progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}"
fi
- progress "Attempting to claim agent to ${NETDATA_CLAIM_URL}"
if command -v netdata-claim.sh > /dev/null 2>&1; then
NETDATA_CLAIM_PATH="$(command -v netdata-claim.sh)"
elif [ -z "${INSTALL_PREFIX}" ] || [ "${INSTALL_PREFIX}" = "/" ]; then
@@ -1137,14 +1135,29 @@ claim() {
NETDATA_CLAIM_PATH="${INSTALL_PREFIX}/netdata/usr/sbin/netdata-claim.sh"
fi
+ err_msg=
+ err_code=
if [ -z "${NETDATA_CLAIM_PATH}" ]; then
- fatal "Unable to find usable claiming script. Reinstalling Netdata may resolve this." F050B
+ err_msg="Unable to claim node: could not find usable claiming script. Reinstalling Netdata may resolve this."
+ err_code=F050B
elif [ ! -e "${NETDATA_CLAIM_PATH}" ]; then
- fatal "${NETDATA_CLAIM_PATH} does not exist." F0512
+ err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} does not exist."
+ err_code=F0512
elif [ ! -f "${NETDATA_CLAIM_PATH}" ]; then
- fatal "${NETDATA_CLAIM_PATH} is not a file." F0513
+ err_msg="Unable to claim node: ${NETDATA_CLAIM_PATH} is not a file."
+ err_code=F0513
elif [ ! -x "${NETDATA_CLAIM_PATH}" ]; then
- fatal "Claiming script at ${NETDATA_CLAIM_PATH} is not executable. Reinstalling Netdata may resolve this." F0514
+ err_msg="Unable to claim node: claiming script at ${NETDATA_CLAIM_PATH} is not executable. Reinstalling Netdata may resolve this."
+ err_code=F0514
+ fi
+
+ if [ -n "$err_msg" ]; then
+ if [ "${ACTION}" = "claim" ]; then
+ fatal "$err_msg" "$err_code"
+ else
+ warning "$err_msg"
+ return 1
+ fi
fi
if ! is_netdata_running; then
@@ -1178,7 +1191,7 @@ claim() {
*) warning "Failed to claim node for an unknown reason. This usually means either networking problems or a bug. Please retry claiming later, and if you still see this message file a bug report at ${AGENT_BUG_REPORT_URL}" ;;
esac
- if [ -z "${NETDATA_NEW_INSTALL}" ]; then
+ if [ "${ACTION}" = "claim" ]; then
deferred_warnings
printf >&2 "%s\n" "For community support, you can connect with us on:"
support_list
@@ -1191,9 +1204,9 @@ claim() {
# ======================================================================
# Auto-update handling code.
set_auto_updates() {
- if [ -x "${INSTALL_PREFIX}/usr/libexec/netdata/netdata-updater.sh" ]; then
+ if run_as_root test -x "${INSTALL_PREFIX}/usr/libexec/netdata/netdata-updater.sh"; then
updater="${INSTALL_PREFIX}/usr/libexec/netdata/netdata-updater.sh"
- elif [ -x "${INSTALL_PREFIX}/netdata/usr/libexec/netdata/netdata-updater.sh" ]; then
+ elif run_as_root test -x "${INSTALL_PREFIX}/netdata/usr/libexec/netdata/netdata-updater.sh"; then
updater="${INSTALL_PREFIX}/netdata/usr/libexec/netdata/netdata-updater.sh"
else
warning "Could not find netdata-updater.sh. This means that auto-updates cannot (currently) be enabled on this system. See https://learn.netdata.cloud/docs/agent/packaging/installer/update for more information about updating Netdata."
@@ -1204,7 +1217,7 @@ set_auto_updates() {
if [ "${DRY_RUN}" -eq 1 ]; then
progress "Would have attempted to enable automatic updates."
# This first case is for catching using a new kickstart script with an old build. It can be safely removed after v1.34.0 is released.
- elif ! grep -q '\-\-enable-auto-updates' "${updater}"; then
+ elif ! run_as_root grep -q '\-\-enable-auto-updates' "${updater}"; then
echo
elif ! run_as_root "${updater}" --enable-auto-updates "${NETDATA_AUTO_UPDATE_TYPE}"; then
warning "Failed to enable auto updates. Netdata will still work, but you will need to update manually."
@@ -1331,7 +1344,7 @@ common_dnf_opts() {
}
try_package_install() {
- failed_refresh_msg="Failed to refresh repository metadata. ${BADNET_MSG} or by misconfiguration of one or more rpackage repositories in the system package manager configuration."
+ failed_refresh_msg="Failed to refresh repository metadata. ${BADNET_MSG} or incompatibilities with one or more third-party package repositories in the system package manager configuration."
if [ -z "${DISTRO_COMPAT_NAME}" ] || [ "${DISTRO_COMPAT_NAME}" = "unknown" ]; then
warning "Unable to determine Linux distribution for native packages."
@@ -1386,6 +1399,9 @@ try_package_install() {
common_rpm_opts
common_dnf_opts
repo_prefix="el/${SYSVERSION}"
+ # if [ "${SYSVERSION}" -lt 8 ]; then
+ # explicitly_install_native_plugins=1
+ # fi
;;
fedora|ol)
common_rpm_opts
@@ -1471,7 +1487,7 @@ try_package_install() {
if [ -n "${repo_subcmd}" ]; then
# shellcheck disable=SC2086
if ! run_as_root env ${env} ${pm_cmd} ${repo_subcmd} ${repo_update_opts}; then
- fatal "${failed_refresh_msg}" F0205
+ fatal "${failed_refresh_msg} In most cases, disabling any third-party repositories on the system and re-running the installer with the same options should work. If that does not work, consider using a static build with the --static-only option instead of native packages." F0205
fi
fi
else
@@ -1521,6 +1537,14 @@ try_package_install() {
fi
return 2
fi
+
+ if [ -n "${explicitly_install_native_plugins}" ]; then
+ progress "Installing external plugins."
+ # shellcheck disable=SC2086
+ if ! run_as_root env ${env} ${pm_cmd} install ${DEFAULT_PLUGIN_PACKAGES}; then
+ warning "Failed to install external plugin packages. Some collectors may not be available."
+ fi
+ fi
}
# ======================================================================
diff --git a/packaging/libbpf.checksums b/packaging/libbpf.checksums
deleted file mode 100644
index e0b91c0c6..000000000
--- a/packaging/libbpf.checksums
+++ /dev/null
@@ -1 +0,0 @@
-f2a8214c967153fcbb7a8f2af59c23a38f6e175384878dd37648649c5d8182c4 v1.1_netdata.tar.gz
diff --git a/packaging/libbpf.version b/packaging/libbpf.version
deleted file mode 100644
index b0797d5a8..000000000
--- a/packaging/libbpf.version
+++ /dev/null
@@ -1 +0,0 @@
-1.1_netdata
diff --git a/packaging/makeself/install-or-update.sh b/packaging/makeself/install-or-update.sh
index 1eabde83c..636fb6110 100755
--- a/packaging/makeself/install-or-update.sh
+++ b/packaging/makeself/install-or-update.sh
@@ -62,45 +62,6 @@ if [ ! "${DISABLE_TELEMETRY:-0}" -eq 0 ] ||
REINSTALL_OPTIONS="${REINSTALL_OPTIONS} --disable-telemetry"
fi
-deleted_stock_configs=0
-if [ ! -f "etc/netdata/.installer-cleanup-of-stock-configs-done" ]; then
-
- # -----------------------------------------------------------------------------
- progress "Deleting stock configuration files from user configuration directory"
-
- declare -A configs_signatures=()
- source "system/configs.signatures"
-
- if [ ! -d etc/netdata ]; then
- run mkdir -p etc/netdata
- fi
-
- md5sum="$(command -v md5sum 2> /dev/null || command -v md5 2> /dev/null)"
- while IFS= read -r -d '' x; do
- # find it relative filename
- f="${x/etc\/netdata\//}"
-
- # find the stock filename
- t="${f/.conf.old/.conf}"
- t="${t/.conf.orig/.conf}"
-
- if [ -n "${md5sum}" ]; then
- # find the checksum of the existing file
- md5="$(${md5sum} < "${x}" | cut -d ' ' -f 1)"
- #echo >&2 "md5: ${md5}"
-
- # check if it matches
- if [ "${configs_signatures[${md5}]}" = "${t}" ]; then
- # it matches the default
- run rm -f "${x}"
- deleted_stock_configs=$((deleted_stock_configs + 1))
- fi
- fi
- done < <(find etc -type f)
-
- touch "etc/netdata/.installer-cleanup-of-stock-configs-done"
-fi
-
# -----------------------------------------------------------------------------
progress "Attempt to create user/group netdata/netadata"
@@ -196,10 +157,6 @@ dir_should_be_link . var/log/netdata netdata-logs
dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d orig
-if [ ${deleted_stock_configs} -gt 0 ]; then
- dir_should_be_link etc/netdata ../../usr/lib/netdata/conf.d "000.-.USE.THE.orig.LINK.TO.COPY.AND.EDIT.STOCK.CONFIG.FILES"
-fi
-
# -----------------------------------------------------------------------------
progress "fix permissions"
@@ -214,6 +171,7 @@ progress "changing plugins ownership and permissions"
if command -v setcap >/dev/null 2>&1; then
run setcap "cap_dac_read_search,cap_sys_ptrace=ep" "usr/libexec/netdata/plugins.d/apps.plugin"
run setcap "cap_dac_read_search=ep" "usr/libexec/netdata/plugins.d/slabinfo.plugin"
+ run setcap "cap_dac_read_search=ep" "usr/libexec/netdata/plugins.d/debugfs.plugin"
if command -v capsh >/dev/null 2>&1 && capsh --supports=cap_perfmon 2>/dev/null ; then
run setcap "cap_perfmon=ep" "usr/libexec/netdata/plugins.d/perf.plugin"
@@ -223,7 +181,7 @@ if command -v setcap >/dev/null 2>&1; then
run setcap "cap_net_admin,cap_net_raw=eip" "usr/libexec/netdata/plugins.d/go.d.plugin"
else
- for x in apps.plugin perf.plugin slabinfo.plugin; do
+ for x in apps.plugin perf.plugin slabinfo.plugin debugfs.plugin; do
f="usr/libexec/netdata/plugins.d/${x}"
run chown root:${NETDATA_GROUP} "${f}"
run chmod 4750 "${f}"
diff --git a/packaging/makeself/jobs/99-makeself.install.sh b/packaging/makeself/jobs/99-makeself.install.sh
index 12bd59b66..aa1acd100 100755
--- a/packaging/makeself/jobs/99-makeself.install.sh
+++ b/packaging/makeself/jobs/99-makeself.install.sh
@@ -28,7 +28,6 @@ run cp \
packaging/makeself/post-installer.sh \
packaging/makeself/install-or-update.sh \
packaging/installer/functions.sh \
- configs.signatures \
"${NETDATA_INSTALL_PATH}/system/"
# -----------------------------------------------------------------------------
diff --git a/packaging/version b/packaging/version
index f90a7b3c1..d71bf1753 100644
--- a/packaging/version
+++ b/packaging/version
@@ -1 +1 @@
-v1.39.1
+v1.40.0
diff --git a/streaming/receiver.c b/streaming/receiver.c
index ff7a95629..709f15bd5 100644
--- a/streaming/receiver.c
+++ b/streaming/receiver.c
@@ -31,10 +31,14 @@ void receiver_state_free(struct receiver_state *rpt) {
freez(rpt->program_version);
#ifdef ENABLE_HTTPS
- if(rpt->ssl.conn)
- SSL_free(rpt->ssl.conn);
+ netdata_ssl_close(&rpt->ssl);
#endif
+ if(rpt->fd != -1) {
+ internal_error(true, "closing socket...");
+ close(rpt->fd);
+ }
+
#ifdef ENABLE_COMPRESSION
if (rpt->decompressor)
rpt->decompressor->destroy(&rpt->decompressor);
@@ -100,13 +104,18 @@ static int read_stream(struct receiver_state *r, char* buffer, size_t size) {
return 0;
}
+ ssize_t bytes_read;
+
#ifdef ENABLE_HTTPS
- if (r->ssl.conn && r->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE)
- return (int)netdata_ssl_read(r->ssl.conn, buffer, size);
+ if (SSL_connection(&r->ssl))
+ bytes_read = netdata_ssl_read(&r->ssl, buffer, size);
+ else
+ bytes_read = read(r->fd, buffer, size);
+#else
+ bytes_read = read(r->fd, buffer, size);
#endif
- ssize_t bytes_read = read(r->fd, buffer, size);
- if(bytes_read == 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINPROGRESS)) {
+ if((bytes_read == 0 || bytes_read == -1) && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINPROGRESS)) {
error("STREAM: %s(): timeout while waiting for data on socket!", __FUNCTION__);
bytes_read = -3;
}
@@ -119,23 +128,6 @@ static int read_stream(struct receiver_state *r, char* buffer, size_t size) {
bytes_read = -2;
}
-// do {
-// bytes_read = (int) fread(buffer, 1, size, fp);
-// if (unlikely(bytes_read <= 0)) {
-// if(feof(fp)) {
-// internal_error(true, "%s(): fread() failed with EOF", __FUNCTION__);
-// bytes_read = -2;
-// }
-// else if(ferror(fp)) {
-// internal_error(true, "%s(): fread() failed with ERROR", __FUNCTION__);
-// bytes_read = -3;
-// }
-// else bytes_read = 0;
-// }
-// else
-// worker_set_metric(WORKER_RECEIVER_JOB_BYTES_READ, bytes_read);
-// } while(bytes_read == 0);
-
return (int)bytes_read;
}
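The rewritten read path above selects between TLS and plain-socket reads once, then maps soft failures to the receiver's negative sentinel codes. A minimal sketch of the same pattern, assuming a hypothetical transport struct whose tls_read callback stands in for netdata_ssl_read():

    #include <errno.h>
    #include <sys/types.h>
    #include <unistd.h>

    /* Hypothetical transport: a socket plus an optional TLS session. */
    struct transport {
        int fd;
        void *tls;                           /* NULL for plain connections */
        ssize_t (*tls_read)(void *tls, char *buf, size_t len);
    };

    static ssize_t transport_read(struct transport *t, char *buf, size_t len) {
        ssize_t n = t->tls ? t->tls_read(t->tls, buf, len)
                           : read(t->fd, buf, len);

        /* No data yet on a non-blocking socket: report a timeout (-3),
         * as read_stream() above does. */
        if ((n == 0 || n == -1) &&
            (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINPROGRESS))
            return -3;

        if (n <= 0)
            return -2;                       /* EOF or hard error */

        return n;
    }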
@@ -323,12 +315,6 @@ static char *receiver_next_line(struct receiver_state *r, char *buffer, size_t b
return NULL;
}
-static void streaming_parser_thread_cleanup(void *ptr) {
- PARSER *parser = (PARSER *)ptr;
- rrd_collector_finished();
- parser_destroy(parser);
-}
-
bool plugin_is_enabled(struct plugind *cd);
static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, int fd, void *ssl) {
@@ -352,7 +338,7 @@ static size_t streaming_parser(struct receiver_state *rpt, struct plugind *cd, i
// this keeps the parser with its current value
// so, parser needs to be allocated before pushing it
- netdata_thread_cleanup_push(streaming_parser_thread_cleanup, parser);
+ netdata_thread_cleanup_push(pluginsd_process_thread_cleanup, parser);
parser_add_keyword(parser, "CLAIMED_ID", streaming_claimed_id);
@@ -437,6 +423,58 @@ static void rrdpush_receiver_replication_reset(RRDHOST *host) {
rrdhost_receiver_replicating_charts_zero(host);
}
+void rrdhost_receiver_to_json(BUFFER *wb, RRDHOST *host, const char *key, time_t now __maybe_unused) {
+ size_t receiver_hops = host->system_info ? host->system_info->hops : (host == localhost) ? 0 : 1;
+
+ netdata_mutex_lock(&host->receiver_lock);
+
+ buffer_json_member_add_object(wb, key);
+ buffer_json_member_add_uint64(wb, "hops", receiver_hops);
+
+ bool online = host == localhost || !rrdhost_flag_check(host, RRDHOST_FLAG_ORPHAN | RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED);
+ buffer_json_member_add_boolean(wb, "online", online);
+
+ if(host->child_connect_time || host->child_disconnected_time) {
+ time_t since = MAX(host->child_connect_time, host->child_disconnected_time);
+ buffer_json_member_add_time_t(wb, "since", since);
+ buffer_json_member_add_time_t(wb, "age", now - since);
+ }
+
+ if(!online && host->rrdpush_last_receiver_exit_reason)
+ buffer_json_member_add_string(wb, "reason", host->rrdpush_last_receiver_exit_reason);
+
+ if(host != localhost && host->receiver) {
+ buffer_json_member_add_object(wb, "replication");
+ {
+ size_t instances = rrdhost_receiver_replicating_charts(host);
+ buffer_json_member_add_boolean(wb, "in_progress", instances);
+ buffer_json_member_add_double(wb, "completion", host->rrdpush_receiver_replication_percent);
+ buffer_json_member_add_uint64(wb, "instances", instances);
+ }
+ buffer_json_object_close(wb); // replication
+
+ buffer_json_member_add_object(wb, "source");
+ {
+
+ char buf[1024 + 1];
+ SOCKET_PEERS peers = socket_peers(host->receiver->fd);
+ bool ssl = SSL_connection(&host->receiver->ssl);
+
+ snprintfz(buf, 1024, "[%s]:%d%s", peers.local.ip, peers.local.port, ssl ? ":SSL" : "");
+ buffer_json_member_add_string(wb, "local", buf);
+
+ snprintfz(buf, 1024, "[%s]:%d%s", peers.peer.ip, peers.peer.port, ssl ? ":SSL" : "");
+ buffer_json_member_add_string(wb, "remote", buf);
+
+ stream_capabilities_to_json_array(wb, host->receiver->capabilities, "capabilities");
+ }
+ buffer_json_object_close(wb); // source
+ }
+ buffer_json_object_close(wb); // collection
+
+ netdata_mutex_unlock(&host->receiver_lock);
+}
+
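rrdhost_receiver_to_json() uses libnetdata's incremental JSON builder: open a keyed object, add members, close scopes in LIFO order. A reduced sketch of the same pattern, reusing only the BUFFER type and buffer_json_* helpers already shown in this hunk (the function and its parameters are hypothetical):

    static void connection_status_to_json(BUFFER *wb, const char *key,
                                          bool online, uint64_t hops,
                                          time_t since, time_t now) {
        buffer_json_member_add_object(wb, key);           /* "<key>": { */
        buffer_json_member_add_uint64(wb, "hops", hops);
        buffer_json_member_add_boolean(wb, "online", online);

        if (since) {                                      /* optional members */
            buffer_json_member_add_time_t(wb, "since", since);
            buffer_json_member_add_time_t(wb, "age", now - since);
        }

        buffer_json_object_close(wb);                     /* } */
    }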
static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) {
bool signal_rrdcontext = false;
bool set_this = false;
@@ -474,6 +512,8 @@ static bool rrdhost_set_receiver(RRDHOST *host, struct receiver_state *rpt) {
rrdhost_flag_clear(rpt->host, RRDHOST_FLAG_RRDPUSH_RECEIVER_DISCONNECTED);
aclk_queue_node_info(rpt->host, true);
+ rrdpush_reset_destinations_postpone_time(host);
+
set_this = true;
}
@@ -506,16 +546,17 @@ static void rrdhost_clear_receiver(struct receiver_state *rpt) {
signal_rrdcontext = true;
rrdpush_receiver_replication_reset(host);
- if (host->receiver == rpt)
- host->receiver = NULL;
-
rrdhost_flag_set(host, RRDHOST_FLAG_ORPHAN);
+ host->receiver = NULL;
+ host->rrdpush_last_receiver_exit_reason = rpt->exit.reason;
}
netdata_mutex_unlock(&host->receiver_lock);
if(signal_rrdcontext)
rrdcontext_host_child_disconnected(host);
+
+ rrdpush_reset_destinations_postpone_time(host);
}
}
@@ -549,7 +590,7 @@ bool stop_streaming_receiver(RRDHOST *host, const char *reason) {
"thread %d takes too long to stop, giving up..."
, rrdhost_hostname(host)
, host->receiver->client_ip, host->receiver->client_port
- , gettid());
+ , host->receiver->tid);
else
ret = true;
@@ -558,6 +599,18 @@ bool stop_streaming_receiver(RRDHOST *host, const char *reason) {
return ret;
}
+static void rrdpush_send_error_on_taken_over_connection(struct receiver_state *rpt, const char *msg) {
+ (void) send_timeout(
+#ifdef ENABLE_HTTPS
+ &rpt->ssl,
+#endif
+ rpt->fd,
+ (char *)msg,
+ strlen(msg),
+ 0,
+ 5);
+}
+
void rrdpush_receive_log_status(struct receiver_state *rpt, const char *msg, const char *status) {
log_stream_connection(rpt->client_ip, rpt->client_port,
@@ -585,7 +638,7 @@ static void rrdhost_reset_destinations(RRDHOST *host) {
d->postpone_reconnection_until = 0;
}
-static int rrdpush_receive(struct receiver_state *rpt)
+static void rrdpush_receive(struct receiver_state *rpt)
{
rpt->config.mode = default_rrd_memory_mode;
rpt->config.history = default_rrd_history_entries;
@@ -689,14 +742,14 @@ static int rrdpush_receive(struct receiver_state *rpt)
if(!host) {
rrdpush_receive_log_status(rpt, "failed to find/create host structure", "INTERNAL ERROR DROPPING CONNECTION");
- close(rpt->fd);
- return 1;
+ rrdpush_send_error_on_taken_over_connection(rpt, START_STREAMING_ERROR_INTERNAL_ERROR);
+ goto cleanup;
}
if (unlikely(rrdhost_flag_check(host, RRDHOST_FLAG_PENDING_CONTEXT_LOAD))) {
rrdpush_receive_log_status(rpt, "host is initializing", "INITIALIZATION IN PROGRESS RETRY LATER");
- close(rpt->fd);
- return 1;
+ rrdpush_send_error_on_taken_over_connection(rpt, START_STREAMING_ERROR_INITIALIZATION);
+ goto cleanup;
}
// system_info has been consumed by the host structure
@@ -704,8 +757,8 @@ static int rrdpush_receive(struct receiver_state *rpt)
if(!rrdhost_set_receiver(host, rpt)) {
rrdpush_receive_log_status(rpt, "host is already served by another receiver", "DUPLICATE RECEIVER DROPPING CONNECTION");
- close(rpt->fd);
- return 1;
+ rrdpush_send_error_on_taken_over_connection(rpt, START_STREAMING_ERROR_ALREADY_STREAMING);
+ goto cleanup;
}
}
@@ -776,15 +829,16 @@ static int rrdpush_receive(struct receiver_state *rpt)
}
debug(D_STREAM, "Initial response to %s: %s", rpt->client_ip, initial_response);
- if(send_timeout(
+ ssize_t bytes_sent = send_timeout(
#ifdef ENABLE_HTTPS
&rpt->ssl,
#endif
- rpt->fd, initial_response, strlen(initial_response), 0, 60) != (ssize_t)strlen(initial_response)) {
+ rpt->fd, initial_response, strlen(initial_response), 0, 60);
+ if(bytes_sent != (ssize_t)strlen(initial_response)) {
+ internal_error(true, "Cannot send response, got %zd bytes, expecting %zu bytes", bytes_sent, strlen(initial_response));
rrdpush_receive_log_status(rpt, "cannot reply back", "CANT REPLY DROPPING CONNECTION");
- close(rpt->fd);
- return 0;
+ goto cleanup;
}
}
@@ -850,9 +904,8 @@ static int rrdpush_receive(struct receiver_state *rpt)
rrdhost_set_is_parent_label(--localhost->connected_children_count);
- // cleanup
- close(rpt->fd);
- return (int)count;
+cleanup:
+ ;
}
static void rrdpush_receiver_thread_cleanup(void *ptr) {
@@ -879,7 +932,8 @@ void *rrdpush_receiver_thread(void *ptr) {
worker_register_job_custom_metric(WORKER_RECEIVER_JOB_REPLICATION_COMPLETION, "replication completion", "%", WORKER_METRIC_ABSOLUTE);
struct receiver_state *rpt = (struct receiver_state *)ptr;
- info("STREAM %s [%s]:%s: receive thread created (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, gettid());
+ rpt->tid = gettid();
+ info("STREAM %s [%s]:%s: receive thread created (task id %d)", rpt->hostname, rpt->client_ip, rpt->client_port, rpt->tid);
rrdpush_receive(rpt);
diff --git a/streaming/replication.c b/streaming/replication.c
index a50913a1a..c6fafc357 100644
--- a/streaming/replication.c
+++ b/streaming/replication.c
@@ -274,6 +274,12 @@ static void replication_query_finalize(BUFFER *wb, struct replication_query *q,
replication_queries.queries_finished += queries;
replication_queries.points_read += q->points_read;
replication_queries.points_generated += q->points_generated;
+
+ if(q->st && q->st->rrdhost->sender) {
+ struct sender_state *s = q->st->rrdhost->sender;
+ s->replication.latest_completed_before_t = q->query.before;
+ }
+
netdata_spinlock_unlock(&replication_queries.spinlock);
}
@@ -644,7 +650,7 @@ bool replication_response_execute_and_finalize(struct replication_query *q, size
buffer_fast_strcat(wb, "\n", 1);
worker_is_busy(WORKER_JOB_BUFFER_COMMIT);
- sender_commit(host->sender, wb);
+ sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_REPLICATION);
worker_is_busy(WORKER_JOB_CLEANUP);
if(enable_streaming) {
@@ -1466,6 +1472,9 @@ void replication_add_request(struct sender_state *sender, const char *chart_id,
.not_indexed_preprocessing = false,
};
+ if(!sender->replication.oldest_request_after_t || rq.after < sender->replication.oldest_request_after_t)
+ sender->replication.oldest_request_after_t = rq.after;
+
if(start_streaming && rrdpush_sender_get_buffer_used_percent(sender) <= STREAMING_START_MAX_SENDER_BUFFER_PERCENTAGE_ALLOWED)
replication_execute_request(&rq, false);
diff --git a/streaming/rrdpush.c b/streaming/rrdpush.c
index 62b537f0c..c481871cc 100644
--- a/streaming/rrdpush.c
+++ b/streaming/rrdpush.c
@@ -49,7 +49,6 @@ bool default_rrdpush_enable_replication = true;
time_t default_rrdpush_seconds_to_replicate = 86400;
time_t default_rrdpush_replication_step = 600;
#ifdef ENABLE_HTTPS
-int netdata_use_ssl_on_stream = NETDATA_SSL_OPTIONAL;
char *netdata_ssl_ca_path = NULL;
char *netdata_ssl_ca_file = NULL;
#endif
@@ -137,24 +136,10 @@ int rrdpush_init() {
}
#ifdef ENABLE_HTTPS
- if (netdata_use_ssl_on_stream == NETDATA_SSL_OPTIONAL) {
- if (default_rrdpush_destination){
- char *test = strstr(default_rrdpush_destination,":SSL");
- if(test){
- *test = 0X00;
- netdata_use_ssl_on_stream = NETDATA_SSL_FORCE;
- }
- }
- }
+ netdata_ssl_validate_certificate_sender = !appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", !netdata_ssl_validate_certificate);
- bool invalid_certificate = appconfig_get_boolean(&stream_config, CONFIG_SECTION_STREAM, "ssl skip certificate verification", CONFIG_BOOLEAN_NO);
-
- if(invalid_certificate == CONFIG_BOOLEAN_YES){
- if(netdata_ssl_validate_server == NETDATA_SSL_VALID_CERTIFICATE){
- info("Netdata is configured to accept invalid SSL certificate.");
- netdata_ssl_validate_server = NETDATA_SSL_INVALID_CERTIFICATE;
- }
- }
+ if(!netdata_ssl_validate_certificate_sender)
+ info("SSL: streaming senders will skip SSL certificates verification.");
netdata_ssl_ca_path = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CApath", NULL);
netdata_ssl_ca_file = appconfig_get(&stream_config, CONFIG_SECTION_STREAM, "CAfile", NULL);
@@ -390,7 +375,7 @@ bool rrdset_push_chart_definition_now(RRDSET *st) {
BUFFER *wb = sender_start(host->sender);
rrdpush_send_chart_definition(wb, st);
- sender_commit(host->sender, wb);
+ sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
sender_thread_buffer_free();
return true;
@@ -458,7 +443,7 @@ void rrdset_push_metrics_finished(RRDSET_STREAM_BUFFER *rsb, RRDSET *st) {
buffer_fast_strcat(rsb->wb, PLUGINSD_KEYWORD_END_V2 "\n", sizeof(PLUGINSD_KEYWORD_END_V2) - 1 + 1);
}
- sender_commit(st->rrdhost->sender, rsb->wb);
+ sender_commit(st->rrdhost->sender, rsb->wb, STREAM_TRAFFIC_TYPE_DATA);
*rsb = (RRDSET_STREAM_BUFFER){ .wb = NULL, };
}
@@ -498,7 +483,7 @@ RRDSET_STREAM_BUFFER rrdset_push_metric_initialize(RRDSET *st, time_t wall_clock
if(unlikely(!exposed_upstream)) {
BUFFER *wb = sender_start(host->sender);
replication_in_progress = rrdpush_send_chart_definition(wb, st);
- sender_commit(host->sender, wb);
+ sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
}
if(replication_in_progress)
@@ -529,7 +514,7 @@ void rrdpush_send_host_labels(RRDHOST *host) {
rrdlabels_walkthrough_read(host->rrdlabels, send_labels_callback, wb);
buffer_sprintf(wb, "OVERWRITE %s\n", "labels");
- sender_commit(host->sender, wb);
+ sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
sender_thread_buffer_free();
}
@@ -548,7 +533,7 @@ void rrdpush_claimed_id(RRDHOST *host)
buffer_sprintf(wb, "CLAIMED_ID %s %s\n", host->machine_guid, (host->aclk_state.claimed_id ? host->aclk_state.claimed_id : "NULL") );
rrdhost_aclk_state_unlock(host);
- sender_commit(host->sender, wb);
+ sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
sender_thread_buffer_free();
}
@@ -579,6 +564,7 @@ int connect_to_one_of_destinations(
if (reconnects_counter)
*reconnects_counter += 1;
+ d->last_attempt = now;
sock = connect_to_this(string2str(d->destination), default_port, timeout);
if (sock != -1) {
@@ -610,6 +596,14 @@ bool destinations_init_add_one(char *entry, void *data) {
struct destinations_init_tmp *t = data;
struct rrdpush_destinations *d = callocz(1, sizeof(struct rrdpush_destinations));
+ char *colon_ssl = strstr(entry, ":SSL");
+ if(colon_ssl) {
+ *colon_ssl = '\0';
+ d->ssl = true;
+ }
+ else
+ d->ssl = false;
+
d->destination = string_strdupz(entry);
__atomic_add_fetch(&netdata_buffers_statistics.rrdhost_senders, sizeof(struct rrdpush_destinations), __ATOMIC_RELAXED);
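destinations_init_add_one() now derives the per-destination TLS flag by locating the ":SSL" marker and truncating the entry in place before it is interned. The idiom in isolation (strstr() matches the marker anywhere in the string, exactly as above):

    #include <stdbool.h>
    #include <string.h>

    /* Truncate entry at the ":SSL" marker, if present, and report
     * whether TLS was requested for this destination. */
    static bool strip_ssl_marker(char *entry) {
        char *mark = strstr(entry, ":SSL");
        if (!mark)
            return false;

        *mark = '\0';          /* entry now ends before ":SSL" */
        return true;
    }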
@@ -712,7 +706,7 @@ int rrdpush_receiver_permission_denied(struct web_client *w) {
// we always respond with the same message and error code
// to prevent an attacker from gaining info about the error
buffer_flush(w->response.data);
- buffer_sprintf(w->response.data, "You are not permitted to access this. Check the logs for more info.");
+ buffer_strcat(w->response.data, START_STREAMING_ERROR_NOT_PERMITTED);
return HTTP_RESP_UNAUTHORIZED;
}
@@ -720,10 +714,35 @@ int rrdpush_receiver_too_busy_now(struct web_client *w) {
// we always respond with the same message and error code
// to prevent an attacker from gaining info about the error
buffer_flush(w->response.data);
- buffer_sprintf(w->response.data, "The server is too busy now to accept this request. Try later.");
+ buffer_strcat(w->response.data, START_STREAMING_ERROR_BUSY_TRY_LATER);
return HTTP_RESP_SERVICE_UNAVAILABLE;
}
+static void rrdpush_receiver_takeover_web_connection(struct web_client *w, struct receiver_state *rpt) {
+ rpt->fd = w->ifd;
+
+#ifdef ENABLE_HTTPS
+ rpt->ssl.conn = w->ssl.conn;
+ rpt->ssl.state = w->ssl.state;
+
+ w->ssl = NETDATA_SSL_UNSET_CONNECTION;
+#endif
+
+ WEB_CLIENT_IS_DEAD(w);
+
+ if(web_server_mode == WEB_SERVER_MODE_STATIC_THREADED) {
+ web_client_flag_set(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET);
+ }
+ else {
+ if(w->ifd == w->ofd)
+ w->ifd = w->ofd = -1;
+ else
+ w->ifd = -1;
+ }
+
+ buffer_flush(w->response.data);
+}
+
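rrdpush_receiver_takeover_web_connection() is an ownership transfer: the descriptor and TLS state move into the receiver, and the web client's copies are invalidated so its normal teardown cannot close a connection it no longer owns. The shape of the idiom, with a hypothetical endpoint struct standing in for web_client/receiver_state:

    /* Hypothetical stand-in for the two connection owners. */
    struct endpoint { int fd; void *tls; };

    static void connection_take_over(struct endpoint *dst, struct endpoint *src) {
        dst->fd  = src->fd;
        dst->tls = src->tls;

        src->fd  = -1;         /* src's cleanup now skips close() */
        src->tls = NULL;       /* ...and leaves the TLS session alone */
    }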
void *rrdpush_receiver_thread(void *ptr);
int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_string) {
@@ -741,20 +760,16 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
rpt->system_info = callocz(1, sizeof(struct rrdhost_system_info));
rpt->system_info->hops = rpt->hops;
- rpt->fd = w->ifd;
+ rpt->fd = -1;
rpt->client_ip = strdupz(w->client_ip);
rpt->client_port = strdupz(w->client_port);
- rpt->config.update_every = default_rrd_update_every;
-
#ifdef ENABLE_HTTPS
- rpt->ssl.conn = w->ssl.conn;
- rpt->ssl.flags = w->ssl.flags;
-
- w->ssl.conn = NULL;
- w->ssl.flags = NETDATA_SSL_START;
+ rpt->ssl = NETDATA_SSL_UNSET_CONNECTION;
#endif
+ rpt->config.update_every = default_rrd_update_every;
+
// parse the parameters and fill rpt and rpt->system_info
while(decoded_query_string) {
@@ -1011,6 +1026,8 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
if (strcmp(rpt->machine_guid, localhost->machine_guid) == 0) {
+ rrdpush_receiver_takeover_web_connection(w, rpt);
+
rrdpush_receive_log_status(
rpt,
"machine GUID is my own",
@@ -1032,9 +1049,8 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
);
}
- close(rpt->fd);
receiver_state_free(rpt);
- return web_client_socket_is_now_used_for_streaming(w);
+ return HTTP_RESP_OK;
}
if(unlikely(web_client_streaming_rate_t > 0)) {
@@ -1130,7 +1146,7 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
// Have not set WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET - caller should clean up
buffer_flush(w->response.data);
- buffer_strcat(w->response.data, "This GUID is already streaming to this server");
+ buffer_strcat(w->response.data, START_STREAMING_ERROR_ALREADY_STREAMING);
receiver_state_free(rpt);
return HTTP_RESP_CONFLICT;
}
@@ -1138,8 +1154,11 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
debug(D_SYSTEM, "starting STREAM receive thread.");
- char tag[FILENAME_MAX + 1];
- snprintfz(tag, FILENAME_MAX, THREAD_TAG_STREAM_RECEIVER "[%s,[%s]:%s]", rpt->hostname, w->client_ip, w->client_port);
+ rrdpush_receiver_takeover_web_connection(w, rpt);
+
+ char tag[NETDATA_THREAD_TAG_MAX + 1];
+ snprintfz(tag, NETDATA_THREAD_TAG_MAX, THREAD_TAG_STREAM_RECEIVER "[%s]", rpt->hostname);
+ tag[NETDATA_THREAD_TAG_MAX] = '\0';
if(netdata_thread_create(&rpt->thread, tag, NETDATA_THREAD_OPTION_DEFAULT, rrdpush_receiver_thread, (void *)rpt)) {
rrdpush_receive_log_status(
@@ -1154,23 +1173,86 @@ int rrdpush_receiver_thread_spawn(struct web_client *w, char *decoded_query_stri
}
// prevent the caller from closing the streaming socket
- return web_client_socket_is_now_used_for_streaming(w);
+ return HTTP_RESP_OK;
+}
+
+void rrdpush_reset_destinations_postpone_time(RRDHOST *host) {
+ struct rrdpush_destinations *d;
+ for (d = host->destinations; d; d = d->next)
+ d->postpone_reconnection_until = 0;
}
+static struct {
+ STREAM_HANDSHAKE err;
+ const char *str;
+} handshake_errors[] = {
+ { STREAM_HANDSHAKE_OK_V5, "OK_V5" },
+ { STREAM_HANDSHAKE_OK_V4, "OK_V4" },
+ { STREAM_HANDSHAKE_OK_V3, "OK_V3" },
+ { STREAM_HANDSHAKE_OK_V2, "OK_V2" },
+ { STREAM_HANDSHAKE_OK_V1, "OK_V1" },
+ { STREAM_HANDSHAKE_ERROR_BAD_HANDSHAKE, "BAD HANDSHAKE" },
+ { STREAM_HANDSHAKE_ERROR_LOCALHOST, "LOCALHOST" },
+ { STREAM_HANDSHAKE_ERROR_ALREADY_CONNECTED, "ALREADY CONNECTED" },
+ { STREAM_HANDSHAKE_ERROR_DENIED, "DENIED" },
+ { STREAM_HANDSHAKE_ERROR_SEND_TIMEOUT, "SEND TIMEOUT" },
+ { STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT, "RECEIVE TIMEOUT" },
+ { STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE, "INVALID CERTIFICATE" },
+ { STREAM_HANDSHAKE_ERROR_SSL_ERROR, "SSL ERROR" },
+ { STREAM_HANDSHAKE_ERROR_CANT_CONNECT, "CANT CONNECT" },
+ { STREAM_HANDSHAKE_BUSY_TRY_LATER, "BUSY TRY LATER" },
+ { STREAM_HANDSHAKE_INTERNAL_ERROR, "INTERNAL ERROR" },
+ { STREAM_HANDSHAKE_INITIALIZATION, "INITIALIZING" },
+ { 0, NULL },
+};
+
+const char *stream_handshake_error_to_string(STREAM_HANDSHAKE handshake_error) {
+ for(size_t i = 0; handshake_errors[i].str ; i++) {
+ if(handshake_error == handshake_errors[i].err)
+ return handshake_errors[i].str;
+ }
+
+ return "";
+}
+
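handshake_errors[] replaces per-value branching with a sentinel-terminated table, so adding a code means adding one row. The same lookup pattern as a self-contained program (the demo enum and strings are hypothetical, not netdata's):

    #include <stdio.h>

    typedef enum { DEMO_OK = 1, DEMO_DENIED = -4 } demo_code;

    static const struct { demo_code code; const char *str; } demo_names[] = {
        { DEMO_OK,     "OK"     },
        { DEMO_DENIED, "DENIED" },
        { 0, NULL },                          /* sentinel ends the scan */
    };

    static const char *demo_to_string(demo_code c) {
        for (size_t i = 0; demo_names[i].str; i++)
            if (demo_names[i].code == c)
                return demo_names[i].str;
        return "";                            /* unknown codes map to "" */
    }

    int main(void) {
        printf("%s\n", demo_to_string(DEMO_DENIED));   /* prints DENIED */
        return 0;
    }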
+static struct {
+ STREAM_CAPABILITIES cap;
+ const char *str;
+} capability_names[] = {
+ { STREAM_CAP_V1, "V1" },
+ { STREAM_CAP_V2, "V2" },
+ { STREAM_CAP_VN, "VN" },
+ { STREAM_CAP_VCAPS, "VCAPS" },
+ { STREAM_CAP_HLABELS, "HLABELS" },
+ { STREAM_CAP_CLAIM, "CLAIM" },
+ { STREAM_CAP_CLABELS, "CLABELS" },
+ { STREAM_CAP_COMPRESSION, "COMPRESSION" },
+ { STREAM_CAP_FUNCTIONS, "FUNCTIONS" },
+ { STREAM_CAP_REPLICATION, "REPLICATION" },
+ { STREAM_CAP_BINARY, "BINARY" },
+ { STREAM_CAP_INTERPOLATED, "INTERPOLATED" },
+ { STREAM_CAP_IEEE754, "IEEE754" },
+ { 0 , NULL },
+};
+
static void stream_capabilities_to_string(BUFFER *wb, STREAM_CAPABILITIES caps) {
- if(caps & STREAM_CAP_V1) buffer_strcat(wb, "V1 ");
- if(caps & STREAM_CAP_V2) buffer_strcat(wb, "V2 ");
- if(caps & STREAM_CAP_VN) buffer_strcat(wb, "VN ");
- if(caps & STREAM_CAP_VCAPS) buffer_strcat(wb, "VCAPS ");
- if(caps & STREAM_CAP_HLABELS) buffer_strcat(wb, "HLABELS ");
- if(caps & STREAM_CAP_CLAIM) buffer_strcat(wb, "CLAIM ");
- if(caps & STREAM_CAP_CLABELS) buffer_strcat(wb, "CLABELS ");
- if(caps & STREAM_CAP_COMPRESSION) buffer_strcat(wb, "COMPRESSION ");
- if(caps & STREAM_CAP_FUNCTIONS) buffer_strcat(wb, "FUNCTIONS ");
- if(caps & STREAM_CAP_REPLICATION) buffer_strcat(wb, "REPLICATION ");
- if(caps & STREAM_CAP_BINARY) buffer_strcat(wb, "BINARY ");
- if(caps & STREAM_CAP_INTERPOLATED) buffer_strcat(wb, "INTERPOLATED ");
- if(caps & STREAM_CAP_IEEE754) buffer_strcat(wb, "IEEE754 ");
+ for(size_t i = 0; capability_names[i].str ; i++) {
+ if(caps & capability_names[i].cap) {
+ buffer_strcat(wb, capability_names[i].str);
+ buffer_strcat(wb, " ");
+ }
+ }
+}
+
+void stream_capabilities_to_json_array(BUFFER *wb, STREAM_CAPABILITIES caps, const char *key) {
+ buffer_json_member_add_array(wb, key);
+
+ for(size_t i = 0; capability_names[i].str ; i++) {
+ if(caps & capability_names[i].cap)
+ buffer_json_add_array_item_string(wb, capability_names[i].str);
+ }
+
+ buffer_json_array_close(wb);
}
void log_receiver_capabilities(struct receiver_state *rpt) {
diff --git a/streaming/rrdpush.h b/streaming/rrdpush.h
index ff8958440..f97c8ddfb 100644
--- a/streaming/rrdpush.h
+++ b/streaming/rrdpush.h
@@ -72,6 +72,9 @@ STREAM_CAPABILITIES stream_our_capabilities();
#define START_STREAMING_ERROR_SAME_LOCALHOST "Don't hit me baby, you are trying to stream my localhost back"
#define START_STREAMING_ERROR_ALREADY_STREAMING "This GUID is already streaming to this server"
#define START_STREAMING_ERROR_NOT_PERMITTED "You are not permitted to access this. Check the logs for more info."
+#define START_STREAMING_ERROR_BUSY_TRY_LATER "The server is too busy now to accept this request. Try later."
+#define START_STREAMING_ERROR_INTERNAL_ERROR "The server encountered an internal error. Try later."
+#define START_STREAMING_ERROR_INITIALIZATION "The server is initializing. Try later."
typedef enum {
STREAM_HANDSHAKE_OK_V5 = 5, // COMPRESSION
@@ -87,12 +90,27 @@ typedef enum {
STREAM_HANDSHAKE_ERROR_RECEIVE_TIMEOUT = -6,
STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE = -7,
STREAM_HANDSHAKE_ERROR_SSL_ERROR = -8,
- STREAM_HANDSHAKE_ERROR_CANT_CONNECT = -9
+ STREAM_HANDSHAKE_ERROR_CANT_CONNECT = -9,
+ STREAM_HANDSHAKE_BUSY_TRY_LATER = -10,
+ STREAM_HANDSHAKE_INTERNAL_ERROR = -11,
+ STREAM_HANDSHAKE_INITIALIZATION = -12,
} STREAM_HANDSHAKE;
// ----------------------------------------------------------------------------
+typedef enum __attribute__((packed)) {
+ STREAM_TRAFFIC_TYPE_REPLICATION,
+ STREAM_TRAFFIC_TYPE_FUNCTIONS,
+ STREAM_TRAFFIC_TYPE_METADATA,
+ STREAM_TRAFFIC_TYPE_DATA,
+
+ // terminator
+ STREAM_TRAFFIC_TYPE_MAX,
+} STREAM_TRAFFIC_TYPE;
+
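With a _MAX terminator, the enum doubles as an array size: sender_commit() (see streaming/sender.c below) indexes a per-type byte counter with it. A stand-in sketch of that layout:

    #include <stddef.h>

    typedef enum {
        TRAFFIC_REPLICATION,
        TRAFFIC_FUNCTIONS,
        TRAFFIC_METADATA,
        TRAFFIC_DATA,
        TRAFFIC_MAX,                    /* terminator, sizes the array below */
    } traffic_type;

    struct sender_accounting {
        size_t sent_bytes_per_type[TRAFFIC_MAX];
    };

    /* Credit len bytes to one bucket, as sender_commit() does after the
     * circular buffer accepts the payload. */
    static void account_bytes(struct sender_accounting *a, traffic_type t, size_t len) {
        a->sent_bytes_per_type[t] += len;
    }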
+// ----------------------------------------------------------------------------
+
typedef struct {
char *os_name;
char *os_id;
@@ -148,6 +166,7 @@ struct sender_state {
size_t sent_bytes_on_this_connection;
size_t send_attempts;
time_t last_traffic_seen_t;
+ time_t last_state_since_t; // the timestamp of the last state (online/offline) change
size_t not_connected_loops;
// Metrics are collected asynchronously by collector threads calling rrdset_done_push(). This can also trigger
// the lazy creation of the sender thread - both cases (buffer access and thread creation) are guarded here.
@@ -157,6 +176,8 @@ struct sender_state {
int read_len;
STREAM_CAPABILITIES capabilities;
+ size_t sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_MAX];
+
int rrdpush_sender_pipe[2]; // collector to sender thread signaling
int rrdpush_sender_socket;
@@ -166,7 +187,7 @@ struct sender_state {
struct compressor_state *compressor;
#endif
#ifdef ENABLE_HTTPS
- struct netdata_ssl ssl; // structure used to encrypt the connection
+ NETDATA_SSL ssl; // structure used to encrypt the connection
#endif
struct {
@@ -176,6 +197,8 @@ struct sender_state {
struct {
DICTIONARY *requests; // de-duplication of replication requests, per chart
+ time_t oldest_request_after_t; // the timestamp of the oldest replication request
+ time_t latest_completed_before_t; // the timestamp of the latest replication request
struct {
size_t pending_requests; // the currently outstanding replication requests
@@ -221,6 +244,7 @@ struct sender_state {
struct receiver_state {
RRDHOST *host;
+ pid_t tid;
netdata_thread_t thread;
int fd;
char *key;
@@ -266,7 +290,7 @@ struct receiver_state {
} config;
#ifdef ENABLE_HTTPS
- struct netdata_ssl ssl;
+ NETDATA_SSL ssl;
#endif
#ifdef ENABLE_COMPRESSION
unsigned int rrdpush_compression;
@@ -278,8 +302,10 @@ struct receiver_state {
struct rrdpush_destinations {
STRING *destination;
+ bool ssl;
const char *last_error;
+ time_t last_attempt;
time_t postpone_reconnection_until;
STREAM_HANDSHAKE last_handshake;
@@ -303,7 +329,7 @@ void rrdpush_destinations_init(RRDHOST *host);
void rrdpush_destinations_free(RRDHOST *host);
BUFFER *sender_start(struct sender_state *s);
-void sender_commit(struct sender_state *s, BUFFER *wb);
+void sender_commit(struct sender_state *s, BUFFER *wb, STREAM_TRAFFIC_TYPE type);
int rrdpush_init();
bool rrdpush_receiver_needs_dbengine();
int configured_as_parent();
@@ -351,7 +377,9 @@ void rrdpush_signal_sender_to_wake_up(struct sender_state *s);
struct compressor_state *create_compressor();
struct decompressor_state *create_decompressor();
#endif
-
+void rrdpush_reset_destinations_postpone_time(RRDHOST *host);
+const char *stream_handshake_error_to_string(STREAM_HANDSHAKE handshake_error);
+void stream_capabilities_to_json_array(BUFFER *wb, STREAM_CAPABILITIES caps, const char *key);
void rrdpush_receive_log_status(struct receiver_state *rpt, const char *msg, const char *status);
void log_receiver_capabilities(struct receiver_state *rpt);
void log_sender_capabilities(struct sender_state *s);
@@ -363,6 +391,9 @@ bool stop_streaming_receiver(RRDHOST *host, const char *reason);
void sender_thread_buffer_free(void);
+void rrdhost_receiver_to_json(BUFFER *wb, RRDHOST *host, const char *key, time_t now __maybe_unused);
+void rrdhost_sender_to_json(BUFFER *wb, RRDHOST *host, const char *key, time_t now __maybe_unused);
+
#include "replication.h"
#endif //NETDATA_RRDPUSH_H
diff --git a/streaming/sender.c b/streaming/sender.c
index 179c2dc60..c74c9b407 100644
--- a/streaming/sender.c
+++ b/streaming/sender.c
@@ -29,7 +29,6 @@
#endif
extern struct config stream_config;
-extern int netdata_use_ssl_on_stream;
extern char *netdata_ssl_ca_path;
extern char *netdata_ssl_ca_file;
@@ -85,7 +84,7 @@ static inline void deactivate_compression(struct sender_state *s) {
#define SENDER_BUFFER_ADAPT_TO_TIMES_MAX_SIZE 3
// Collector thread finishing a transmission
-void sender_commit(struct sender_state *s, BUFFER *wb) {
+void sender_commit(struct sender_state *s, BUFFER *wb, STREAM_TRAFFIC_TYPE type) {
if(unlikely(wb != sender_thread_buffer))
fatal("STREAMING: sender is trying to commit a buffer that is not this thread's buffer.");
@@ -164,6 +163,8 @@ void sender_commit(struct sender_state *s, BUFFER *wb) {
if(cbuffer_add_unsafe(s->buffer, dst, dst_len))
s->flags |= SENDER_FLAG_OVERFLOW;
+ else
+ s->sent_bytes_on_this_connection_per_type[type] += dst_len;
src = src + size_to_compress;
src_len -= size_to_compress;
@@ -171,9 +172,13 @@ void sender_commit(struct sender_state *s, BUFFER *wb) {
}
else if(cbuffer_add_unsafe(s->buffer, src, src_len))
s->flags |= SENDER_FLAG_OVERFLOW;
+ else
+ s->sent_bytes_on_this_connection_per_type[type] += src_len;
#else
if(cbuffer_add_unsafe(s->buffer, src, src_len))
s->flags |= SENDER_FLAG_OVERFLOW;
+ else
+ s->sent_bytes_on_this_connection_per_type[type] += src_len;
#endif
replication_recalculate_buffer_used_ratio_unsafe(s);
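Taken together, the hunks above make every committed buffer carry a traffic class, so the sender can attribute bytes per class for the current connection. A minimal caller-side sketch (hypothetical payload; it mirrors the real call sites updated below):

    BUFFER *wb = sender_start(host->sender);
    buffer_strcat(wb, "CLAIMED_ID ...\n");                      // hypothetical metadata payload
    sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
    sender_thread_buffer_free();
    // the bytes queued by this commit are now counted in
    // s->sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]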
@@ -205,7 +210,7 @@ void rrdpush_sender_send_this_host_variable_now(RRDHOST *host, const RRDVAR_ACQU
if(rrdhost_can_send_definitions_to_parent(host)) {
BUFFER *wb = sender_start(host->sender);
rrdpush_sender_add_host_variable_to_buffer(wb, rva);
- sender_commit(host->sender, wb);
+ sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
sender_thread_buffer_free();
}
}
@@ -234,7 +239,7 @@ static void rrdpush_sender_thread_send_custom_host_variables(RRDHOST *host) {
};
int ret = rrdvar_walkthrough_read(host->rrdvars, rrdpush_sender_thread_custom_host_variables_callback, &tmp);
(void)ret;
- sender_commit(host->sender, wb);
+ sender_commit(host->sender, wb, STREAM_TRAFFIC_TYPE_METADATA);
sender_thread_buffer_free();
debug(D_STREAM, "RRDVAR sent %d VARIABLES", ret);
@@ -320,6 +325,10 @@ static void rrdpush_sender_after_connect(RRDHOST *host) {
}
static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) {
+#ifdef ENABLE_HTTPS
+ netdata_ssl_close(&host->sender->ssl);
+#endif
+
if(host->sender->rrdpush_sender_socket != -1) {
close(host->sender->rrdpush_sender_socket);
host->sender->rrdpush_sender_socket = -1;
@@ -335,11 +344,11 @@ static inline void rrdpush_sender_thread_close_socket(RRDHOST *host) {
void rrdpush_encode_variable(stream_encoded_t *se, RRDHOST *host)
{
- se->os_name = (host->system_info->host_os_name)?url_encode(host->system_info->host_os_name):"";
- se->os_id = (host->system_info->host_os_id)?url_encode(host->system_info->host_os_id):"";
- se->os_version = (host->system_info->host_os_version)?url_encode(host->system_info->host_os_version):"";
- se->kernel_name = (host->system_info->kernel_name)?url_encode(host->system_info->kernel_name):"";
- se->kernel_version = (host->system_info->kernel_version)?url_encode(host->system_info->kernel_version):"";
+ se->os_name = (host->system_info->host_os_name)?url_encode(host->system_info->host_os_name):strdupz("");
+ se->os_id = (host->system_info->host_os_id)?url_encode(host->system_info->host_os_id):strdupz("");
+ se->os_version = (host->system_info->host_os_version)?url_encode(host->system_info->host_os_version):strdupz("");
+ se->kernel_name = (host->system_info->kernel_name)?url_encode(host->system_info->kernel_name):strdupz("");
+ se->kernel_version = (host->system_info->kernel_version)?url_encode(host->system_info->kernel_version):strdupz("");
}
void rrdpush_clean_encoded(stream_encoded_t *se)
@@ -423,6 +432,33 @@ struct {
.worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
.postpone_reconnect_seconds = 1 * 60, // 1 minute
},
+ {
+ .response = START_STREAMING_ERROR_BUSY_TRY_LATER,
+ .length = sizeof(START_STREAMING_ERROR_BUSY_TRY_LATER) - 1,
+ .version = STREAM_HANDSHAKE_BUSY_TRY_LATER,
+ .dynamic = false,
+ .error = "remote server is currently busy, we should try later",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 2 * 60, // 2 minutes
+ },
+ {
+ .response = START_STREAMING_ERROR_INTERNAL_ERROR,
+ .length = sizeof(START_STREAMING_ERROR_INTERNAL_ERROR) - 1,
+ .version = STREAM_HANDSHAKE_INTERNAL_ERROR,
+ .dynamic = false,
+ .error = "remote server encountered an internal error, we should try later",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 5 * 60, // 5 minutes
+ },
+ {
+ .response = START_STREAMING_ERROR_INITIALIZATION,
+ .length = sizeof(START_STREAMING_ERROR_INITIALIZATION) - 1,
+ .version = STREAM_HANDSHAKE_INITIALIZATION,
+ .dynamic = false,
+ .error = "remote server is initializing, we should try later",
+ .worker_job_id = WORKER_SENDER_JOB_DISCONNECT_BAD_HANDSHAKE,
+ .postpone_reconnect_seconds = 2 * 60, // 2 minutes
+ },
// terminator
{
@@ -480,6 +516,53 @@ static inline bool rrdpush_sender_validate_response(RRDHOST *host, struct sender
return false;
}
+static bool rrdpush_sender_connect_ssl(struct sender_state *s) {
+#ifdef ENABLE_HTTPS
+ RRDHOST *host = s->host;
+ bool ssl_required = host->destination && host->destination->ssl;
+
+ netdata_ssl_close(&host->sender->ssl);
+
+ if(!ssl_required)
+ return true;
+
+ if (netdata_ssl_open(&host->sender->ssl, netdata_ssl_streaming_sender_ctx, s->rrdpush_sender_socket)) {
+ if(!netdata_ssl_connect(&host->sender->ssl)) {
+ // couldn't connect
+
+ worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR);
+ rrdpush_sender_thread_close_socket(host);
+ host->destination->last_error = "SSL error";
+ host->destination->last_handshake = STREAM_HANDSHAKE_ERROR_SSL_ERROR;
+ host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60;
+ return false;
+ }
+
+ if (netdata_ssl_validate_certificate_sender &&
+ security_test_certificate(host->sender->ssl.conn)) {
+ // certificate is not valid
+
+ worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR);
+ error("SSL: closing the stream connection, because the server SSL certificate is not valid.");
+ rrdpush_sender_thread_close_socket(host);
+ host->destination->last_error = "invalid SSL certificate";
+ host->destination->last_handshake = STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE;
+ host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60;
+ return false;
+ }
+
+ return true;
+ }
+
+ // failed to establish connection
+ return false;
+
+#else
+ // SSL is not enabled
+ return true;
+#endif
+}
+
static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_port, int timeout, struct sender_state *s) {
struct timeval tv = {
@@ -507,35 +590,6 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
// info("STREAM %s [send to %s]: initializing communication...", rrdhost_hostname(host), s->connected_to);
-#ifdef ENABLE_HTTPS
- if(netdata_ssl_client_ctx){
- host->sender->ssl.flags = NETDATA_SSL_START;
- if (!host->sender->ssl.conn){
- host->sender->ssl.conn = SSL_new(netdata_ssl_client_ctx);
- if(!host->sender->ssl.conn){
- error("Failed to allocate SSL structure.");
- host->sender->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
- }
- }
- else{
- SSL_clear(host->sender->ssl.conn);
- }
-
- if (host->sender->ssl.conn)
- {
- if (SSL_set_fd(host->sender->ssl.conn, s->rrdpush_sender_socket) != 1) {
- error("Failed to set the socket to the SSL on socket fd %d.", s->rrdpush_sender_socket);
- host->sender->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
- } else{
- host->sender->ssl.flags = NETDATA_SSL_HANDSHAKE_COMPLETE;
- }
- }
- }
- else {
- host->sender->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
- }
-#endif
-
// reset our capabilities to default
s->capabilities = stream_our_capabilities();
@@ -651,43 +705,8 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
http[eol] = 0x00;
rrdpush_clean_encoded(&se);
-#ifdef ENABLE_HTTPS
- if (!host->sender->ssl.flags) {
- ERR_clear_error();
- SSL_set_connect_state(host->sender->ssl.conn);
- int err = SSL_connect(host->sender->ssl.conn);
- if (err != 1){
- err = SSL_get_error(host->sender->ssl.conn, err);
- error("SSL cannot connect with the server: %s ",ERR_error_string((long)SSL_get_error(host->sender->ssl.conn,err),NULL));
- if (netdata_use_ssl_on_stream == NETDATA_SSL_FORCE) {
- worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR);
- rrdpush_sender_thread_close_socket(host);
- host->destination->last_error = "SSL error";
- host->destination->last_handshake = STREAM_HANDSHAKE_ERROR_SSL_ERROR;
- host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60;
- return false;
- }
- else {
- host->sender->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
- }
- }
- else {
- if (netdata_use_ssl_on_stream == NETDATA_SSL_FORCE) {
- if (netdata_ssl_validate_server == NETDATA_SSL_VALID_CERTIFICATE) {
- if ( security_test_certificate(host->sender->ssl.conn)) {
- worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR);
- error("Closing the stream connection, because the server SSL certificate is not valid.");
- rrdpush_sender_thread_close_socket(host);
- host->destination->last_error = "invalid SSL certificate";
- host->destination->last_handshake = STREAM_HANDSHAKE_ERROR_INVALID_CERTIFICATE;
- host->destination->postpone_reconnection_until = now_realtime_sec() + 5 * 60;
- return false;
- }
- }
- }
- }
- }
-#endif
+ if(!rrdpush_sender_connect_ssl(s))
+ return false;
ssize_t bytes;
@@ -733,6 +752,12 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
return false;
}
+ if(sock_setnonblock(s->rrdpush_sender_socket) < 0)
+ error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", rrdhost_hostname(host), s->connected_to);
+
+ if(sock_enlarge_out(s->rrdpush_sender_socket) < 0)
+ error("STREAM %s [send to %s]: cannot enlarge the socket buffer.", rrdhost_hostname(host), s->connected_to);
+
http[bytes] = '\0';
debug(D_STREAM, "Response to sender from far end: %s", http);
if(!rrdpush_sender_validate_response(host, s, http, bytes))
@@ -749,12 +774,6 @@ static bool rrdpush_sender_thread_connect_to_parent(RRDHOST *host, int default_p
log_sender_capabilities(s);
- if(sock_setnonblock(s->rrdpush_sender_socket) < 0)
- error("STREAM %s [send to %s]: cannot set non-blocking mode for socket.", rrdhost_hostname(host), s->connected_to);
-
- if(sock_enlarge_out(s->rrdpush_sender_socket) < 0)
- error("STREAM %s [send to %s]: cannot enlarge the socket buffer.", rrdhost_hostname(host), s->connected_to);
-
debug(D_STREAM, "STREAM: Connected on fd %d...", s->rrdpush_sender_socket);
return true;
@@ -764,6 +783,10 @@ static bool attempt_to_connect(struct sender_state *state)
{
state->send_attempts = 0;
+ // reset the bytes we have sent for this session
+ state->sent_bytes_on_this_connection = 0;
+ memset(state->sent_bytes_on_this_connection_per_type, 0, sizeof(state->sent_bytes_on_this_connection_per_type));
+
if(rrdpush_sender_thread_connect_to_parent(state->host, state->default_port, state->timeout, state)) {
// reset the buffer, to properly send charts and metrics
rrdpush_sender_on_connect(state->host);
@@ -774,9 +797,6 @@ static bool attempt_to_connect(struct sender_state *state)
// make sure the next reconnection will be immediate
state->not_connected_loops = 0;
- // reset the bytes we have sent for this session
- state->sent_bytes_on_this_connection = 0;
-
// let the data collection threads know we are ready
rrdhost_flag_set(state->host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED);
@@ -790,9 +810,6 @@ static bool attempt_to_connect(struct sender_state *state)
// increase the failed connections counter
state->not_connected_loops++;
- // reset the number of bytes sent
- state->sent_bytes_on_this_connection = 0;
-
// slow re-connection on repeating errors
usec_t now_ut = now_monotonic_usec();
usec_t end_ut = now_ut + USEC_PER_SEC * state->reconnect_delay;
@@ -819,9 +836,8 @@ static ssize_t attempt_to_send(struct sender_state *s) {
debug(D_STREAM, "STREAM: Sending data. Buffer r=%zu w=%zu s=%zu, next chunk=%zu", cb->read, cb->write, cb->size, outstanding);
#ifdef ENABLE_HTTPS
- SSL *conn = s->ssl.conn ;
- if(conn && s->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE)
- ret = netdata_ssl_write(conn, chunk, outstanding);
+ if(SSL_connection(&s->ssl))
+ ret = netdata_ssl_write(&s->ssl, chunk, outstanding);
else
ret = send(s->rrdpush_sender_socket, chunk, outstanding, MSG_DONTWAIT);
#else
@@ -852,25 +868,17 @@ static ssize_t attempt_to_send(struct sender_state *s) {
}
static ssize_t attempt_read(struct sender_state *s) {
- ssize_t ret = 0;
+ ssize_t ret;
#ifdef ENABLE_HTTPS
- if (s->ssl.conn && s->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE) {
- size_t desired = sizeof(s->read_buffer) - s->read_len - 1;
- ret = netdata_ssl_read(s->ssl.conn, s->read_buffer, desired);
- if (ret > 0 ) {
- s->read_len += (int)ret;
- return ret;
- }
-
- if (ret == -1) {
- worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR);
- rrdpush_sender_thread_close_socket(s->host);
- }
- return ret;
- }
-#endif
+ if (SSL_connection(&s->ssl))
+ ret = netdata_ssl_read(&s->ssl, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1);
+ else
+ ret = recv(s->rrdpush_sender_socket, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1,MSG_DONTWAIT);
+#else
ret = recv(s->rrdpush_sender_socket, s->read_buffer + s->read_len, sizeof(s->read_buffer) - s->read_len - 1,MSG_DONTWAIT);
+#endif
+
if (ret > 0) {
s->read_len += ret;
return ret;
@@ -879,6 +887,12 @@ static ssize_t attempt_read(struct sender_state *s) {
if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK || errno == EINTR))
return ret;
+#ifdef ENABLE_HTTPS
+ if (SSL_connection(&s->ssl))
+ worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_SSL_ERROR);
+ else
+#endif
+
if (ret == 0 || errno == ECONNRESET) {
worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_PARENT_CLOSED);
error("STREAM %s [send to %s]: connection closed by far end.", rrdhost_hostname(s->host), s->connected_to);
@@ -887,6 +901,7 @@ static ssize_t attempt_read(struct sender_state *s) {
worker_is_busy(WORKER_SENDER_JOB_DISCONNECT_RECEIVE_ERROR);
error("STREAM %s [send to %s]: error during receive (%zd) - closing connection.", rrdhost_hostname(s->host), s->connected_to, ret);
}
+
rrdpush_sender_thread_close_socket(s->host);
return ret;
@@ -915,7 +930,7 @@ void stream_execute_function_callback(BUFFER *func_wb, int code, void *data) {
buffer_fast_strcat(wb, buffer_tostring(func_wb), buffer_strlen(func_wb));
pluginsd_function_result_end_to_buffer(wb);
- sender_commit(s, wb);
+ sender_commit(s, wb, STREAM_TRAFFIC_TYPE_FUNCTIONS);
sender_thread_buffer_free();
internal_error(true, "STREAM %s [send to %s] FUNCTION transaction %s sending back response (%zu bytes, %llu usec).",
@@ -1083,6 +1098,119 @@ void rrdpush_signal_sender_to_wake_up(struct sender_state *s) {
}
}
+static NETDATA_DOUBLE rrdhost_sender_replication_completion(RRDHOST *host, time_t now, size_t *instances) {
+ size_t charts = rrdhost_sender_replicating_charts(host);
+ NETDATA_DOUBLE completion;
+ if(!charts || !host->sender || !host->sender->replication.oldest_request_after_t)
+ completion = 100.0;
+ else if(!host->sender->replication.latest_completed_before_t || host->sender->replication.latest_completed_before_t < host->sender->replication.oldest_request_after_t)
+ completion = 0.0;
+ else {
+ time_t total = now - host->sender->replication.oldest_request_after_t;
+ time_t current = host->sender->replication.latest_completed_before_t - host->sender->replication.oldest_request_after_t;
+ completion = (NETDATA_DOUBLE) current * 100.0 / (NETDATA_DOUBLE) total;
+ }
+
+ *instances = charts;
+
+ return completion;
+}
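The completion figure is a linear interpolation between the oldest outstanding request and the latest completed one. A worked sketch with hypothetical timestamps:

    time_t now    = 1700000000;            // hypothetical wall-clock time
    time_t oldest = now - 100;             // replication.oldest_request_after_t
    time_t latest = now - 25;              // replication.latest_completed_before_t
    NETDATA_DOUBLE completion =
        (NETDATA_DOUBLE)(latest - oldest) * 100.0 / (NETDATA_DOUBLE)(now - oldest);
    // completion == 75.0 (%), i.e. 75 of the last 100 seconds are already replicated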
+
+void rrdhost_sender_to_json(BUFFER *wb, RRDHOST *host, const char *key, time_t now __maybe_unused) {
+ bool online = rrdhost_flag_check(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED);
+ buffer_json_member_add_object(wb, key);
+
+ if(host->sender)
+ buffer_json_member_add_uint64(wb, "hops", host->sender->hops);
+
+ buffer_json_member_add_boolean(wb, "online", online);
+
+ if(host->sender && host->sender->last_state_since_t) {
+ buffer_json_member_add_time_t(wb, "since", host->sender->last_state_since_t);
+ buffer_json_member_add_time_t(wb, "age", now - host->sender->last_state_since_t);
+ }
+
+ if(!online && host->sender && host->sender->exit.reason)
+ buffer_json_member_add_string(wb, "reason", host->sender->exit.reason);
+
+ buffer_json_member_add_object(wb, "replication");
+ {
+ size_t instances;
+ NETDATA_DOUBLE completion = rrdhost_sender_replication_completion(host, now, &instances);
+ buffer_json_member_add_boolean(wb, "in_progress", instances);
+ buffer_json_member_add_double(wb, "completion", completion);
+ buffer_json_member_add_uint64(wb, "instances", instances);
+ }
+ buffer_json_object_close(wb);
+
+ if(host->sender) {
+ netdata_mutex_lock(&host->sender->mutex);
+
+ buffer_json_member_add_object(wb, "destination");
+ {
+ char buf[1024 + 1];
+ if(online && host->sender->rrdpush_sender_socket != -1) {
+ SOCKET_PEERS peers = socket_peers(host->sender->rrdpush_sender_socket);
+ bool ssl = SSL_connection(&host->sender->ssl);
+
+ snprintfz(buf, 1024, "[%s]:%d%s", peers.local.ip, peers.local.port, ssl ? ":SSL" : "");
+ buffer_json_member_add_string(wb, "local", buf);
+
+ snprintfz(buf, 1024, "[%s]:%d%s", peers.peer.ip, peers.peer.port, ssl ? ":SSL" : "");
+ buffer_json_member_add_string(wb, "remote", buf);
+
+ stream_capabilities_to_json_array(wb, host->sender->capabilities, "capabilities");
+
+ buffer_json_member_add_object(wb, "traffic");
+ {
+ bool compression = false;
+#ifdef ENABLE_COMPRESSION
+ compression = (stream_has_capability(host->sender, STREAM_CAP_COMPRESSION) && host->sender->compressor);
+#endif
+ buffer_json_member_add_boolean(wb, "compression", compression);
+ buffer_json_member_add_uint64(wb, "data", host->sender->sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_DATA]);
+ buffer_json_member_add_uint64(wb, "metadata", host->sender->sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_METADATA]);
+ buffer_json_member_add_uint64(wb, "functions", host->sender->sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_FUNCTIONS]);
+ buffer_json_member_add_uint64(wb, "replication", host->sender->sent_bytes_on_this_connection_per_type[STREAM_TRAFFIC_TYPE_REPLICATION]);
+ }
+ buffer_json_object_close(wb); // traffic
+ }
+
+ buffer_json_member_add_array(wb, "candidates");
+ struct rrdpush_destinations *d;
+ for (d = host->destinations; d; d = d->next) {
+ buffer_json_add_array_item_object(wb);
+ {
+ if (d->ssl) {
+ snprintfz(buf, 1024, "%s:SSL", string2str(d->destination));
+ buffer_json_member_add_string(wb, "destination", buf);
+ }
+ else
+ buffer_json_member_add_string(wb, "destination", string2str(d->destination));
+
+ buffer_json_member_add_time_t(wb, "last_check", d->last_attempt);
+ buffer_json_member_add_time_t(wb, "age", now - d->last_attempt);
+ buffer_json_member_add_string(wb, "last_error", d->last_error);
+ buffer_json_member_add_string(wb, "last_handshake",
+ stream_handshake_error_to_string(d->last_handshake));
+ buffer_json_member_add_time_t(wb, "next_check", d->postpone_reconnection_until);
+ buffer_json_member_add_time_t(wb, "next_in",
+ (d->postpone_reconnection_until > now) ?
+ d->postpone_reconnection_until - now : 0);
+ }
+ buffer_json_object_close(wb); // each candidate
+ }
+ buffer_json_array_close(wb); // candidates
+ }
+ buffer_json_object_close(wb); // destination
+
+ netdata_mutex_unlock(&host->sender->mutex);
+ }
+
+ buffer_json_object_close(wb); // streaming
+}
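For reference, the function above emits an object of roughly this shape (the top-level key is caller-provided and all values here are hypothetical; the keys come from the code above):

    "sender": {
        "hops": 1,
        "online": true,
        "since": 1700000000,
        "age": 42,
        "replication": { "in_progress": false, "completion": 100.0, "instances": 0 },
        "destination": {
            "local": "[10.1.2.3]:39062:SSL",
            "remote": "[10.1.2.4]:19999:SSL",
            "capabilities": [ "..." ],
            "traffic": { "compression": true, "data": 123456, "metadata": 7890, "functions": 0, "replication": 0 },
            "candidates": [ { "destination": "...", "last_check": 1699999000, "...": "..." } ]
        }
    }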
+
static bool rrdhost_set_sender(RRDHOST *host) {
if(unlikely(!host->sender)) return false;
@@ -1092,10 +1220,14 @@ static bool rrdhost_set_sender(RRDHOST *host) {
rrdhost_flag_clear(host, RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED | RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS);
rrdhost_flag_set(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN);
host->sender->tid = gettid();
+ host->sender->last_state_since_t = now_realtime_sec();
+ host->sender->exit.reason = NULL;
ret = true;
}
netdata_mutex_unlock(&host->sender->mutex);
+ rrdpush_reset_destinations_postpone_time(host);
+
return ret;
}
@@ -1105,9 +1237,11 @@ static void rrdhost_clear_sender___while_having_sender_mutex(RRDHOST *host) {
if(host->sender->tid == gettid()) {
host->sender->tid = 0;
host->sender->exit.shutdown = false;
- host->sender->exit.reason = NULL;
rrdhost_flag_clear(host, RRDHOST_FLAG_RRDPUSH_SENDER_SPAWN | RRDHOST_FLAG_RRDPUSH_SENDER_CONNECTED | RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS);
+ host->sender->last_state_since_t = now_realtime_sec();
}
+
+ rrdpush_reset_destinations_postpone_time(host);
}
static bool rrdhost_sender_should_exit(struct sender_state *s) {
@@ -1134,7 +1268,7 @@ static bool rrdhost_sender_should_exit(struct sender_state *s) {
if(unlikely(rrdhost_flag_check(s->host, RRDHOST_FLAG_ORPHAN))) {
if(!s->exit.reason)
- s->exit.reason = "RECEIVER LEFT";
+ s->exit.reason = "RECEIVER LEFT (ORPHAN HOST)";
return true;
}
@@ -1162,6 +1296,32 @@ static void rrdpush_sender_thread_cleanup_callback(void *ptr) {
freez(s);
}
+void rrdpush_initialize_ssl_ctx(RRDHOST *host) {
+#ifdef ENABLE_HTTPS
+ static SPINLOCK sp = NETDATA_SPINLOCK_INITIALIZER;
+ netdata_spinlock_lock(&sp);
+
+ if(netdata_ssl_streaming_sender_ctx || !host) {
+ netdata_spinlock_unlock(&sp);
+ return;
+ }
+
+ for(struct rrdpush_destinations *d = host->destinations; d ; d = d->next) {
+ if (d->ssl) {
+ // we need to initialize SSL
+
+ netdata_ssl_initialize_ctx(NETDATA_SSL_STREAMING_SENDER_CTX);
+ ssl_security_location_for_context(netdata_ssl_streaming_sender_ctx, netdata_ssl_ca_file, netdata_ssl_ca_path);
+
+ // stop the loop
+ break;
+ }
+ }
+
+ netdata_spinlock_unlock(&sp);
+#endif
+}
+
void *rrdpush_sender_thread(void *ptr) {
worker_register("STREAMSND");
worker_register_job_name(WORKER_SENDER_JOB_CONNECT, "connect");
@@ -1206,17 +1366,7 @@ void *rrdpush_sender_thread(void *ptr) {
return NULL;
}
-#ifdef ENABLE_HTTPS
- if (netdata_use_ssl_on_stream & NETDATA_SSL_FORCE ) {
- static SPINLOCK sp = NETDATA_SPINLOCK_INITIALIZER;
- netdata_spinlock_lock(&sp);
- if(!netdata_ssl_client_ctx) {
- security_start_ssl(NETDATA_SSL_CONTEXT_STREAMING);
- ssl_security_location_for_context(netdata_ssl_client_ctx, netdata_ssl_ca_file, netdata_ssl_ca_path);
- }
- netdata_spinlock_unlock(&sp);
- }
-#endif
+ rrdpush_initialize_ssl_ctx(s->host);
info("STREAM %s [send]: thread created (task id %d)", rrdhost_hostname(s->host), gettid());
@@ -1287,6 +1437,7 @@ void *rrdpush_sender_thread(void *ptr) {
now_s = s->last_traffic_seen_t = now_monotonic_sec();
rrdpush_claimed_id(s->host);
rrdpush_send_host_labels(s->host);
+ s->replication.oldest_request_after_t = 0;
rrdhost_flag_set(s->host, RRDHOST_FLAG_RRDPUSH_SENDER_READY_4_METRICS);
info("STREAM %s [send to %s]: enabling metrics streaming...", rrdhost_hostname(s->host), s->connected_to);
diff --git a/web/api/formatters/json/json.c b/web/api/formatters/json/json.c
index d5b8c7570..3a7a23ba1 100644
--- a/web/api/formatters/json/json.c
+++ b/web/api/formatters/json/json.c
@@ -244,12 +244,12 @@ void rrdr2json(RRDR *r, BUFFER *wb, RRDR_OPTIONS options, int datatable) {
//info("RRD2JSON(): %s: END", r->st->id);
}
-
void rrdr2json_v2(RRDR *r, BUFFER *wb) {
QUERY_TARGET *qt = r->internal.qt;
RRDR_OPTIONS options = qt->window.options;
- bool expose_gbc = query_target_aggregatable(qt);
+ bool send_count = query_target_aggregatable(qt);
+ bool send_hidden = send_count && r->vh && query_has_group_by_aggregation_percentage(qt);
buffer_json_member_add_object(wb, "result");
@@ -267,12 +267,17 @@ void rrdr2json_v2(RRDR *r, BUFFER *wb) {
buffer_json_array_close(wb); // labels
buffer_json_member_add_object(wb, "point");
- buffer_json_member_add_uint64(wb, "value", 0);
- buffer_json_member_add_uint64(wb, "arp", 1);
- buffer_json_member_add_uint64(wb, "pa", 2);
- if(expose_gbc)
- buffer_json_member_add_uint64(wb, "count", 3);
- buffer_json_object_close(wb);
+ {
+ size_t point_count = 0;
+ buffer_json_member_add_uint64(wb, "value", point_count++);
+ buffer_json_member_add_uint64(wb, "arp", point_count++);
+ buffer_json_member_add_uint64(wb, "pa", point_count++);
+ if (send_count)
+ buffer_json_member_add_uint64(wb, "count", point_count++);
+ if (send_hidden)
+ buffer_json_member_add_uint64(wb, "hidden", point_count++);
+ }
+ buffer_json_object_close(wb); // point
buffer_json_member_add_array(wb, "data");
if(i) {
@@ -286,6 +291,7 @@ void rrdr2json_v2(RRDR *r, BUFFER *wb) {
// for each line in the array
for (i = start; i != end; i += step) {
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
+ NETDATA_DOUBLE *ch = send_hidden ? &r->vh[i * r->d ] : NULL;
RRDR_VALUE_FLAGS *co = &r->o[ i * r->d ];
NETDATA_DOUBLE *ar = &r->ar[ i * r->d ];
uint32_t *gbc = &r->gbc [ i * r->d ];
@@ -325,8 +331,10 @@ void rrdr2json_v2(RRDR *r, BUFFER *wb) {
buffer_json_add_array_item_uint64(wb, o);
// add the count
- if(expose_gbc)
+ if(send_count)
buffer_json_add_array_item_uint64(wb, gbc[d]);
+ if(send_hidden)
+ buffer_json_add_array_item_double(wb, ch[d]);
buffer_json_array_close(wb); // point
}
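With the new flags, the point descriptor emitted above maps every slot of a per-point array to its index; when both send_count and send_hidden are true it reads:

    "point": { "value": 0, "arp": 1, "pa": 2, "count": 3, "hidden": 4 }

Each per-point array in data then carries its values in exactly that order, with count appended only for aggregatable (raw) queries and hidden only when a percentage group-by aggregation is present.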
diff --git a/web/api/formatters/rrd2json.h b/web/api/formatters/rrd2json.h
index def26c754..ca3a41aae 100644
--- a/web/api/formatters/rrd2json.h
+++ b/web/api/formatters/rrd2json.h
@@ -87,7 +87,7 @@ int rrdset2value_api_v1(
);
static inline bool rrdr_dimension_should_be_exposed(RRDR_DIMENSION_FLAGS rrdr_dim_flags, RRDR_OPTIONS options) {
- if(unlikely(options & RRDR_OPTION_RETURN_RAW))
+ if(unlikely((options & RRDR_OPTION_RETURN_RAW) && (rrdr_dim_flags & RRDR_DIMENSION_QUERIED)))
return true;
if(unlikely(rrdr_dim_flags & RRDR_DIMENSION_HIDDEN)) return false;
diff --git a/web/api/netdata-swagger.yaml b/web/api/netdata-swagger.yaml
index c25f0b719..b050f3407 100644
--- a/web/api/netdata-swagger.yaml
+++ b/web/api/netdata-swagger.yaml
@@ -241,6 +241,7 @@ paths:
A comma separated list of the groupings required.
All possible values can be combined together, except `selected`. If `selected` is given in the list, all others are ignored.
The order they are placed in the list is currently ignored.
+ This parameter is also accepted as `group_by[0]` and `group_by[1]` when multiple grouping passes are required.
required: false
schema:
type: array
@@ -261,6 +262,7 @@ paths:
in: query
description: |
A comma separated list of the label keys to group by their values. The order of the labels in the list is respected.
+ This parameter is also accepted as `group_by_label[0]` and `group_by_label[1]` when multiple grouping passes are required.
required: false
schema:
type: string
@@ -271,6 +273,7 @@ paths:
description: |
The aggregation function to apply when grouping metrics together.
When option `raw` is given, `average` and `avg` behave like `sum` and the caller is expected to calculate the average.
+ This parameter is also accepted as `aggregation[0]` and `aggregation[1]` when multiple grouping passes are required.
required: false
schema:
type: string
@@ -280,6 +283,7 @@ paths:
- avg
- average
- sum
+ - percentage
default: average
- $ref: '#/components/parameters/scopeNodes'
- $ref: '#/components/parameters/scopeContexts'
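An illustrative query using two grouping passes with the indexed parameter form documented above (context and values are hypothetical):

    /api/v2/data?contexts=system.cpu&group_by[0]=instance&aggregation[0]=sum&group_by[1]=node&aggregation[1]=percentage&options=raw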
@@ -2741,8 +2745,13 @@ components:
type: integer
count:
description: |
- The number of metrics aggregated into this point. This exists only when the option `raw` is given to the query.
+ The number of metrics aggregated into this point.
+ This exists only when the option `raw` is given to the query and the final aggregation method is NOT `percentage`.
type: integer
+ hidden:
+ description: |
+ The sum of the non-selected dimensions aggregated for this group item point.
+ This exists only when the option `raw` is given to the query and the final aggregation method is `percentage`.
+ type: number
data:
type: array
items:
diff --git a/web/api/queries/query.c b/web/api/queries/query.c
index 3770d4770..a0347f6fe 100644
--- a/web/api/queries/query.c
+++ b/web/api/queries/query.c
@@ -883,6 +883,9 @@ RRDR_GROUP_BY_FUNCTION group_by_aggregate_function_parse(const char *s) {
if(strcmp(s, "sum") == 0)
return RRDR_GROUP_BY_FUNCTION_SUM;
+ if(strcmp(s, "percentage") == 0)
+ return RRDR_GROUP_BY_FUNCTION_PERCENTAGE;
+
return RRDR_GROUP_BY_FUNCTION_AVERAGE;
}
@@ -900,6 +903,9 @@ const char *group_by_aggregate_function_to_string(RRDR_GROUP_BY_FUNCTION group_b
case RRDR_GROUP_BY_FUNCTION_SUM:
return "sum";
+
+ case RRDR_GROUP_BY_FUNCTION_PERCENTAGE:
+ return "percentage";
}
}
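A quick sketch of the round trip the two functions above now support for the new value:

    RRDR_GROUP_BY_FUNCTION f = group_by_aggregate_function_parse("percentage");
    // f == RRDR_GROUP_BY_FUNCTION_PERCENTAGE
    const char *s = group_by_aggregate_function_to_string(f);   // "percentage"
    // any unrecognized string still falls back to RRDR_GROUP_BY_FUNCTION_AVERAGE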
@@ -2555,9 +2561,9 @@ static void rrd2rrdr_set_timestamps(RRDR *r) {
before_wanted, r->t[points_wanted - 1]);
}
-static void query_group_by_make_dimension_key(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_instance) {
+static void query_group_by_make_dimension_key(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_group) {
buffer_flush(key);
- if(unlikely(!query_has_percentage_of_instance && qm->status & RRDR_DIMENSION_HIDDEN)) {
+ if(unlikely(!query_has_percentage_of_group && qm->status & RRDR_DIMENSION_HIDDEN)) {
buffer_strcat(key, "__hidden_dimensions__");
}
else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
@@ -2599,9 +2605,9 @@ static void query_group_by_make_dimension_key(BUFFER *key, RRDR_GROUP_BY group_b
}
}
-static void query_group_by_make_dimension_id(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_instance) {
+static void query_group_by_make_dimension_id(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_group) {
buffer_flush(key);
- if(unlikely(!query_has_percentage_of_instance && qm->status & RRDR_DIMENSION_HIDDEN)) {
+ if(unlikely(!query_has_percentage_of_group && qm->status & RRDR_DIMENSION_HIDDEN)) {
buffer_strcat(key, "__hidden_dimensions__");
}
else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
@@ -2654,9 +2660,9 @@ static void query_group_by_make_dimension_id(BUFFER *key, RRDR_GROUP_BY group_by
}
}
-static void query_group_by_make_dimension_name(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_instance) {
+static void query_group_by_make_dimension_name(BUFFER *key, RRDR_GROUP_BY group_by, size_t group_by_id, QUERY_TARGET *qt, QUERY_NODE *qn, QUERY_CONTEXT *qc, QUERY_INSTANCE *qi, QUERY_DIMENSION *qd __maybe_unused, QUERY_METRIC *qm, bool query_has_percentage_of_group) {
buffer_flush(key);
- if(unlikely(!query_has_percentage_of_instance && qm->status & RRDR_DIMENSION_HIDDEN)) {
+ if(unlikely(!query_has_percentage_of_group && qm->status & RRDR_DIMENSION_HIDDEN)) {
buffer_strcat(key, "__hidden_dimensions__");
}
else if(unlikely(group_by & RRDR_GROUP_BY_SELECTED)) {
@@ -2758,16 +2764,16 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
}
// make sure there are valid group-by methods
- bool query_has_percentage_of_instance = false;
- for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES - 1 ;g++) {
+ for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
if(!(qt->request.group_by[g].group_by & SUPPORTED_GROUP_BY_METHODS))
qt->request.group_by[g].group_by = (g == 0) ? RRDR_GROUP_BY_DIMENSION : RRDR_GROUP_BY_NONE;
-
- if(qt->request.group_by[g].group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE)
- query_has_percentage_of_instance = true;
}
- // merge all group-by options to upper levels
+ bool query_has_percentage_of_group = query_target_has_percentage_of_group(qt);
+
+ // merge all group-by options to upper levels,
+ // so that the top level has all the groupings of the inner levels,
+ // and each subsequent level has all the groupings of its inner levels.
for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES - 1 ;g++) {
if(qt->request.group_by[g].group_by == RRDR_GROUP_BY_NONE)
continue;
@@ -2815,6 +2821,7 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
for(size_t g = 0; g < MAX_QUERY_GROUP_BY_PASSES ;g++) {
RRDR_GROUP_BY group_by = qt->request.group_by[g].group_by;
+ RRDR_GROUP_BY_FUNCTION aggregation_method = qt->request.group_by[g].aggregation;
if(group_by == RRDR_GROUP_BY_NONE)
break;
@@ -2855,7 +2862,7 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
// --------------------------------------------------------------------
// generate the group by key
- query_group_by_make_dimension_key(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_instance);
+ query_group_by_make_dimension_key(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_group);
// lookup the key in the dictionary
@@ -2869,13 +2876,13 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
// ----------------------------------------------------------------
// generate the dimension id
- query_group_by_make_dimension_id(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_instance);
+ query_group_by_make_dimension_id(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_group);
entries[pos].id = string_strdupz(buffer_tostring(key));
// ----------------------------------------------------------------
// generate the dimension name
- query_group_by_make_dimension_name(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_instance);
+ query_group_by_make_dimension_name(key, group_by, g, qt, qn, qc, qi, qd, qm, query_has_percentage_of_group);
entries[pos].name = string_strdupz(buffer_tostring(key));
// add the rest of the info
@@ -2914,9 +2921,9 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
// the query target adds to it the non-zero flag
qm->status |= RRDR_DIMENSION_GROUPED;
- if(query_has_percentage_of_instance)
- // when the query has percentage of instance
- // there will be no hidden dimensions in the final query
+ if(query_has_percentage_of_group)
+ // when the query has percentage of group
+ // there will be no hidden dimensions in the final query,
// so we have to remove the hidden flag from all dimensions
entries[pos].od |= qm->status & ~RRDR_DIMENSION_HIDDEN;
else
@@ -2934,12 +2941,10 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
qt->id, qt->window.after, qt->window.before, added, qt->window.points);
goto cleanup;
}
-
- bool hidden_dimension_on_percentage_of_instance = hidden_dimensions && (group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE);
-
- // prevent double cleanup in case of error
+ // prevent double free at cleanup in case of error
added = 0;
+ // link this RRDR
if(!last_r)
first_r = last_r = r;
else
@@ -2954,7 +2959,7 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
r->gbc = onewayalloc_callocz(owa, r->n * r->d, sizeof(*r->gbc));
r->dqp = onewayalloc_callocz(owa, r->d, sizeof(STORAGE_POINT));
- if(hidden_dimension_on_percentage_of_instance)
+ if(hidden_dimensions && ((group_by & RRDR_GROUP_BY_PERCENTAGE_OF_INSTANCE) || (aggregation_method == RRDR_GROUP_BY_FUNCTION_PERCENTAGE)))
// this is where we are going to group the hidden dimensions
r->vh = onewayalloc_mallocz(owa, r->n * r->d * sizeof(*r->vh));
@@ -2987,7 +2992,7 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
// initialize partial trimming
r->partial_data_trimming.max_update_every = update_every_max;
r->partial_data_trimming.expected_after =
- (!(qt->window.options & RRDR_OPTION_RETURN_RAW) &&
+ (!query_target_aggregatable(qt) &&
qt->window.before >= qt->window.now - update_every_max) ?
qt->window.before - update_every_max :
qt->window.before;
@@ -3006,7 +3011,7 @@ static RRDR *rrd2rrdr_group_by_initialize(ONEWAYALLOC *owa, QUERY_TARGET *qt) {
co[d] = RRDR_VALUE_EMPTY;
if(vh)
- *vh = NAN;
+ vh[d] = NAN;
}
}
}
@@ -3073,9 +3078,9 @@ static void rrd2rrdr_group_by_add_metric(RRDR *r_dst, size_t d_dst, RRDR *r_tmp,
internal_fatal(!r_dst->dqp, "QUERY: group-by destination is not properly prepared (missing dqp array)");
internal_fatal(!r_dst->gbc, "QUERY: group-by destination is not properly prepared (missing gbc array)");
- bool hidden_dimension_on_percentage_of_instance = (r_tmp->od[d_tmp] & RRDR_DIMENSION_HIDDEN) && r_dst->vh;
+ bool hidden_dimension_on_percentage_of_group = (r_tmp->od[d_tmp] & RRDR_DIMENSION_HIDDEN) && r_dst->vh;
- if(!hidden_dimension_on_percentage_of_instance) {
+ if(!hidden_dimension_on_percentage_of_group) {
r_dst->od[d_dst] |= r_tmp->od[d_tmp];
storage_point_merge_to(r_dst->dqp[d_dst], *query_points);
}
@@ -3092,7 +3097,7 @@ static void rrd2rrdr_group_by_add_metric(RRDR *r_dst, size_t d_dst, RRDR *r_tmp,
continue;
size_t idx_dst = i * r_dst->d + d_dst;
- NETDATA_DOUBLE *cn = (hidden_dimension_on_percentage_of_instance) ? &r_dst->vh[ idx_dst ] : &r_dst->v[ idx_dst ];
+ NETDATA_DOUBLE *cn = (hidden_dimension_on_percentage_of_group) ? &r_dst->vh[ idx_dst ] : &r_dst->v[ idx_dst ];
RRDR_VALUE_FLAGS *co = &r_dst->o[ idx_dst ];
NETDATA_DOUBLE *ar = &r_dst->ar[ idx_dst ];
uint32_t *gbc = &r_dst->gbc[ idx_dst ];
@@ -3101,6 +3106,7 @@ static void rrd2rrdr_group_by_add_metric(RRDR *r_dst, size_t d_dst, RRDR *r_tmp,
default:
case RRDR_GROUP_BY_FUNCTION_AVERAGE:
case RRDR_GROUP_BY_FUNCTION_SUM:
+ case RRDR_GROUP_BY_FUNCTION_PERCENTAGE:
if(isnan(*cn))
*cn = n_tmp;
else
@@ -3118,7 +3124,7 @@ static void rrd2rrdr_group_by_add_metric(RRDR *r_dst, size_t d_dst, RRDR *r_tmp,
break;
}
- if(!hidden_dimension_on_percentage_of_instance) {
+ if(!hidden_dimension_on_percentage_of_group) {
*co &= ~RRDR_VALUE_EMPTY;
*co |= (o_tmp & (RRDR_VALUE_RESET | RRDR_VALUE_PARTIAL));
*ar += ar_tmp;
@@ -3161,10 +3167,13 @@ static void rrdr2rrdr_group_by_partial_trimming(RRDR *r) {
}
}
-static void rrdr2rrdr_group_by_calculate_percentage_of_instance(RRDR *r) {
+static void rrdr2rrdr_group_by_calculate_percentage_of_group(RRDR *r) {
if(!r->vh)
return;
+ if(query_target_aggregatable(r->internal.qt) && query_has_group_by_aggregation_percentage(r->internal.qt))
+ return;
+
for(size_t i = 0; i < r->n ;i++) {
NETDATA_DOUBLE *cn = &r->v[ i * r->d ];
NETDATA_DOUBLE *ch = &r->vh[ i * r->d ];
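The renamed pass keeps the same arithmetic: per point, the visible (selected) aggregate in v[] becomes a percentage of visible plus hidden. A sketch of the loop body, which this hunk does not show (assumed from the surrounding pointers, not copied from the source):

    for(size_t d = 0; d < r->d; d++) {
        if(isnan(cn[d])) continue;
        NETDATA_DOUBLE h = isnan(ch[d]) ? 0.0 : ch[d];
        NETDATA_DOUBLE total = cn[d] + h;
        cn[d] = (total > 0) ? cn[d] * 100.0 / total : 0.0;
    }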
@@ -3185,7 +3194,10 @@ static void rrdr2rrdr_group_by_calculate_percentage_of_instance(RRDR *r) {
}
}
-static void rrd2rrdr_convert_to_percentage(RRDR *r) {
+static void rrd2rrdr_convert_values_to_percentage_of_total(RRDR *r) {
+ if(!(r->internal.qt->window.options & RRDR_OPTION_PERCENTAGE) || query_target_aggregatable(r->internal.qt))
+ return;
+
size_t global_min_max_values = 0;
NETDATA_DOUBLE global_min = NAN, global_max = NAN;
@@ -3279,19 +3291,17 @@ static void rrd2rrdr_convert_to_percentage(RRDR *r) {
static RRDR *rrd2rrdr_group_by_finalize(RRDR *r_tmp) {
QUERY_TARGET *qt = r_tmp->internal.qt;
- RRDR_OPTIONS options = qt->window.options;
if(!r_tmp->group_by.r) {
// v1 query
- if(options & RRDR_OPTION_PERCENTAGE)
- rrd2rrdr_convert_to_percentage(r_tmp);
+ rrd2rrdr_convert_values_to_percentage_of_total(r_tmp);
return r_tmp;
}
// v2 query
// do the additional passes on RRDRs
RRDR *last_r = r_tmp->group_by.r;
- rrdr2rrdr_group_by_calculate_percentage_of_instance(last_r);
+ rrdr2rrdr_group_by_calculate_percentage_of_group(last_r);
RRDR *r = last_r->group_by.r;
size_t pass = 0;
@@ -3302,7 +3312,7 @@ static RRDR *rrd2rrdr_group_by_finalize(RRDR *r_tmp) {
qt->request.group_by[pass].aggregation,
&last_r->dqp[d], pass);
}
- rrdr2rrdr_group_by_calculate_percentage_of_instance(r);
+ rrdr2rrdr_group_by_calculate_percentage_of_group(r);
last_r = r;
r = last_r->group_by.r;
@@ -3324,7 +3334,7 @@ static RRDR *rrd2rrdr_group_by_finalize(RRDR *r_tmp) {
if(qt->request.group_by[g].group_by != RRDR_GROUP_BY_NONE)
aggregation = qt->request.group_by[g].aggregation;
- if(!(options & RRDR_OPTION_RETURN_RAW) && r->partial_data_trimming.expected_after < qt->window.before)
+ if(!query_target_aggregatable(qt) && r->partial_data_trimming.expected_after < qt->window.before)
rrdr2rrdr_group_by_partial_trimming(r);
// apply averaging, remove RRDR_VALUE_EMPTY, find the non-zero dimensions, min and max
@@ -3416,8 +3426,7 @@ static RRDR *rrd2rrdr_group_by_finalize(RRDR *r_tmp) {
qt->window.options &= ~RRDR_OPTION_NONZERO;
}
- if(options & RRDR_OPTION_PERCENTAGE && !(options & RRDR_OPTION_RETURN_RAW))
- rrd2rrdr_convert_to_percentage(r);
+ rrd2rrdr_convert_values_to_percentage_of_total(r);
// update query instance counts in query host and query context
{
diff --git a/web/api/queries/query.h b/web/api/queries/query.h
index e6fdcfbe4..5eabb6c03 100644
--- a/web/api/queries/query.h
+++ b/web/api/queries/query.h
@@ -85,6 +85,7 @@ typedef enum rrdr_group_by_function {
RRDR_GROUP_BY_FUNCTION_MIN,
RRDR_GROUP_BY_FUNCTION_MAX,
RRDR_GROUP_BY_FUNCTION_SUM,
+ RRDR_GROUP_BY_FUNCTION_PERCENTAGE,
} RRDR_GROUP_BY_FUNCTION;
RRDR_GROUP_BY_FUNCTION group_by_aggregate_function_parse(const char *s);
diff --git a/web/api/web_api_v1.c b/web/api/web_api_v1.c
index 6e23549d4..637329696 100644
--- a/web/api/web_api_v1.c
+++ b/web/api/web_api_v1.c
@@ -354,7 +354,7 @@ inline int web_client_api_request_v1_alarm_log(RRDHOST *host, struct web_client
buffer_flush(w->response.data);
w->response.data->content_type = CT_APPLICATION_JSON;
- health_alarm_log2json(host, w->response.data, after, chart);
+ sql_health_alarm_log2json(host, w->response.data, after, chart);
return HTTP_RESP_OK;
}
diff --git a/web/gui/dashboard_info.js b/web/gui/dashboard_info.js
index 3dfbd6166..23574a701 100644
--- a/web/gui/dashboard_info.js
+++ b/web/gui/dashboard_info.js
@@ -883,6 +883,19 @@ netdataDashboard.submenu = {
'When the kernel or an application requests some memory, the buddy allocator provides a page that matches closest the request.'
},
+ 'mem.fragmentation': {
+ info: 'These charts show whether the kernel will compact memory or direct reclaim to satisfy a high-order allocation. '+
+ 'The <code>extfrag/extfrag_index</code> file in debugfs shows the fragmentation index for each order in each zone of the system. ' +
+ 'Values tending towards 0 imply allocations would fail due to lack of memory, values towards 1000 imply failures are due to ' +
+ 'fragmentation, and -1 implies that the allocation will succeed as long as watermarks are met.'
+ },
+
+ 'system.zswap': {
+ info : 'Zswap is a backend for frontswap that takes pages that are in the process of being swapped out and attempts to compress and store them in a ' +
+ 'RAM-based memory pool. This can result in a significant I/O reduction on the swap device and, in the case where decompressing from RAM is faster ' +
+ 'than reading from the swap device, can also improve workload performance.'
+ },
+
'ip.ecn': {
info: '<a href="https://en.wikipedia.org/wiki/Explicit_Congestion_Notification" target="_blank">Explicit Congestion Notification (ECN)</a> '+
'is an extension to the IP and to the TCP that allows end-to-end notification of network congestion without dropping packets. '+
@@ -1522,6 +1535,14 @@ netdataDashboard.context = {
info: '<a href="https://en.wikipedia.org/wiki/Entropy_(computing)" target="_blank">Entropy</a>, is a pool of random numbers (<a href="https://en.wikipedia.org/wiki//dev/random" target="_blank">/dev/random</a>) that is mainly used in cryptography. If the pool of entropy gets empty, processes requiring random numbers may run a lot slower (it depends on the interface each program uses), waiting for the pool to be replenished. Ideally a system with high entropy demands should have a hardware device for that purpose (TPM is one such device). There are also several software-only options you may install, like <code>haveged</code>, although these are generally useful only in servers.'
},
+ 'system.zswap_rejections': {
+ info: '<p>Zswap rejected pages, by rejection reason.</p>' +
+ '<p><b>CompressPoor</b> - compressed page was too big for the allocator to store. ' +
+ '<b>KmemcacheFail</b> - entry metadata could not be allocated. ' +
+ '<b>AllocFail</b> - allocator could not get memory. ' +
+ '<b>ReclaimFail</b> - memory cannot be reclaimed (pool limit was reached).</p>'
+ },
+
'system.clock_sync_state': {
info:'<p>The system clock synchronization state as provided by the <a href="https://man7.org/linux/man-pages/man2/adjtimex.2.html" target="_blank">ntp_adjtime()</a> system call. '+
'An unsynchronized clock may be the result of synchronization issues by the NTP daemon or a hardware clock fault. '+
diff --git a/web/server/static/static-threaded.c b/web/server/static/static-threaded.c
index 52bb56cd6..4cb3dcd92 100644
--- a/web/server/static/static-threaded.c
+++ b/web/server/static/static-threaded.c
@@ -211,58 +211,32 @@ static void *web_server_add_callback(POLLINFO *pi, short int *events, void *data
}
#ifdef ENABLE_HTTPS
- if ((!web_client_check_unix(w)) && (netdata_ssl_srv_ctx)) {
- if( sock_delnonblock(w->ifd) < 0 ){
- error("Web server cannot remove the non-blocking flag from socket %d",w->ifd);
- }
+ if ((!web_client_check_unix(w)) && (netdata_ssl_web_server_ctx)) {
+ sock_delnonblock(w->ifd);
//Read the first 7 bytes from the message, but the message
//is not removed from the queue, because we are using MSG_PEEK
char test[8];
- if ( recv(w->ifd,test, 7,MSG_PEEK) == 7 ) {
- test[7] = 0x00;
+ if ( recv(w->ifd,test, 7, MSG_PEEK) == 7 ) {
+ test[7] = '\0';
}
else {
- //Case I do not have success to read 7 bytes,
- //this means that the mensage was not completely read, so
- //I cannot identify it yet.
+ // we couldn't read 7 bytes
sock_setnonblock(w->ifd);
goto cleanup;
}
- //The next two ifs are not together because I am reusing SSL structure
- if (!w->ssl.conn)
- {
- w->ssl.conn = SSL_new(netdata_ssl_srv_ctx);
- if ( w->ssl.conn ) {
- SSL_set_accept_state(w->ssl.conn);
- } else {
- error("Failed to create SSL context on socket fd %d.", w->ifd);
- if (test[0] < 0x18){
- WEB_CLIENT_IS_DEAD(w);
- sock_setnonblock(w->ifd);
- goto cleanup;
- }
- }
+ if(test[0] > 0x17) {
+ // no SSL
+ netdata_ssl_close(&w->ssl); // free any previous SSL data
}
-
- if (w->ssl.conn) {
- if (SSL_set_fd(w->ssl.conn, w->ifd) != 1) {
- error("Failed to set the socket to the SSL on socket fd %d.", w->ifd);
- //The client is not set dead, because I received a normal HTTP request
- //instead a Client Hello(HTTPS).
- if ( test[0] < 0x18 ){
- WEB_CLIENT_IS_DEAD(w);
- }
- }
- else{
- w->ssl.flags = security_process_accept(w->ssl.conn, (int)test[0]);
- }
+ else {
+ // SSL
+ if(!netdata_ssl_open(&w->ssl, netdata_ssl_web_server_ctx, w->ifd) || !netdata_ssl_accept(&w->ssl))
+ WEB_CLIENT_IS_DEAD(w);
}
sock_setnonblock(w->ifd);
- } else{
- w->ssl.flags = NETDATA_SSL_NO_HANDSHAKE;
}
#endif
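The test[0] > 0x17 check works because TLS record content types occupy 0x14-0x17 (ChangeCipherSpec, Alert, Handshake, ApplicationData), while plaintext HTTP starts with an ASCII method letter far above that range ('G' of GET is 0x47). A standalone sketch of the same detection (fd is hypothetical):

    unsigned char first;
    if (recv(fd, &first, 1, MSG_PEEK) == 1) {       // peek without consuming
        bool looks_like_tls = (first <= 0x17);      // 0x16 == TLS Handshake (ClientHello)
        // hand the socket to SSL accept or to plain HTTP accordingly
    }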
@@ -525,9 +499,15 @@ void *socket_listen_main_static_threaded(void *ptr) {
if(!api_sockets.opened)
fatal("LISTENER: no listen sockets available.");
+ netdata_ssl_validate_certificate = !config_get_boolean(CONFIG_SECTION_WEB, "ssl skip certificate verification", !netdata_ssl_validate_certificate);
+
+ if(!netdata_ssl_validate_certificate)
+ info("SSL: web server will skip SSL certificate verification.");
+
#ifdef ENABLE_HTTPS
- security_start_ssl(NETDATA_SSL_CONTEXT_SERVER);
+ netdata_ssl_initialize_ctx(NETDATA_SSL_WEB_SERVER_CTX);
#endif
+
// 6 threads is the optimal value
// since 6 are the parallel connections browsers will do
// so, if the machine has more CPUs, avoid using resources unnecessarily
@@ -541,6 +521,7 @@ void *socket_listen_main_static_threaded(void *ptr) {
static_threaded_workers_count = config_get_number(CONFIG_SECTION_WEB, "web server threads", def_thread_count);
if (static_threaded_workers_count < 1) static_threaded_workers_count = 1;
+
#ifdef ENABLE_HTTPS
// See https://github.com/netdata/netdata/issues/11081#issuecomment-831998240 for more details
if (OPENSSL_VERSION_NUMBER < OPENSSL_VERSION_110) {
diff --git a/web/server/web_client.c b/web/server/web_client.c
index 8bc72e71f..6e3c1225e 100644
--- a/web/server/web_client.c
+++ b/web/server/web_client.c
@@ -8,9 +8,7 @@
int respect_web_browser_do_not_track_policy = 0;
char *web_x_frame_options = NULL;
-#ifdef NETDATA_WITH_ZLIB
int web_enable_gzip = 1, web_gzip_level = 3, web_gzip_strategy = Z_DEFAULT_STRATEGY;
-#endif /* NETDATA_WITH_ZLIB */
inline int web_client_permission_denied(struct web_client *w) {
w->response.data->content_type = CT_TEXT_PLAIN;
@@ -36,11 +34,10 @@ static inline int web_client_crock_socket(struct web_client *w __maybe_unused) {
return 0;
}
-static inline void web_client_enable_wait_from_ssl(struct web_client *w, int bytes) {
- int ssl_err = SSL_get_error(w->ssl.conn, bytes);
- if (ssl_err == SSL_ERROR_WANT_READ)
+static inline void web_client_enable_wait_from_ssl(struct web_client *w) {
+ if (w->ssl.ssl_errno == SSL_ERROR_WANT_READ)
web_client_enable_ssl_wait_receive(w);
- else if (ssl_err == SSL_ERROR_WANT_WRITE)
+ else if (w->ssl.ssl_errno == SSL_ERROR_WANT_WRITE)
web_client_enable_ssl_wait_send(w);
else {
web_client_disable_ssl_wait_receive(w);
@@ -101,15 +98,6 @@ static void web_client_reset_allocations(struct web_client *w, bool free_all) {
freez(w->post_payload);
w->post_payload = NULL;
w->post_payload_size = 0;
-
-#ifdef ENABLE_HTTPS
- if ((!web_client_check_unix(w)) && (netdata_ssl_srv_ctx)) {
- if (w->ssl.conn) {
- SSL_free(w->ssl.conn);
- w->ssl.conn = NULL;
- }
- }
-#endif
}
else {
// the web client is to be re-used
@@ -123,7 +111,6 @@ static void web_client_reset_allocations(struct web_client *w, bool free_all) {
buffer_reset(w->response.data);
// leave w->post_payload
- // leave w->ssl
}
freez(w->server_host);
@@ -142,7 +129,6 @@ static void web_client_reset_allocations(struct web_client *w, bool free_all) {
w->auth_bearer_token = NULL;
// if we had enabled compression, release it
-#ifdef NETDATA_WITH_ZLIB
if(w->response.zinitialized) {
deflateEnd(&w->response.zstream);
w->response.zsent = 0;
@@ -154,7 +140,6 @@ static void web_client_reset_allocations(struct web_client *w, bool free_all) {
w->response.zinitialized = false;
w->flags &= ~WEB_CLIENT_CHUNKED_TRANSFER;
}
-#endif // NETDATA_WITH_ZLIB
}
void web_client_request_done(struct web_client *w) {
@@ -168,9 +153,7 @@ void web_client_request_done(struct web_client *w) {
size_t size = (w->mode == WEB_CLIENT_MODE_FILECOPY)?w->response.rlen:w->response.data->len;
size_t sent = size;
-#ifdef NETDATA_WITH_ZLIB
if(likely(w->response.zoutput)) sent = (size_t)w->response.zstream.total_out;
-#endif
// --------------------------------------------------------------------
// global statistics
@@ -444,9 +427,6 @@ int mysendfile(struct web_client *w, char *filename) {
}
#endif
-
-
-#ifdef NETDATA_WITH_ZLIB
void web_client_enable_deflate(struct web_client *w, int gzip) {
if(unlikely(w->response.zinitialized)) {
debug(D_DEFLATE, "%llu: Compression has already be initialized for this client.", w->id);
@@ -492,7 +472,6 @@ void web_client_enable_deflate(struct web_client *w, int gzip) {
debug(D_DEFLATE, "%llu: Initialized compression.", w->id);
}
-#endif // NETDATA_WITH_ZLIB
void buffer_data_options2string(BUFFER *wb, uint32_t options) {
int count = 0;
@@ -730,16 +709,12 @@ const char *web_response_code_to_string(int code) {
static inline char *http_header_parse(struct web_client *w, char *s, int parse_useragent) {
static uint32_t hash_origin = 0, hash_connection = 0, hash_donottrack = 0, hash_useragent = 0,
hash_authorization = 0, hash_host = 0, hash_forwarded_proto = 0, hash_forwarded_host = 0;
-#ifdef NETDATA_WITH_ZLIB
static uint32_t hash_accept_encoding = 0;
-#endif
if(unlikely(!hash_origin)) {
hash_origin = simple_uhash("Origin");
hash_connection = simple_uhash("Connection");
-#ifdef NETDATA_WITH_ZLIB
hash_accept_encoding = simple_uhash("Accept-Encoding");
-#endif
hash_donottrack = simple_uhash("DNT");
hash_useragent = simple_uhash("User-Agent");
hash_authorization = simple_uhash("X-Auth-Token");
@@ -798,7 +773,6 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u
strncpyz(buffer, v, ((size_t)(ve - v) < sizeof(buffer) - 1 ? (size_t)(ve - v) : sizeof(buffer) - 1));
w->server_host = strdupz(buffer);
}
-#ifdef NETDATA_WITH_ZLIB
else if(hash == hash_accept_encoding && !strcasecmp(s, "Accept-Encoding")) {
if(web_enable_gzip) {
if(strcasestr(v, "gzip"))
@@ -809,13 +783,10 @@ static inline char *http_header_parse(struct web_client *w, char *s, int parse_u
// web_client_enable_deflate(w, 0);
}
}
-#endif /* NETDATA_WITH_ZLIB */
-#ifdef ENABLE_HTTPS
else if(hash == hash_forwarded_proto && !strcasecmp(s, "X-Forwarded-Proto")) {
if(strcasestr(v, "https"))
- w->ssl.flags |= NETDATA_SSL_PROXY_HTTPS;
+ w->flags |= WEB_CLIENT_FLAG_PROXY_HTTPS;
}
-#endif
else if(hash == hash_forwarded_host && !strcasecmp(s, "X-Forwarded-Host")) {
char buffer[NI_MAXHOST];
strncpyz(buffer, v, ((size_t)(ve - v) < sizeof(buffer) - 1 ? (size_t)(ve - v) : sizeof(buffer) - 1));
@@ -855,7 +826,7 @@ static inline char *web_client_valid_method(struct web_client *w, char *s) {
s = &s[7];
#ifdef ENABLE_HTTPS
- if (w->ssl.flags && web_client_is_using_ssl_force(w)){
+ if (!SSL_connection(&w->ssl) && web_client_is_using_ssl_force(w)) {
w->header_parse_tries = 0;
w->header_parse_last_size = 0;
web_client_disable_wait_receive(w);
@@ -996,8 +967,8 @@ static inline HTTP_VALIDATION http_request_validate(struct web_client *w) {
*ue = c;
#ifdef ENABLE_HTTPS
- if ( (!web_client_check_unix(w)) && (netdata_ssl_srv_ctx) ) {
- if ((w->ssl.conn) && ((w->ssl.flags & NETDATA_SSL_NO_HANDSHAKE) && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) ) {
+ if ( (!web_client_check_unix(w)) && (netdata_ssl_web_server_ctx) ) {
+ if (!w->ssl.conn && (web_client_is_using_ssl_force(w) || web_client_is_using_ssl_default(w)) && (w->mode != WEB_CLIENT_MODE_STREAM)) {
w->header_parse_tries = 0;
w->header_parse_last_size = 0;
web_client_disable_wait_receive(w);
@@ -1026,16 +997,15 @@ static inline ssize_t web_client_send_data(struct web_client *w,const void *buf,
{
ssize_t bytes;
#ifdef ENABLE_HTTPS
- if ( (!web_client_check_unix(w)) && (netdata_ssl_srv_ctx) ) {
- if ( ( w->ssl.conn ) && ( !w->ssl.flags ) ){
- bytes = netdata_ssl_write(w->ssl.conn, buf, len) ;
- web_client_enable_wait_from_ssl(w, bytes);
- } else {
- bytes = send(w->ofd,buf, len , flags);
+ if ((!web_client_check_unix(w)) && (netdata_ssl_web_server_ctx)) {
+ if (SSL_connection(&w->ssl)) {
+ bytes = netdata_ssl_write(&w->ssl, buf, len) ;
+ web_client_enable_wait_from_ssl(w);
}
- } else {
+ else
+ bytes = send(w->ofd,buf, len , flags);
+ } else
bytes = send(w->ofd,buf, len , flags);
- }
#else
bytes = send(w->ofd, buf, len, flags);
#endif
@@ -1172,10 +1142,10 @@ static inline void web_client_send_http_header(struct web_client *w) {
size_t count = 0;
ssize_t bytes;
#ifdef ENABLE_HTTPS
- if ( (!web_client_check_unix(w)) && (netdata_ssl_srv_ctx) ) {
- if ( ( w->ssl.conn ) && ( w->ssl.flags == NETDATA_SSL_HANDSHAKE_COMPLETE ) ) {
- bytes = netdata_ssl_write(w->ssl.conn, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output));
- web_client_enable_wait_from_ssl(w, bytes);
+ if ( (!web_client_check_unix(w)) && (netdata_ssl_web_server_ctx) ) {
+ if (SSL_connection(&w->ssl)) {
+ bytes = netdata_ssl_write(&w->ssl, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output));
+ web_client_enable_wait_from_ssl(w);
}
else {
while((bytes = send(w->ofd, buffer_tostring(w->response.header_output), buffer_strlen(w->response.header_output), 0)) == -1) {
@@ -1276,11 +1246,11 @@ static inline int web_client_switch_host(RRDHOST *host, struct web_client *w, ch
if(!url) { //no delim found
debug(D_WEB_CLIENT, "%llu: URL doesn't end with / generating redirect.", w->id);
char *protocol, *url_host;
+ protocol = (
#ifdef ENABLE_HTTPS
- protocol = ((w->ssl.conn && !w->ssl.flags) || w->ssl.flags & NETDATA_SSL_PROXY_HTTPS) ? "https" : "http";
-#else
- protocol = "http";
+ SSL_connection(&w->ssl) ||
#endif
+ (w->flags & WEB_CLIENT_FLAG_PROXY_HTTPS)) ? "https" : "http";
url_host = w->forwarded_host;
if(!url_host) {
@@ -1736,7 +1706,6 @@ ssize_t web_client_send_chunk_finalize(struct web_client *w)
return bytes;
}
-#ifdef NETDATA_WITH_ZLIB
ssize_t web_client_send_deflate(struct web_client *w)
{
ssize_t len = 0, t = 0;
@@ -1851,12 +1820,9 @@ ssize_t web_client_send_deflate(struct web_client *w)
return(len);
}
-#endif // NETDATA_WITH_ZLIB
ssize_t web_client_send(struct web_client *w) {
-#ifdef NETDATA_WITH_ZLIB
if(likely(w->response.zoutput)) return web_client_send_deflate(w);
-#endif // NETDATA_WITH_ZLIB
ssize_t bytes;
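/*
 * The NETDATA_WITH_ZLIB guards are gone: zlib is now an unconditional
 * dependency, so web_client_send() always has the deflate path compiled
 * in.  For context, a minimal example of preparing a z_stream the way a
 * web server typically does for gzip-encoded responses; the level and
 * strategy shown are generic zlib defaults, not necessarily the values
 * netdata configures from web_gzip_level/web_gzip_strategy.
 */
#include <string.h>
#include <zlib.h>

static int example_gzip_init(z_stream *zs) {
    memset(zs, 0, sizeof(*zs));
    /* 15 window bits + 16 selects the gzip wrapper instead of raw zlib */
    return deflateInit2(zs, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                        15 + 16, 8, Z_DEFAULT_STRATEGY);
}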
@@ -1968,11 +1934,12 @@ ssize_t web_client_receive(struct web_client *w)
buffer_need_bytes(w->response.data, NETDATA_WEB_REQUEST_INITIAL_SIZE);
#ifdef ENABLE_HTTPS
- if ( (!web_client_check_unix(w)) && (netdata_ssl_srv_ctx) ) {
- if ( ( w->ssl.conn ) && (!w->ssl.flags)) {
- bytes = netdata_ssl_read(w->ssl.conn, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1));
- web_client_enable_wait_from_ssl(w, bytes);
- }else {
+ if ( (!web_client_check_unix(w)) && (netdata_ssl_web_server_ctx) ) {
+ if (SSL_connection(&w->ssl)) {
+ bytes = netdata_ssl_read(&w->ssl, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1));
+ web_client_enable_wait_from_ssl(w);
+ }
+ else {
bytes = recv(w->ifd, &w->response.data->buffer[w->response.data->len], (size_t) (left - 1), MSG_DONTWAIT);
}
}
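/*
 * web_client_receive() mirrors the send path: one branch reads through
 * the TLS layer, the other does a non-blocking recv() on the plain
 * socket.  Sketched below with raw OpenSSL calls; the "left - 1" keeps
 * room for the terminating NUL the caller appends, as in the hunk above.
 */
#include <sys/types.h>
#include <sys/socket.h>
#include <openssl/ssl.h>

static ssize_t example_receive(SSL *tls, int fd, char *dst, size_t left) {
    if (tls)                                   /* TLS established */
        return (ssize_t) SSL_read(tls, dst, (int) (left - 1));

    /* plain socket: non-blocking read, room left for a trailing '\0' */
    return recv(fd, dst, left - 1, MSG_DONTWAIT);
}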
@@ -2005,26 +1972,6 @@ ssize_t web_client_receive(struct web_client *w)
}
-int web_client_socket_is_now_used_for_streaming(struct web_client *w) {
- // prevent the web_client from closing the streaming socket
-
- WEB_CLIENT_IS_DEAD(w);
-
- if(web_server_mode == WEB_SERVER_MODE_STATIC_THREADED) {
- web_client_flag_set(w, WEB_CLIENT_FLAG_DONT_CLOSE_SOCKET);
- }
- else {
- if(w->ifd == w->ofd)
- w->ifd = w->ofd = -1;
- else
- w->ifd = -1;
- }
-
- buffer_flush(w->response.data);
-
- return HTTP_RESP_OK;
-}
-
void web_client_decode_path_and_query_string(struct web_client *w, const char *path_and_query_string) {
char buffer[NETDATA_WEB_REQUEST_URL_SIZE + 2];
buffer[0] = '\0';
@@ -2072,25 +2019,6 @@ void web_client_decode_path_and_query_string(struct web_client *w, const char *p
}
}
-#ifdef ENABLE_HTTPS
-void web_client_reuse_ssl(struct web_client *w) {
- if (netdata_ssl_srv_ctx) {
- if (w->ssl.conn) {
- SSL_SESSION *session = SSL_get_session(w->ssl.conn);
- SSL *old = w->ssl.conn;
- w->ssl.conn = SSL_new(netdata_ssl_srv_ctx);
- if (session) {
-#if OPENSSL_VERSION_NUMBER >= OPENSSL_VERSION_111
- if (SSL_SESSION_is_resumable(session))
-#endif
- SSL_set_session(w->ssl.conn, session);
- }
- SSL_free(old);
- }
- }
-}
-#endif
-
void web_client_zero(struct web_client *w) {
// zero everything about it - but keep the buffers
@@ -2105,8 +2033,7 @@ void web_client_zero(struct web_client *w) {
BUFFER *b6 = w->url_query_string_decoded;
#ifdef ENABLE_HTTPS
- web_client_reuse_ssl(w);
- SSL *ssl = w->ssl.conn;
+ NETDATA_SSL ssl = w->ssl;
#endif
size_t use_count = w->use_count;
@@ -2120,9 +2047,7 @@ void web_client_zero(struct web_client *w) {
w->use_count = use_count;
#ifdef ENABLE_HTTPS
- w->ssl.conn = ssl;
- w->ssl.flags = NETDATA_SSL_START;
- debug(D_WEB_CLIENT_ACCESS,"Reusing SSL structure with (w->ssl = NULL, w->accepted = %u)", w->ssl.flags);
+ w->ssl = ssl;
#endif
// restore the pointers of the buffers
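/*
 * web_client_zero() wipes the whole structure but must carry a few live
 * resources across the wipe: the buffers, the use counter, and now the
 * entire NETDATA_SSL value instead of a hand-rebuilt SSL object.  The
 * save/zero/restore idiom, demonstrated on a toy struct with
 * illustrative field names:
 */
#include <stddef.h>
#include <string.h>

struct example_client {
    int fd;                    /* reset on reuse                  */
    void *response_buffer;     /* allocation kept across reuse    */
    size_t use_count;          /* statistic kept across reuse     */
};

static void example_client_zero(struct example_client *c) {
    void *buffer = c->response_buffer;    /* save what must survive */
    size_t uses  = c->use_count;

    memset(c, 0, sizeof(*c));             /* wipe everything else   */

    c->response_buffer = buffer;          /* restore the survivors  */
    c->use_count = uses;
}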
@@ -2136,6 +2061,11 @@ void web_client_zero(struct web_client *w) {
struct web_client *web_client_create(size_t *statistics_memory_accounting) {
struct web_client *w = (struct web_client *)callocz(1, sizeof(struct web_client));
+
+#ifdef ENABLE_HTTPS
+ w->ssl = NETDATA_SSL_UNSET_CONNECTION;
+#endif
+
w->use_count = 1;
w->statistics.memory_accounting = statistics_memory_accounting;
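/*
 * Freshly created clients now start from NETDATA_SSL_UNSET_CONNECTION, a
 * sentinel meaning "no TLS negotiated yet", instead of relying on the
 * zeroed allocation.  A sketch of the sentinel pattern; the initializer
 * contents here are an assumption, not netdata's actual macro.
 */
#include <stddef.h>
#include <openssl/ssl.h>

typedef struct {
    SSL *conn;
    int  state;                 /* 0 used here as "unset / not TLS" */
} EXAMPLE_SSL;

#define EXAMPLE_SSL_UNSET_CONNECTION (EXAMPLE_SSL){ .conn = NULL, .state = 0 }

static void example_client_init(EXAMPLE_SSL *ssl) {
    *ssl = EXAMPLE_SSL_UNSET_CONNECTION;  /* explicit "no connection" state */
}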
@@ -2152,6 +2082,10 @@ struct web_client *web_client_create(size_t *statistics_memory_accounting) {
}
void web_client_free(struct web_client *w) {
+#ifdef ENABLE_HTTPS
+ netdata_ssl_close(&w->ssl);
+#endif
+
web_client_reset_allocations(w, true);
__atomic_sub_fetch(w->statistics.memory_accounting, sizeof(struct web_client), __ATOMIC_RELAXED);
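/*
 * With SSL-object reuse removed, the TLS lifetime becomes simple:
 * web_client_free() (and web_client_release_to_cache() further down)
 * call netdata_ssl_close() exactly once per connection.  A sketch of an
 * idempotent close helper built from the real OpenSSL calls; making it
 * NULL-safe lets callers invoke it unconditionally.
 */
#include <stddef.h>
#include <openssl/ssl.h>

static void example_ssl_close(SSL **conn) {
    if (*conn) {
        SSL_shutdown(*conn);   /* send close_notify, best effort  */
        SSL_free(*conn);       /* release the OpenSSL object      */
        *conn = NULL;          /* safe to call again afterwards   */
    }
}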
diff --git a/web/server/web_client.h b/web/server/web_client.h
index c61a8b813..4c2b06a70 100644
--- a/web/server/web_client.h
+++ b/web/server/web_client.h
@@ -5,33 +5,7 @@
#include "libnetdata/libnetdata.h"
-#ifdef NETDATA_WITH_ZLIB
extern int web_enable_gzip, web_gzip_level, web_gzip_strategy;
-#endif /* NETDATA_WITH_ZLIB */
-
-// HTTP_CODES 2XX Success
-#define HTTP_RESP_OK 200
-
-// HTTP_CODES 3XX Redirections
-#define HTTP_RESP_MOVED_PERM 301
-#define HTTP_RESP_REDIR_TEMP 307
-#define HTTP_RESP_REDIR_PERM 308
-
-// HTTP_CODES 4XX Client Errors
-#define HTTP_RESP_BAD_REQUEST 400
-#define HTTP_RESP_UNAUTHORIZED 401
-#define HTTP_RESP_FORBIDDEN 403
-#define HTTP_RESP_NOT_FOUND 404
-#define HTTP_RESP_CONFLICT 409
-#define HTTP_RESP_PRECOND_FAIL 412
-#define HTTP_RESP_CONTENT_TOO_LONG 413
-
-// HTTP_CODES 5XX Server Errors
-#define HTTP_RESP_INTERNAL_SERVER_ERROR 500
-#define HTTP_RESP_BACKEND_FETCH_FAILED 503
-#define HTTP_RESP_SERVICE_UNAVAILABLE 503
-#define HTTP_RESP_GATEWAY_TIMEOUT 504
-#define HTTP_RESP_BACKEND_RESPONSE_INVALID 591
#define HTTP_REQ_MAX_HEADER_FETCH_TRIES 100
@@ -78,6 +52,8 @@ typedef enum web_client_flags {
WEB_CLIENT_FLAG_SSL_WAIT_RECEIVE = 1 << 11, // if set, we are waiting more input data from an ssl conn
WEB_CLIENT_FLAG_SSL_WAIT_SEND = 1 << 12, // if set, we have data to send to the client from an ssl conn
+
+ WEB_CLIENT_FLAG_PROXY_HTTPS = 1 << 13, // if set, the client reaches us via an https proxy
} WEB_CLIENT_FLAGS;
#define web_client_flag_check(w, flag) ((w)->flags & (flag))
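/*
 * WEB_CLIENT_FLAG_PROXY_HTTPS moves "the client reached us through a
 * TLS-terminating proxy" out of ssl.flags (the old NETDATA_SSL_PROXY_HTTPS)
 * and into the generic client flags.  Such a flag is typically set while
 * parsing an X-Forwarded-Proto header; the sketch below is an assumption
 * about that usage, not code taken from this diff.
 */
#include <strings.h>

enum { EXAMPLE_FLAG_PROXY_HTTPS = 1 << 13 };

static void example_parse_forwarded_proto(unsigned int *flags,
                                          const char *value) {
    if (value && strcasecmp(value, "https") == 0)
        *flags |= EXAMPLE_FLAG_PROXY_HTTPS;
}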
@@ -145,13 +121,11 @@ struct response {
bool zoutput; // if set to 1, web_client_send() will send compressed data
-#ifdef NETDATA_WITH_ZLIB
bool zinitialized;
z_stream zstream; // zlib stream for sending compressed output to client
size_t zsent; // the compressed bytes we have sent to the client
size_t zhave; // the compressed bytes that we have received from zlib
Bytef zbuffer[NETDATA_WEB_RESPONSE_ZLIB_CHUNK_SIZE]; // temporary buffer for storing compressed output
-#endif /* NETDATA_WITH_ZLIB */
};
struct web_client;
@@ -196,7 +170,7 @@ struct web_client {
size_t pollinfo_filecopy_slot; // POLLINFO slot of the file read
#ifdef ENABLE_HTTPS
- struct netdata_ssl ssl;
+ NETDATA_SSL ssl;
#endif
struct { // A callback to check if the query should be interrupted / stopped
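/*
 * The member changes from "struct netdata_ssl" with a separate flags
 * word to the typedef'd value type NETDATA_SSL.  Keeping it a value (not
 * a pointer) is what lets web_client_zero() above save and restore the
 * whole TLS state with two plain assignments.  A tiny demonstration of
 * that copy semantics on a stand-in type:
 */
#include <assert.h>
#include <stddef.h>
#include <openssl/ssl.h>

typedef struct {
    SSL *conn;
    int  state;
} EXAMPLE_SSL;

static void example_copy_semantics(void) {
    EXAMPLE_SSL saved = { .conn = NULL, .state = 3 };
    EXAMPLE_SSL restored = saved;        /* one assignment copies it all */
    assert(restored.conn == saved.conn && restored.state == saved.state);
}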
@@ -241,16 +215,10 @@ int mysendfile(struct web_client *w, char *filename);
void web_client_build_http_header(struct web_client *w);
char *strip_control_characters(char *url);
-int web_client_socket_is_now_used_for_streaming(struct web_client *w);
-
void web_client_zero(struct web_client *w);
struct web_client *web_client_create(size_t *statistics_memory_accounting);
void web_client_free(struct web_client *w);
-#ifdef ENABLE_HTTPS
-void web_client_reuse_ssl(struct web_client *w);
-#endif
-
#include "web/api/web_api_v1.h"
#include "web/api/web_api_v2.h"
#include "daemon/common.h"
diff --git a/web/server/web_client_cache.c b/web/server/web_client_cache.c
index b410ba7f9..394bea32b 100644
--- a/web/server/web_client_cache.c
+++ b/web/server/web_client_cache.c
@@ -104,11 +104,6 @@ struct web_client *web_client_get_from_cache(void) {
// allocate it
w = web_client_create(&netdata_buffers_statistics.buffers_web);
-#ifdef ENABLE_HTTPS
- w->ssl.flags = NETDATA_SSL_START;
- debug(D_WEB_CLIENT_ACCESS,"Starting SSL structure with (w->ssl = NULL, w->accepted = %u)", w->ssl.flags);
-#endif
-
netdata_spinlock_lock(&web_clients_cache.used.spinlock);
web_clients_cache.used.allocated++;
}
@@ -127,6 +122,11 @@ struct web_client *web_client_get_from_cache(void) {
}
void web_client_release_to_cache(struct web_client *w) {
+
+#ifdef ENABLE_HTTPS
+ netdata_ssl_close(&w->ssl);
+#endif
+
// unlink it from the used
netdata_spinlock_lock(&web_clients_cache.used.spinlock);
DOUBLE_LINKED_LIST_REMOVE_ITEM_UNSAFE(web_clients_cache.used.head, w, cache.prev, cache.next);
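/*
 * Releasing a client back to the cache now tears down TLS first, so a
 * cached slot can never carry one visitor's session into the next
 * connection; web_client_get_from_cache() correspondingly no longer has
 * SSL state to reset.  A sketch of that ordering with illustrative
 * stand-in names; the list bookkeeping is elided.
 */
#include <stddef.h>
#include <openssl/ssl.h>

struct example_cached_client {
    SSL *tls;            /* per-connection state: must not survive caching */
    /* ... buffers and statistics that do survive ... */
};

static void example_release_to_cache(struct example_cached_client *c) {
    if (c->tls) {
        SSL_shutdown(c->tls);   /* close this visitor's TLS session */
        SSL_free(c->tls);
        c->tls = NULL;          /* the cached slot starts clean     */
    }
    /* ... unlink from the used list, push onto the free list ... */
}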