author    Daniel Baumann <daniel.baumann@progress-linux.org>  2023-02-06 16:11:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2023-02-06 16:11:30 +0000
commit    aa2fe8ccbfcb117efa207d10229eeeac5d0f97c7 (patch)
tree      941cbdd387b41c1a81587c20a6df9f0e5e0ff7ab /collectors/ebpf.plugin
parent    Adding upstream version 1.37.1. (diff)
Adding upstream version 1.38.0. (tag: upstream/1.38.0)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collectors/ebpf.plugin')
-rw-r--r--  collectors/ebpf.plugin/README.md          |  39
-rw-r--r--  collectors/ebpf.plugin/ebpf.c             |  98
-rw-r--r--  collectors/ebpf.plugin/ebpf.d.conf        |  12
-rw-r--r--  collectors/ebpf.plugin/ebpf.h             |   3
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c   | 139
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.h   |   6
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c      |  78
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.h      |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.c        |  76
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.h        |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c          | 145
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.h          |  13
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.c  |  75
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.h  |   1
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.c     |  77
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.h     |   1
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.c     |  84
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.h     |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.c       |  85
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.h       |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c     |  11
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.h     |   1
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c     |  72
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h     |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c         |  82
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.h         |   2
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c      | 247
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.h      |   3
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.c     |  79
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.h     |   1
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c        |  79
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.c        |  81
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c         |  81
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.h         |   2
34 files changed, 474 insertions, 1209 deletions
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index 7762ed34f..deedf4d79 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -1,9 +1,11 @@
<!--
-title: "eBPF monitoring with Netdata"
-description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about your
-complex applications with per-second granularity."
-custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/README.md
-sidebar_label: "eBPF"
+title: "Kernel traces/metrics (eBPF) monitoring with Netdata"
+description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about your complex applications with per-second granularity."
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/README.md"
+sidebar_label: "Kernel traces/metrics (eBPF)"
+learn_status: "Published"
+learn_topic_type: "References"
+learn_rel_path: "References/Collectors references/System metrics"
-->
# eBPF monitoring with Netdata
@@ -13,7 +15,7 @@ The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs t
> ❗ eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`, and all kernels on CentOS 7.6 or later. For kernels older than `4.11.0`, improved support is in active development.
This document provides comprehensive details about the `ebpf.plugin`.
-For hands-on configuration and troubleshooting tips see our [tutorial on troubleshooting apps with eBPF metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
+For hands-on configuration and troubleshooting tips see our [tutorial on troubleshooting apps with eBPF metrics](https://github.com/netdata/netdata/blob/master/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
<figure>
<img src="https://user-images.githubusercontent.com/1153921/74746434-ad6a1e00-5222-11ea-858a-a7882617ae02.png" alt="An example of VFS charts, made possible by the eBPF collector plugin" />
@@ -42,12 +44,12 @@ If your Agent is v1.22 or older, you may need to enable the collector yourself.
To enable or disable the entire eBPF collector:
-1. Navigate to the [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit `netdata.conf`.
+2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit `netdata.conf`.
```bash
./edit-config netdata.conf
@@ -67,11 +69,11 @@ You can configure the eBPF collector's behavior to fine-tune which metrics you r
To edit the `ebpf.d.conf`:
-1. Navigate to the [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/ebpf.d.conf).
+2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/ebpf.d.conf).
```bash
./edit-config ebpf.d.conf
@@ -103,11 +105,10 @@ accepts the following values:
#### Integration with `apps.plugin`
The eBPF collector also creates charts for each running application through an integration with the
-[`apps.plugin`](/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
+[`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
interact with the Linux kernel.
-If you want to _disable_ the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-`no`.
+If you want to _enable_ the integration with `apps.plugin`, change the `apps` setting to `yes`.
```conf
[global]
@@ -122,7 +123,7 @@ it runs.
#### Integration with `cgroups.plugin`
The eBPF collector also creates charts for each cgroup through an integration with the
-[`cgroups.plugin`](/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
+[`cgroups.plugin`](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
interacts with the Linux kernel.
The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system. If you want to
@@ -244,7 +245,7 @@ The eBPF collector enables and runs the following eBPF programs by default:
You can also enable the following eBPF programs:
- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
- [`apps.plugin`](/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
+ [`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
for each application.
- `dcstat`: This eBPF program creates charts that show information about file access using the directory cache. It appends
  `kprobes` for `lookup_fast()` and `d_lookup()` to identify whether files are inside the directory cache, outside of it, or not found.
@@ -261,11 +262,11 @@ You can configure each thread of the eBPF data collector. This allows you to ove
To configure an eBPF thread:
-1. Navigate to the [Netdata config directory](/docs/configure/nodes.md#the-netdata-config-directory).
+1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
```bash
cd /etc/netdata
```
-2. Use the [`edit-config`](/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit a thread configuration file. The following configuration files are available:
+2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit a thread configuration file. The following configuration files are available:
- `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overwrites the global options and also
lets you specify which network the eBPF collector monitors.
@@ -304,7 +305,7 @@ You can configure the information shown on `outbound` and `inbound` charts with
When you define a `ports` setting, Netdata will collect network metrics for that specific port. For example, if you
write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
-[simple patterns](/libnetdata/simple_pattern/README.md). The `ports`, and `ips` settings accept negation (`!`) to deny
+[simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). The `ports` and `ips` settings accept negation (`!`) to deny
specific values, or an asterisk alone to match all values.
In the above example, Netdata will collect metrics for all ports between 1 and 443, with the exception of 53 (domain)
@@ -881,7 +882,7 @@ significantly increases kernel memory usage by several hundred MB.
If your node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart,
consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin). Next,
-[restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system memory
+[restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system memory
usage (see the `system.ram` chart) has dropped significantly.
Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#ebpf-load-mode)
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 00b53a57d..67fe477c2 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -483,6 +483,16 @@ static void ebpf_exit()
if (unlink(filename))
error("Cannot remove PID file %s", filename);
+#ifdef NETDATA_INTERNAL_CHECKS
+ error("Good bye world! I was PID %d", main_thread_id);
+#endif
+ printf("DISABLE\n");
+
+ if (shm_ebpf_cgroup.header) {
+ munmap(shm_ebpf_cgroup.header, shm_ebpf_cgroup.header->body_length);
+ shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
+ }
+
exit(0);
}
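
The hunk above makes `ebpf_exit()` tear down the cgroup shared-memory mapping before exiting. A self-contained sketch of that POSIX teardown pattern follows; the header struct and the shm name are illustrative assumptions, not Netdata's definitions:

```c
// Sketch only: unmap a shared-memory region whose mapped header records its
// own length, then unlink the name so no stale segment survives the process.
#include <sys/mman.h>

struct shm_header {
    unsigned long body_length; // total mapping size, stored by the producer
};

void shm_teardown(struct shm_header *hdr, const char *name)
{
    if (!hdr)
        return;
    munmap(hdr, hdr->body_length); // length is read from the mapping itself
    shm_unlink(name);              // e.g. an assumed "/netdata_shm_cgroup_ebpf"
}
```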
@@ -534,7 +544,7 @@ static void ebpf_stop_threads(int sig)
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_exit_plugin = 1;
- usec_t max = 3 * USEC_PER_SEC, step = 100000;
+ usec_t max = USEC_PER_SEC, step = 100000;
while (i && max) {
max -= step;
sleep_usec(step);
@@ -548,32 +558,35 @@ static void ebpf_stop_threads(int sig)
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
- //Unload threads(except sync and filesystem)
- pthread_mutex_lock(&ebpf_exit_cleanup);
- for (i = 0; ebpf_threads[i].name != NULL; i++) {
- if (ebpf_threads[i].enabled == NETDATA_THREAD_EBPF_STOPPED && i != EBPF_MODULE_FILESYSTEM_IDX &&
- i != EBPF_MODULE_SYNC_IDX)
- ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
+ if (!i) {
+ //Unload threads(except sync and filesystem)
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ for (i = 0; ebpf_threads[i].name != NULL; i++) {
+ if (ebpf_threads[i].enabled == NETDATA_THREAD_EBPF_STOPPED && i != EBPF_MODULE_FILESYSTEM_IDX &&
+ i != EBPF_MODULE_SYNC_IDX)
+ ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
+ }
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
- //Unload filesystem
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
- for (i = 0; localfs[i].filesystem != NULL; i++) {
- ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
+ //Unload filesystem
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (ebpf_threads[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
+ for (i = 0; localfs[i].filesystem != NULL; i++) {
+ ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
+ }
}
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
- //Unload Sync
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
- for (i = 0; local_syscalls[i].syscall != NULL; i++) {
- ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
+ //Unload Sync
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (ebpf_threads[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_STOPPED) {
+ for (i = 0; local_syscalls[i].syscall != NULL; i++) {
+ ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
+ }
}
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+
}
- pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_exit();
}
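
The restructured `ebpf_stop_threads()` above now waits a bounded time for workers to flag themselves stopped and unloads legacy code only when the count reaches zero. A minimal sketch of that bounded-wait pattern, with illustrative names:

```c
// Sketch: poll a running-thread counter in 100ms steps for at most 1 second.
// The caller proceeds with unloading only when this returns 0, mirroring the
// new `if (!i)` guard above.
#include <unistd.h>
#include <stdatomic.h>

atomic_int running_threads; // each worker decrements this on exit

int wait_for_threads_stop(void)
{
    long max = 1000000, step = 100000; // microseconds
    while (atomic_load(&running_threads) && max > 0) {
        usleep(step);
        max -= step;
    }
    return atomic_load(&running_threads); // nonzero: some thread never stopped
}
```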
@@ -1317,7 +1330,7 @@ static void read_local_addresses()
}
}
- fill_ip_list((family == AF_INET)?&network_viewer_opt.ipv4_local_ip:&network_viewer_opt.ipv6_local_ip,
+ ebpf_fill_ip_list((family == AF_INET)?&network_viewer_opt.ipv4_local_ip:&network_viewer_opt.ipv6_local_ip,
w,
"selector");
}
@@ -1520,13 +1533,8 @@ static void read_collector_values(int *disable_apps, int *disable_cgroups,
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
ebpf_modules[EBPF_MODULE_SOCKET_IDX].config_name,
CONFIG_BOOLEAN_NO);
-
if (enabled) {
ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps, *disable_cgroups);
- // Read network viewer section if network viewer is enabled
- // This is kept here to keep backward compatibility
- parse_network_viewer_section(&collector_config);
- parse_service_name_section(&collector_config);
started++;
}
@@ -1536,7 +1544,17 @@ static void read_collector_values(int *disable_apps, int *disable_cgroups,
if (!enabled)
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections",
CONFIG_BOOLEAN_NO);
- ebpf_modules[EBPF_MODULE_SOCKET_IDX].optional = (int)enabled;
+ network_viewer_opt.enabled = enabled;
+ if (enabled) {
+ if (!ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled)
+ ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps, *disable_cgroups);
+
+ // Read network viewer section if network viewer is enabled
+ // This is kept here to keep backward compatibility
+ parse_network_viewer_section(&collector_config);
+ parse_service_name_section(&collector_config);
+ started++;
+ }
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "cachestat",
CONFIG_BOOLEAN_NO);
@@ -1642,8 +1660,10 @@ static void read_collector_values(int *disable_apps, int *disable_cgroups,
ebpf_enable_all_charts(*disable_apps, *disable_cgroups);
// Read network viewer section
// This is kept here to keep backward compatibility
- parse_network_viewer_section(&collector_config);
- parse_service_name_section(&collector_config);
+ if (network_viewer_opt.enabled) {
+ parse_network_viewer_section(&collector_config);
+ parse_service_name_section(&collector_config);
+ }
}
}
@@ -2158,6 +2178,7 @@ static void ebpf_manage_pid(pid_t pid)
*/
int main(int argc, char **argv)
{
+ stderror = stderr;
clocks_init();
main_thread_id = gettid();
@@ -2237,13 +2258,26 @@ int main(int argc, char **argv)
}
}
- usec_t step = EBPF_DEFAULT_UPDATE_EVERY * USEC_PER_SEC;
+ usec_t step = USEC_PER_SEC;
+ int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
heartbeat_t hb;
heartbeat_init(&hb);
//Plugin will be killed when it receives a signal
while (!ebpf_exit_plugin) {
(void)heartbeat_next(&hb, step);
+
+ // We use a short heartbeat to wake the thread up quickly on exit,
+ // but we should not update the shared memory data on every tick

+ if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
+ counter = 0;
+ if (!shm_ebpf_cgroup.header)
+ ebpf_map_cgroup_shared_memory();
+
+ ebpf_parse_cgroup_shm_data();
+ }
}
+ ebpf_stop_threads(0);
+
return 0;
}
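
The new main loop wakes every second (so a shutdown signal is noticed quickly) but gates the shared-memory work behind a tick counter. A sketch of that wake-often/work-rarely idiom; `every = 10` is an assumed stand-in for `NETDATA_EBPF_CGROUP_UPDATE`, whose value is not shown in this diff:

```c
// Sketch: 1-second heartbeat, real work every N ticks. Starting the counter
// at N-1 makes the first pass do the work immediately.
#include <unistd.h>
#include <stdbool.h>

volatile bool exit_plugin; // set by the signal handler

static void update_shared_memory(void)
{
    /* ebpf_map_cgroup_shared_memory() + ebpf_parse_cgroup_shm_data()
       in the plugin; empty stand-in here */
}

void main_loop(void)
{
    const int every = 10; // assumed stand-in for NETDATA_EBPF_CGROUP_UPDATE
    int counter = every - 1;
    while (!exit_plugin) {
        sleep(1); // heartbeat_next(&hb, USEC_PER_SEC) in the plugin
        if (++counter < every)
            continue;
        counter = 0;
        update_shared_memory();
    }
}
```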
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index cf5c740fc..112df275d 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -17,7 +17,7 @@
#
[global]
ebpf load mode = entry
- apps = yes
+ apps = no
cgroups = no
update every = 5
pid table size = 32768
@@ -50,7 +50,7 @@
# When plugin detects that system has support to BTF, it enables integration with apps.plugin.
#
[ebpf programs]
- cachestat = no
+ cachestat = yes
dcstat = no
disk = no
fd = yes
@@ -60,10 +60,10 @@
mount = yes
oomkill = yes
process = yes
- shm = no
- socket = yes
+ shm = yes
+ socket = no
softirq = yes
sync = yes
- swap = no
- vfs = yes
+ swap = yes
+ vfs = no
network connections = no
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index 28b04ce48..16e62498c 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -123,6 +123,9 @@ enum ebpf_threads_status {
#endif
#endif
+// Messages
+#define NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND "Cannot find the necessary functions to monitor"
+
// Chart definitions
#define NETDATA_EBPF_FAMILY "ebpf"
#define NETDATA_EBPF_IP_FAMILY "ip"
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index 4c410647d..b21cc6103 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -15,15 +15,6 @@ netdata_cachestat_pid_t *cachestat_vector = NULL;
static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END];
static netdata_idx_t *cachestat_values = NULL;
-struct netdata_static_thread cachestat_threads = {.name = "CACHESTAT KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL};
-
ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
.map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
@@ -51,6 +42,9 @@ netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru",
{.name = "mark_buffer_dirty", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
+static char *account_page[NETDATA_CACHESTAT_ACCOUNT_DIRTY_END] ={ "account_page_dirtied",
+ "__set_page_dirty", "__folio_mark_dirty" };
+
#ifdef LIBBPF_MAJOR_VERSION
#include "includes/cachestat.skel.h" // BTF code
@@ -83,10 +77,12 @@ static void ebpf_cachestat_disable_probe(struct cachestat_bpf *obj)
*/
static void ebpf_cachestat_disable_specific_probe(struct cachestat_bpf *obj)
{
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
- } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
} else {
@@ -122,10 +118,12 @@ static void ebpf_cachestat_disable_trampoline(struct cachestat_bpf *obj)
*/
static void ebpf_cachestat_disable_specific_trampoline(struct cachestat_bpf *obj)
{
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
- } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
bpf_program__set_autoload(obj->progs.netdata_folio_mark_dirty_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
} else {
@@ -149,10 +147,12 @@ static inline void netdata_set_trampoline_target(struct cachestat_bpf *obj)
bpf_program__set_attach_target(obj->progs.netdata_mark_page_accessed_fentry, 0,
cachestat_targets[NETDATA_KEY_CALLS_MARK_PAGE_ACCESSED].name);
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
bpf_program__set_attach_target(obj->progs.netdata_folio_mark_dirty_fentry, 0,
cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
- } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
bpf_program__set_attach_target(obj->progs.netdata_set_page_dirty_fentry, 0,
cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
} else {
@@ -192,12 +192,14 @@ static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
if (ret)
return -1;
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16) {
+ if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_FOLIO_DIRTY])) {
obj->links.netdata_folio_mark_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_folio_mark_dirty_kprobe,
false,
cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
ret = libbpf_get_error(obj->links.netdata_folio_mark_dirty_kprobe);
- } else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15) {
+ } else if (!strcmp(cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name,
+ account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY])) {
obj->links.netdata_set_page_dirty_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_set_page_dirty_kprobe,
false,
cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name);
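
The three hunks above replace kernel-version checks with string comparisons against the target name resolved at startup, so only the programs written for that target stay loadable. A sketch of the same selection done with plain libbpf (no generated skeleton); the object path and the naming convention tying program names to kernel functions are assumptions:

```c
// Sketch: open a BPF object, autoload only the program matching the resolved
// kernel function, load, and attach a kprobe to that function.
#include <string.h>
#include <bpf/libbpf.h>

int load_for_target(const char *resolved /* e.g. "__folio_mark_dirty" */)
{
    struct bpf_object *obj = bpf_object__open_file("cachestat.o", NULL);
    if (libbpf_get_error(obj))
        return -1;

    struct bpf_program *prog, *chosen = NULL;
    bpf_object__for_each_program(prog, obj) {
        if (strstr(bpf_program__name(prog), resolved)) // assumed naming scheme
            chosen = prog;
        else
            bpf_program__set_autoload(prog, false);    // skip unused variants
    }

    if (!chosen || bpf_object__load(obj))
        goto err;

    if (libbpf_get_error(bpf_program__attach_kprobe(chosen, false, resolved)))
        goto err;
    return 0;

err:
    bpf_object__close(obj);
    return -1;
}
```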
@@ -278,7 +280,7 @@ static void ebpf_cachestat_disable_release_task(struct cachestat_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf_module_t *em)
{
@@ -331,18 +333,13 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
static void ebpf_cachestat_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated);
freez(cachestat_vector);
freez(cachestat_values);
- freez(cachestat_threads.thread);
#ifdef LIBBPF_MAJOR_VERSION
if (bpf_obj)
@@ -363,20 +360,7 @@ static void ebpf_cachestat_free(ebpf_module_t *em)
static void ebpf_cachestat_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*cachestat_threads.thread);
- ebpf_cachestat_free(em);
-}
-/**
- * Cachestat cleanup
- *
- * Clean up allocated addresses.
- *
- * @param ptr thread data.
- */
-static void ebpf_cachestat_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_cachestat_free(em);
}
@@ -656,7 +640,7 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* Read the table with number of calls for all functions
*/
-static void read_global_table()
+static void ebpf_cachestat_read_global_table()
{
uint32_t idx;
netdata_idx_t *val = cachestat_hash_values;
@@ -677,35 +661,6 @@ static void read_global_table()
}
/**
- * Socket read hash
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data on very busy socket.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_cachestat_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_cachestat_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Send global
*
* Send global charts to Netdata
@@ -1106,26 +1061,23 @@ void ebpf_cachestat_send_cgroup_data(int update_every)
*/
static void cachestat_collector(ebpf_module_t *em)
{
- cachestat_threads.thread = callocz(1, sizeof(netdata_thread_t));
- cachestat_threads.start_routine = ebpf_cachestat_read_hash;
-
- netdata_thread_create(cachestat_threads.thread, cachestat_threads.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_cachestat_read_hash, em);
-
netdata_publish_cachestat_t publish;
memset(&publish, 0, sizeof(publish));
int cgroups = em->cgroup_charts;
int update_every = em->update_every;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
//This will be cancelled by its parent
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
+ ebpf_cachestat_read_global_table();
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_apps_table();
@@ -1237,16 +1189,28 @@ static void ebpf_cachestat_allocate_global_vectors(int apps)
* Update Internal value
*
* Update values used during runtime.
+ *
+ * @return It returns 0 when one of the functions is present and -1 otherwise.
*/
-static void ebpf_cachestat_set_internal_value()
+static int ebpf_cachestat_set_internal_value()
{
- static char *account_page[] = { "account_page_dirtied", "__set_page_dirty", "__folio_mark_dirty" };
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_16)
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_FOLIO_DIRTY];
- else if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_15)
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_SET_PAGE_DIRTY];
- else
- cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = account_page[NETDATA_CACHESTAT_ACCOUNT_PAGE_DIRTY];
+ ebpf_addresses_t address = {.function = NULL, .hash = 0, .addr = 0};
+ int i;
+ for (i = 0; i < NETDATA_CACHESTAT_ACCOUNT_DIRTY_END ; i++) {
+ address.function = account_page[i];
+ ebpf_load_addresses(&address, -1);
+ if (address.addr)
+ break;
+ }
+
+ if (!address.addr) {
+ error("%s cachestat.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND);
+ return -1;
+ }
+
+ cachestat_targets[NETDATA_KEY_CALLS_ACCOUNT_PAGE_DIRTIED].name = address.function;
+
+ return 0;
}
/*
@@ -1300,7 +1264,10 @@ void *ebpf_cachestat_thread(void *ptr)
ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em);
- ebpf_cachestat_set_internal_value();
+ if (ebpf_cachestat_set_internal_value()) {
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ goto endcachestat;
+ }
#ifdef LIBBPF_MAJOR_VERSION
ebpf_adjust_thread_load(em, default_btf);
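
Instead of guessing by kernel version, `ebpf_cachestat_set_internal_value()` now asks `ebpf_load_addresses()` whether each candidate function actually exists on the running kernel, and disables the thread when none does. A plausible sketch of such a lookup (an assumption; Netdata's real implementation is not shown in this diff) scans `/proc/kallsyms`:

```c
// Sketch: return the address of an exported kernel text symbol, or 0 when it
// is absent. Caveat: unprivileged readers see all-zero addresses in kallsyms,
// so a real implementation must run privileged or track "found" separately.
#include <stdio.h>
#include <string.h>

unsigned long long kallsyms_lookup(const char *function)
{
    FILE *fp = fopen("/proc/kallsyms", "r");
    if (!fp)
        return 0;

    unsigned long long addr = 0, cur;
    char type, name[256];
    while (fscanf(fp, "%llx %c %255s%*[^\n]", &cur, &type, name) == 3) {
        if ((type == 'T' || type == 't') && !strcmp(name, function)) {
            addr = cur;
            break;
        }
    }
    fclose(fp);
    return addr;
}
```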
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h
index 07f0745d4..15b06511e 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ b/collectors/ebpf.plugin/ebpf_cachestat.h
@@ -19,8 +19,6 @@
#define EBPF_CACHESTAT_DIMENSION_HITS "hits/s"
#define EBPF_CACHESTAT_DIMENSION_MISSES "misses/s"
-#define NETDATA_LATENCY_CACHESTAT_SLEEP_MS 600000ULL
-
// configuration file
#define NETDATA_CACHESTAT_CONFIG_FILE "cachestat.conf"
@@ -48,7 +46,9 @@ enum cachestat_counters {
enum cachestat_account_dirty_pages {
NETDATA_CACHESTAT_ACCOUNT_PAGE_DIRTY,
NETDATA_CACHESTAT_SET_PAGE_DIRTY,
- NETDATA_CACHESTAT_FOLIO_DIRTY
+ NETDATA_CACHESTAT_FOLIO_DIRTY,
+
+ NETDATA_CACHESTAT_ACCOUNT_DIRTY_END
};
enum cachestat_indexes {
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index 71169e153..75e83214a 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -19,15 +19,6 @@ struct config dcstat_config = { .first_section = NULL,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
-struct netdata_static_thread dcstat_threads = {"DCSTAT KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL};
-
ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
.map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
@@ -216,7 +207,7 @@ static void ebpf_dc_disable_release_task(struct dc_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em)
{
@@ -303,16 +294,11 @@ void ebpf_dcstat_clean_names()
static void ebpf_dcstat_free(ebpf_module_t *em )
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
freez(dcstat_vector);
freez(dcstat_values);
- freez(dcstat_threads.thread);
ebpf_cleanup_publish_syscall(dcstat_counter_publish_aggregated);
@@ -338,18 +324,6 @@ static void ebpf_dcstat_free(ebpf_module_t *em )
static void ebpf_dcstat_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*dcstat_threads.thread);
- ebpf_dcstat_free(em);
-}
-
-/**
- * Clean up the main thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_dcstat_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_dcstat_free(em);
}
@@ -538,7 +512,7 @@ static void ebpf_update_dc_cgroup()
*
* Read the table with number of calls for all functions
*/
-static void read_global_table()
+static void ebpf_dc_read_global_table()
{
uint32_t idx;
netdata_idx_t *val = dcstat_hash_values;
@@ -559,35 +533,6 @@ static void read_global_table()
}
/**
- * DCstat read hash
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_dcstat_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_dcstat_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_LATENCY_DCSTAT_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Cachestat sum PIDs
*
* Sum values for all PIDs associated to a group
@@ -1034,25 +979,22 @@ void ebpf_dc_send_cgroup_data(int update_every)
*/
static void dcstat_collector(ebpf_module_t *em)
{
- dcstat_threads.thread = mallocz(sizeof(netdata_thread_t));
- dcstat_threads.start_routine = ebpf_dcstat_read_hash;
-
- netdata_thread_create(dcstat_threads.thread, dcstat_threads.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_dcstat_read_hash, em);
-
netdata_publish_dcstat_t publish;
memset(&publish, 0, sizeof(publish));
int cgroups = em->cgroup_charts;
int update_every = em->update_every;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
+ ebpf_dc_read_global_table();
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_apps_table();
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h
index d8687f968..201fc8a02 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.h
+++ b/collectors/ebpf.plugin/ebpf_dcstat.h
@@ -28,8 +28,6 @@
#define NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT "services.dc_not_cache"
#define NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT "services.dc_not_found"
-#define NETDATA_LATENCY_DCSTAT_SLEEP_MS 700000ULL
-
enum directory_cache_indexes {
NETDATA_DCSTAT_IDX_RATIO,
NETDATA_DCSTAT_IDX_REFERENCE,
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c
index a27bd81e3..5e7e2599d 100644
--- a/collectors/ebpf.plugin/ebpf_disk.c
+++ b/collectors/ebpf.plugin/ebpf_disk.c
@@ -33,16 +33,6 @@ static netdata_syscall_stat_t disk_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS];
static netdata_publish_syscall_t disk_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS];
static netdata_idx_t *disk_hash_values = NULL;
-static struct netdata_static_thread disk_threads = {
- .name = "DISK KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
ebpf_publish_disk_t *plot_disks = NULL;
pthread_mutex_t plot_mutex;
@@ -439,11 +429,7 @@ static void ebpf_cleanup_disk_list()
static void ebpf_disk_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_disk_disable_tracepoints();
@@ -452,7 +438,6 @@ static void ebpf_disk_free(ebpf_module_t *em)
ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS);
freez(disk_hash_values);
- freez(disk_threads.thread);
pthread_mutex_destroy(&plot_mutex);
ebpf_cleanup_plot_disks();
@@ -473,20 +458,6 @@ static void ebpf_disk_free(ebpf_module_t *em)
static void ebpf_disk_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*disk_threads.thread);
- ebpf_disk_free(em);
-}
-
-/**
- * Disk Cleanup
- *
- * Clean up allocated memory.
- *
- * @param ptr thread data.
- */
-static void ebpf_disk_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_disk_free(em);
}
@@ -592,35 +563,6 @@ static void read_hard_disk_tables(int table)
}
/**
- * Disk read hash
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data on very busy socket.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_disk_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_disk_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_LATENCY_DISK_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd);
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Obsolete Hard Disk charts
*
* Make Hard disk charts and fill chart name
@@ -743,21 +685,19 @@ static void ebpf_latency_send_hd_data(int update_every)
static void disk_collector(ebpf_module_t *em)
{
disk_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
- disk_threads.thread = mallocz(sizeof(netdata_thread_t));
- disk_threads.start_routine = ebpf_disk_read_hash;
-
- netdata_thread_create(disk_threads.thread, disk_threads.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_disk_read_hash, em);
int update_every = em->update_every;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
+ read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd);
pthread_mutex_lock(&lock);
ebpf_remove_pointer_from_plot_disk(em);
ebpf_latency_send_hd_data(update_every);
diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/collectors/ebpf.plugin/ebpf_disk.h
index c14b887f8..c606d6594 100644
--- a/collectors/ebpf.plugin/ebpf_disk.h
+++ b/collectors/ebpf.plugin/ebpf_disk.h
@@ -11,8 +11,6 @@
#define NETDATA_EBPF_PROC_PARTITIONS "/proc/partitions"
-#define NETDATA_LATENCY_DISK_SLEEP_MS 650000ULL
-
// Process configuration name
#define NETDATA_DISK_CONFIG_FILE "disk.conf"
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
index 30b7f22ce..79537066c 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -6,6 +6,9 @@
static char *fd_dimension_names[NETDATA_FD_SYSCALL_END] = { "open", "close" };
static char *fd_id_names[NETDATA_FD_SYSCALL_END] = { "do_sys_open", "__close_fd" };
+static char *close_targets[NETDATA_EBPF_MAX_FD_TARGETS] = {"close_fd", "__close_fd"};
+static char *open_targets[NETDATA_EBPF_MAX_FD_TARGETS] = {"do_sys_openat2", "do_sys_open"};
+
static netdata_syscall_stat_t fd_aggregated_data[NETDATA_FD_SYSCALL_END];
static netdata_publish_syscall_t fd_publish_aggregated[NETDATA_FD_SYSCALL_END];
@@ -29,15 +32,6 @@ struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex
.index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
-struct netdata_static_thread fd_thread = {"FD KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL};
-
static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
static netdata_idx_t *fd_values = NULL;
@@ -65,7 +59,7 @@ static inline void ebpf_fd_disable_probes(struct fd_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_sys_open_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_sys_open_kretprobe, false);
bpf_program__set_autoload(obj->progs.netdata_release_task_fd_kprobe, false);
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) {
+ if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
bpf_program__set_autoload(obj->progs.netdata___close_fd_kretprobe, false);
bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_close_fd_kprobe, false);
@@ -85,7 +79,7 @@ static inline void ebpf_fd_disable_probes(struct fd_bpf *obj)
*/
static inline void ebpf_disable_specific_probes(struct fd_bpf *obj)
{
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) {
+ if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
bpf_program__set_autoload(obj->progs.netdata___close_fd_kretprobe, false);
bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false);
} else {
@@ -121,7 +115,7 @@ static inline void ebpf_disable_trampoline(struct fd_bpf *obj)
*/
static inline void ebpf_disable_specific_trampoline(struct fd_bpf *obj)
{
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) {
+ if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
bpf_program__set_autoload(obj->progs.netdata___close_fd_fentry, false);
bpf_program__set_autoload(obj->progs.netdata___close_fd_fexit, false);
} else {
@@ -143,7 +137,7 @@ static void ebpf_set_trampoline_target(struct fd_bpf *obj)
bpf_program__set_attach_target(obj->progs.netdata_sys_open_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
bpf_program__set_attach_target(obj->progs.netdata_release_task_fd_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP);
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) {
+ if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
bpf_program__set_attach_target(
obj->progs.netdata_close_fd_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
bpf_program__set_attach_target(obj->progs.netdata_close_fd_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
@@ -185,7 +179,7 @@ static int ebpf_fd_attach_probe(struct fd_bpf *obj)
if (ret)
return -1;
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) {
+ if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
obj->links.netdata_close_fd_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_close_fd_kretprobe, true,
fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
ret = libbpf_get_error(obj->links.netdata_close_fd_kretprobe);
@@ -217,24 +211,50 @@ static int ebpf_fd_attach_probe(struct fd_bpf *obj)
}
/**
- * Set target values
+ * FD Fill Address
*
- * Set pointers used to laod data.
+ * Fill address value used to load probes/trampoline.
*/
-static void ebpf_fd_set_target_values()
+static inline void ebpf_fd_fill_address(ebpf_addresses_t *address, char **targets)
{
- static char *close_targets[] = {"close_fd", "__close_fd"};
- static char *open_targets[] = {"do_sys_openat2", "do_sys_open"};
- if (running_on_kernel >= NETDATA_EBPF_KERNEL_5_11) {
- fd_targets[NETDATA_FD_SYSCALL_OPEN].name = open_targets[0];
- fd_targets[NETDATA_FD_SYSCALL_CLOSE].name = close_targets[0];
- } else {
- fd_targets[NETDATA_FD_SYSCALL_OPEN].name = open_targets[1];
- fd_targets[NETDATA_FD_SYSCALL_CLOSE].name = close_targets[1];
+ int i;
+ for (i = 0; i < NETDATA_EBPF_MAX_FD_TARGETS; i++) {
+ address->function = targets[i];
+ ebpf_load_addresses(address, -1);
+ if (address->addr)
+ break;
}
}
/**
+ * Set target values
+ *
+ * Set pointers used to load data.
+ *
+ * @return It returns 0 on success and -1 otherwise.
+ */
+static int ebpf_fd_set_target_values()
+{
+ ebpf_addresses_t address = {.function = NULL, .hash = 0, .addr = 0};
+ ebpf_fd_fill_address(&address, close_targets);
+
+ if (!address.addr)
+ return -1;
+
+ fd_targets[NETDATA_FD_SYSCALL_CLOSE].name = address.function;
+
+ address.addr = 0;
+ ebpf_fd_fill_address(&address, open_targets);
+
+ if (!address.addr)
+ return -1;
+
+ fd_targets[NETDATA_FD_SYSCALL_OPEN].name = address.function;
+
+ return 0;
+}
+
+/**
* Set hash tables
*
* Set the values for maps according the value given by kernel.
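
`ebpf_fd_set_target_values()` above applies the same idea to two candidate lists at once: walk the candidates newest-kernel-first and keep the first symbol the running kernel provides. A compact sketch, reusing the hypothetical `kallsyms_lookup()` from the cachestat section:

```c
// Sketch: pick the first available symbol from a NULL-terminated candidate
// list, e.g. {"close_fd", "__close_fd"} or {"do_sys_openat2", "do_sys_open"}.
unsigned long long kallsyms_lookup(const char *function); // sketch above

const char *pick_first_available(const char **candidates)
{
    for (int i = 0; candidates[i]; i++)
        if (kallsyms_lookup(candidates[i]))
            return candidates[i];
    return NULL; // caller logs NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND and bails out
}
```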
@@ -283,14 +303,18 @@ static void ebpf_fd_disable_release_task(struct fd_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
{
netdata_ebpf_targets_t *mt = em->targets;
netdata_ebpf_program_loaded_t test = mt[NETDATA_FD_SYSCALL_OPEN].mode;
- ebpf_fd_set_target_values();
+ if (ebpf_fd_set_target_values()) {
+ error("%s file descriptor.", NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND);
+ return -1;
+ }
+
if (test == EBPF_LOAD_TRAMPOLINE) {
ebpf_fd_disable_probes(obj);
ebpf_disable_specific_trampoline(obj);
@@ -340,15 +364,10 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
static void ebpf_fd_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(fd_publish_aggregated);
- freez(fd_thread.thread);
freez(fd_values);
freez(fd_vector);
@@ -372,18 +391,6 @@ static void ebpf_fd_free(ebpf_module_t *em)
static void ebpf_fd_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*fd_thread.thread);
- ebpf_fd_free(em);
-}
-
-/**
- * Clean up the main thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_fd_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_fd_free(em);
}
@@ -420,7 +427,7 @@ static void ebpf_fd_send_data(ebpf_module_t *em)
*
* Read the table with number of calls for all functions
*/
-static void read_global_table()
+static void ebpf_fd_read_global_table()
{
uint32_t idx;
netdata_idx_t *val = fd_hash_values;
@@ -441,34 +448,6 @@ static void read_global_table()
}
/**
- * File descriptor read hash
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_fd_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_fd_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- usec_t step = NETDATA_FD_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Apps Accumulator
*
* Sum all values read from kernel and store in the first address.
@@ -942,22 +921,20 @@ static void ebpf_fd_send_cgroup_data(ebpf_module_t *em)
*/
static void fd_collector(ebpf_module_t *em)
{
- fd_thread.thread = mallocz(sizeof(netdata_thread_t));
- fd_thread.start_routine = ebpf_fd_read_hash;
-
- netdata_thread_create(fd_thread.thread, fd_thread.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_fd_read_hash, em);
-
int cgroups = em->cgroup_charts;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = em->update_every * USEC_PER_SEC;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
+ ebpf_fd_read_global_table();
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_apps_table();
diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/collectors/ebpf.plugin/ebpf_fd.h
index 914a34b98..e6545d79c 100644
--- a/collectors/ebpf.plugin/ebpf_fd.h
+++ b/collectors/ebpf.plugin/ebpf_fd.h
@@ -6,8 +6,6 @@
// Module name
#define NETDATA_EBPF_MODULE_NAME_FD "filedescriptor"
-#define NETDATA_FD_SLEEP_MS 850000ULL
-
// Menu group
#define NETDATA_FILE_GROUP "file_access"
@@ -36,9 +34,6 @@
#define NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT "services.fd_close_error"
typedef struct netdata_fd_stat {
- uint64_t pid_tgid; // Unique identifier
- uint32_t pid; // Process ID
-
uint32_t open_call; // Open syscalls (open and openat)
uint32_t close_call; // Close syscall (close)
@@ -74,6 +69,14 @@ enum fd_syscalls {
NETDATA_FD_SYSCALL_END
};
+enum fd_close_syscall {
+ NETDATA_FD_CLOSE_FD,
+ NETDATA_FD___CLOSE_FD,
+
+ NETDATA_FD_CLOSE_END
+};
+
+#define NETDATA_EBPF_MAX_FD_TARGETS 2
void *ebpf_fd_thread(void *ptr);
void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr);
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c
index 7dbec7410..5250ed8af 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.c
+++ b/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -30,17 +30,6 @@ static ebpf_local_maps_t fs_maps[] = {{.name = "tbl_ext4", .internal_input = NET
.type = NETDATA_EBPF_MAP_CONTROLLER,
.map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
-struct netdata_static_thread filesystem_threads = {
- .name = "EBPF FS READ",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS];
static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS];
@@ -337,14 +326,9 @@ void ebpf_filesystem_cleanup_ebpf_data()
static void ebpf_filesystem_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- freez(filesystem_threads.thread);
ebpf_cleanup_publish_syscall(filesystem_publish_aggregated);
ebpf_filesystem_cleanup_ebpf_data();
@@ -367,20 +351,6 @@ static void ebpf_filesystem_free(ebpf_module_t *em)
static void ebpf_filesystem_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*filesystem_threads.thread);
- ebpf_filesystem_free(em);
-}
-
-/**
- * File system cleanup
- *
- * Clean up allocated thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_filesystem_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_filesystem_free(em);
}
@@ -483,30 +453,16 @@ static void read_filesystem_tables()
*
* @return It always returns NULL.
*/
-void *ebpf_filesystem_read_hash(void *ptr)
+void ebpf_filesystem_read_hash(ebpf_module_t *em)
{
- netdata_thread_cleanup_push(ebpf_filesystem_cleanup, ptr);
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- usec_t step = NETDATA_FILESYSTEM_READ_SLEEP_MS * em->update_every;
- int update_every = em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
+ ebpf_obsolete_fs_charts(em->update_every);
- (void) ebpf_update_partitions(em);
- ebpf_obsolete_fs_charts(update_every);
+ (void) ebpf_update_partitions(em);
- // No more partitions, it is not necessary to read tables
- if (em->optional)
- continue;
-
- read_filesystem_tables();
- }
+ if (em->optional)
+ return;
- netdata_thread_cleanup_pop(1);
- return NULL;
+ read_filesystem_tables();
}
/**
@@ -543,21 +499,18 @@ static void ebpf_histogram_send_data()
*/
static void filesystem_collector(ebpf_module_t *em)
{
- filesystem_threads.thread = mallocz(sizeof(netdata_thread_t));
- filesystem_threads.start_routine = ebpf_filesystem_read_hash;
-
- netdata_thread_create(filesystem_threads.thread, filesystem_threads.name,
- NETDATA_THREAD_OPTION_DEFAULT, ebpf_filesystem_read_hash, em);
-
int update_every = em->update_every;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
+ ebpf_filesystem_read_hash(em);
pthread_mutex_lock(&lock);
ebpf_create_fs_charts(update_every);
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h
index 0d558df7d..cf19b253e 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.h
+++ b/collectors/ebpf.plugin/ebpf_filesystem.h
@@ -11,7 +11,6 @@
#define NETDATA_FS_MAX_DIST_NAME 64UL
#define NETDATA_FILESYSTEM_CONFIG_NAME "filesystem"
-#define NETDATA_FILESYSTEM_READ_SLEEP_MS 600000ULL
// Process configuration name
#define NETDATA_FILESYSTEM_CONFIG_FILE "filesystem.conf"
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c
index b07dd24ca..20c4b9d05 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.c
+++ b/collectors/ebpf.plugin/ebpf_hardirq.c
@@ -135,17 +135,6 @@ static hardirq_ebpf_val_t *hardirq_ebpf_vals = NULL;
// tmp store for static hard IRQ values we get from a per-CPU eBPF map.
static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL;
-static struct netdata_static_thread hardirq_threads = {
- .name = "HARDIRQ KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
/**
* Hardirq Free
*
@@ -156,21 +145,18 @@ static struct netdata_static_thread hardirq_threads = {
static void ebpf_hardirq_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- freez(hardirq_threads.thread);
for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) {
ebpf_disable_tracepoint(&hardirq_tracepoints[i]);
}
freez(hardirq_ebpf_vals);
freez(hardirq_ebpf_static_vals);
+ pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
}
/**
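
Note that the hardirq hunk above also moves the final `NETDATA_THREAD_EBPF_STOPPED` store under `ebpf_exit_cleanup`, so every state transition is serialized against the shutdown path. A minimal sketch of that locking discipline, with illustrative names:

```c
// Sketch: all transitions of the module state flag happen under one mutex,
// so the main thread never observes a half-finished teardown.
#include <pthread.h>

enum state { RUNNING, STOPPING, STOPPED };
static enum state module_state = RUNNING;
static pthread_mutex_t exit_cleanup = PTHREAD_MUTEX_INITIALIZER;

static void set_state(enum state s)
{
    pthread_mutex_lock(&exit_cleanup);
    module_state = s;
    pthread_mutex_unlock(&exit_cleanup);
}

static void module_free(void)
{
    set_state(STOPPING);  // announce teardown
    /* detach programs, free buffers ... */
    set_state(STOPPED);   // only now may the main thread unload the object
}
```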
@@ -183,20 +169,6 @@ static void ebpf_hardirq_free(ebpf_module_t *em)
static void hardirq_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*hardirq_threads.thread);
- ebpf_hardirq_free(em);
-}
-
-/**
- * Hardirq clean up
- *
- * Clean up allocated memory.
- *
- * @param ptr thread data.
- */
-static void hardirq_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_hardirq_free(em);
}
@@ -331,24 +303,10 @@ static void hardirq_read_latency_static_map(int mapfd)
/**
* Read eBPF maps for hard IRQ.
*/
-static void *hardirq_reader(void *ptr)
+static void hardirq_reader()
{
- netdata_thread_cleanup_push(hardirq_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_HARDIRQ_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd);
- hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd);
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
+ hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd);
+ hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd);
}
static void hardirq_create_charts(int update_every)
@@ -428,17 +386,6 @@ static void hardirq_collector(ebpf_module_t *em)
avl_init_lock(&hardirq_pub, hardirq_val_cmp);
- // create reader thread.
- hardirq_threads.thread = mallocz(sizeof(netdata_thread_t));
- hardirq_threads.start_routine = hardirq_reader;
- netdata_thread_create(
- hardirq_threads.thread,
- hardirq_threads.name,
- NETDATA_THREAD_OPTION_DEFAULT,
- hardirq_reader,
- em
- );
-
// create chart and static dims.
pthread_mutex_lock(&lock);
hardirq_create_charts(em->update_every);
@@ -449,13 +396,17 @@ static void hardirq_collector(ebpf_module_t *em)
// loop and read from published data until ebpf plugin is closed.
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = em->update_every * USEC_PER_SEC;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
//This will be cancelled by its parent
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
+ hardirq_reader();
pthread_mutex_lock(&lock);
// write dims now for all hitherto discovered IRQs.
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.h b/collectors/ebpf.plugin/ebpf_hardirq.h
index 381da57d8..fe38b1bb1 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.h
+++ b/collectors/ebpf.plugin/ebpf_hardirq.h
@@ -47,7 +47,6 @@ typedef struct hardirq_ebpf_static_val {
*****************************************************************/
#define NETDATA_EBPF_MODULE_NAME_HARDIRQ "hardirq"
-#define NETDATA_HARDIRQ_SLEEP_MS 650000ULL
#define NETDATA_HARDIRQ_CONFIG_FILE "hardirq.conf"
typedef struct hardirq_val {
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c
index dc805da23..1a5a7731e 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.c
+++ b/collectors/ebpf.plugin/ebpf_mdflush.c
@@ -35,17 +35,6 @@ static avl_tree_lock mdflush_pub;
// tmp store for mdflush values we get from a per-CPU eBPF map.
static mdflush_ebpf_val_t *mdflush_ebpf_vals = NULL;
-static struct netdata_static_thread mdflush_threads = {
- .name = "MDFLUSH KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
/**
* MDflush Free
*
@@ -55,18 +44,10 @@ static struct netdata_static_thread mdflush_threads = {
*/
static void ebpf_mdflush_free(ebpf_module_t *em)
{
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
freez(mdflush_ebpf_vals);
- freez(mdflush_threads.thread);
-
+ pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
}
/**
@@ -83,20 +64,6 @@ static void mdflush_exit(void *ptr)
}
/**
- * CLeanup
- *
- * Clean allocated memory.
- *
- * @param ptr thread data.
- */
-static void mdflush_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*mdflush_threads.thread);
- ebpf_mdflush_free(em);
-}
-
-/**
* Compare mdflush values.
*
* @param a `netdata_mdflush_t *`.
@@ -188,28 +155,6 @@ static void mdflush_read_count_map()
}
}
-/**
- * Read eBPF maps for mdflush.
- */
-static void *mdflush_reader(void *ptr)
-{
- netdata_thread_cleanup_push(mdflush_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_MDFLUSH_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- mdflush_read_count_map();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
static void mdflush_create_charts(int update_every)
{
ebpf_create_chart(
@@ -256,34 +201,27 @@ static void mdflush_collector(ebpf_module_t *em)
{
mdflush_ebpf_vals = callocz(ebpf_nprocs, sizeof(mdflush_ebpf_val_t));
+ int update_every = em->update_every;
avl_init_lock(&mdflush_pub, mdflush_val_cmp);
- // create reader thread.
- mdflush_threads.thread = mallocz(sizeof(netdata_thread_t));
- mdflush_threads.start_routine = mdflush_reader;
- netdata_thread_create(
- mdflush_threads.thread,
- mdflush_threads.name,
- NETDATA_THREAD_OPTION_DEFAULT,
- mdflush_reader,
- em
- );
-
// create chart and static dims.
pthread_mutex_lock(&lock);
- mdflush_create_charts(em->update_every);
+ mdflush_create_charts(update_every);
ebpf_update_stats(&plugin_statistics, em);
pthread_mutex_unlock(&lock);
// loop and read from published data until ebpf plugin is closed.
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = em->update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+
+ counter = 0;
+ mdflush_read_count_map();
// write dims now for all hitherto discovered devices.
write_begin_chart("mdstat", "mdstat_flush");
avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL);
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.h b/collectors/ebpf.plugin/ebpf_mdflush.h
index b04eefd28..4913ad019 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.h
+++ b/collectors/ebpf.plugin/ebpf_mdflush.h
@@ -6,8 +6,6 @@
// Module name
#define NETDATA_EBPF_MODULE_NAME_MDFLUSH "mdflush"
-#define NETDATA_MDFLUSH_SLEEP_MS 850000ULL
-
// charts
#define NETDATA_MDFLUSH_GLOBAL_CHART "mdflush"
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c
index ec1f07a65..e06010b5b 100644
--- a/collectors/ebpf.plugin/ebpf_mount.c
+++ b/collectors/ebpf.plugin/ebpf_mount.c
@@ -22,17 +22,6 @@ static netdata_idx_t *mount_values = NULL;
static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END];
-struct netdata_static_thread mount_thread = {
- .name = "MOUNT KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
netdata_ebpf_targets_t mount_targets[] = { {.name = "mount", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "umount", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
@@ -187,7 +176,7 @@ static void ebpf_mount_set_hash_tables(struct mount_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_t *em)
{
@@ -239,14 +228,9 @@ static inline int ebpf_mount_load_and_attach(struct mount_bpf *obj, ebpf_module_
static void ebpf_mount_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- freez(mount_thread.thread);
freez(mount_values);
#ifdef LIBBPF_MAJOR_VERSION
@@ -269,20 +253,6 @@ static void ebpf_mount_free(ebpf_module_t *em)
static void ebpf_mount_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*mount_thread.thread);
- ebpf_mount_free(em);
-}
-
-/**
- * Mount cleanup
- *
- * Clean up allocated memory.
- *
- * @param ptr thread data.
- */
-static void ebpf_mount_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_mount_free(em);
}
@@ -297,7 +267,7 @@ static void ebpf_mount_cleanup(void *ptr)
*
* Read the table with number of calls for all functions
*/
-static void read_global_table()
+static void ebpf_mount_read_global_table()
{
uint32_t idx;
netdata_idx_t *val = mount_hash_values;
@@ -318,36 +288,6 @@ static void read_global_table()
}
/**
- * Mount read hash
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_mount_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_mount_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_LATENCY_MOUNT_SLEEP_MS * em->update_every;
- //This will be cancelled by its parent
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Send data to Netdata calling auxiliary functions.
*/
static void ebpf_mount_send_data()
@@ -371,23 +311,20 @@ static void ebpf_mount_send_data()
*/
static void mount_collector(ebpf_module_t *em)
{
- mount_thread.thread = mallocz(sizeof(netdata_thread_t));
- mount_thread.start_routine = ebpf_mount_read_hash;
- memset(mount_hash_values, 0, sizeof(mount_hash_values));
-
mount_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
-
- netdata_thread_create(mount_thread.thread, mount_thread.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_mount_read_hash, em);
+ memset(mount_hash_values, 0, sizeof(mount_hash_values));
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = em->update_every * USEC_PER_SEC;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
+ ebpf_mount_read_global_table();
pthread_mutex_lock(&lock);
ebpf_mount_send_data();
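
ebpf_mount_read_global_table() and its renamed siblings all follow one pattern: each key of a per-CPU array map returns one value per possible CPU, and user space folds those into a single counter. A sketch of that pattern, assuming libbpf's bpf_map_lookup_elem(); the parameters are hypothetical stand-ins for the per-module map fd, key count, and scratch/output vectors:

    #include <bpf/bpf.h>
    #include <stdint.h>

    static void read_percpu_counters(int fd, uint32_t nkeys, int nprocs,
                                     uint64_t *out, uint64_t *scratch)
    {
        for (uint32_t idx = 0; idx < nkeys; idx++) {
            // For a per-CPU map, one lookup fills one slot per possible CPU.
            if (!bpf_map_lookup_elem(fd, &idx, scratch)) {
                uint64_t total = 0;
                for (int i = 0; i < nprocs; i++)
                    total += scratch[i];
                out[idx] = total;       // the single value the charts consume
            }
        }
    }
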
diff --git a/collectors/ebpf.plugin/ebpf_mount.h b/collectors/ebpf.plugin/ebpf_mount.h
index 5a8d11a59..11b21f832 100644
--- a/collectors/ebpf.plugin/ebpf_mount.h
+++ b/collectors/ebpf.plugin/ebpf_mount.h
@@ -8,8 +8,6 @@
#define NETDATA_EBPF_MOUNT_SYSCALL 2
-#define NETDATA_LATENCY_MOUNT_SLEEP_MS 700000ULL
-
#define NETDATA_EBPF_MOUNT_CALLS "call"
#define NETDATA_EBPF_MOUNT_ERRORS "error"
#define NETDATA_EBPF_MOUNT_FAMILY "mount (eBPF)"
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index d93e4159e..82420d54e 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -46,7 +46,9 @@ static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill"
static void oomkill_cleanup(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
}
static void oomkill_write_data(int32_t *keys, uint32_t total)
@@ -294,12 +296,13 @@ static void oomkill_collector(ebpf_module_t *em)
// loop and read until ebpf plugin is closed.
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
pthread_mutex_lock(&collect_data_mutex);
pthread_mutex_lock(&lock);
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.h b/collectors/ebpf.plugin/ebpf_oomkill.h
index 786086384..f921f9d87 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.h
+++ b/collectors/ebpf.plugin/ebpf_oomkill.h
@@ -17,7 +17,6 @@ typedef uint8_t oomkill_ebpf_val_t;
*****************************************************************/
#define NETDATA_EBPF_MODULE_NAME_OOMKILL "oomkill"
-#define NETDATA_OOMKILL_SLEEP_MS 650000ULL
#define NETDATA_OOMKILL_CONFIG_FILE "oomkill.conf"
#define NETDATA_OOMKILL_CHART "oomkills"
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 682577da7..9a191d391 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -57,17 +57,6 @@ struct config process_config = { .first_section = NULL,
static char *threads_stat[NETDATA_EBPF_THREAD_STAT_END] = {"total", "running"};
static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"};
-static struct netdata_static_thread cgroup_thread = {
- .name = "EBPF CGROUP",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
/*****************************************************************
*
* PROCESS DATA AND SEND TO NETDATA
@@ -327,55 +316,6 @@ static void ebpf_process_update_apps_data()
}
/**
- * Cgroup Exit
- *
- * Function used with netdata_thread_clean_push
- *
- * @param ptr unused argument
- */
-static void ebpf_cgroup_exit(void *ptr)
-{
- UNUSED(ptr);
-}
-
-/**
- * Cgroup update shm
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data from shared memory.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_cgroup_update_shm(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_cgroup_exit, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- usec_t step = 3 * USEC_PER_SEC;
- int counter = NETDATA_EBPF_CGROUP_UPDATE - 1;
- //This will be cancelled by its parent
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- // We are using a small heartbeat time to wake up thread,
- // but we should not update so frequently the shared memory data
- if (++counter >= NETDATA_EBPF_CGROUP_UPDATE) {
- counter = 0;
- if (!shm_ebpf_cgroup.header)
- ebpf_map_cgroup_shared_memory();
-
- ebpf_parse_cgroup_shm_data();
- }
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Update cgroup
*
* Update cgroup data based in
@@ -745,7 +685,6 @@ static void ebpf_process_exit(void *ptr)
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- pthread_cancel(*cgroup_thread.thread);
}
/*****************************************************************
@@ -1104,13 +1043,6 @@ void ebpf_send_statistic_data()
*/
static void process_collector(ebpf_module_t *em)
{
- // Start cgroup integration before other threads
- cgroup_thread.thread = mallocz(sizeof(netdata_thread_t));
- cgroup_thread.start_routine = ebpf_cgroup_update_shm;
-
- netdata_thread_create(cgroup_thread.thread, cgroup_thread.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_cgroup_update_shm, NULL);
-
heartbeat_t hb;
heartbeat_init(&hb);
int publish_global = em->global_charts;
@@ -1152,7 +1084,7 @@ static void process_collector(ebpf_module_t *em)
ebpf_process_update_apps_data();
}
- if (cgroups) {
+ if (cgroups && shm_ebpf_cgroup.header) {
ebpf_update_process_cgroup();
}
}
@@ -1169,7 +1101,7 @@ static void process_collector(ebpf_module_t *em)
ebpf_process_send_apps_data(apps_groups_root_target, em);
}
- if (cgroups) {
+ if (cgroups && shm_ebpf_cgroup.header) {
ebpf_process_send_cgroup_data(em);
}
}
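
With the EBPF CGROUP updater thread removed, shm_ebpf_cgroup.header can legitimately be NULL for a while after startup, so every cgroup consumer now checks it before touching the region. A sketch of the guard, assuming the mapping is retried elsewhere (the deleted thread called ebpf_map_cgroup_shared_memory() until it succeeded):

    static void example_cgroup_step(int cgroups)
    {
        // Skip both reading and sending until the shared memory is mapped.
        if (!cgroups || !shm_ebpf_cgroup.header)
            return;

        ebpf_update_process_cgroup();   // safe: header is non-NULL here
    }
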
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
index 43df34d48..6fded16fc 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -39,7 +39,7 @@
#define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "services.task_exit"
#define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "services.task_error"
-#define NETDATA_EBPF_CGROUP_UPDATE 10
+#define NETDATA_EBPF_CGROUP_UPDATE 30
// Statistical information
enum netdata_ebpf_thread_stats{
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
index f81287d82..4057eff7f 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -34,17 +34,6 @@ static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input =
.map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
{.name = NULL, .internal_input = 0, .user_input = 0}};
-struct netdata_static_thread shm_threads = {
- .name = "SHM KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "shmat", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "shmdt", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -246,7 +235,7 @@ static void ebpf_shm_adjust_map_size(struct shm_bpf *obj, ebpf_module_t *em)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *em)
{
@@ -299,11 +288,7 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e
static void ebpf_shm_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(shm_publish_aggregated);
@@ -316,7 +301,9 @@ static void ebpf_shm_free(ebpf_module_t *em)
shm_bpf__destroy(bpf_obj);
#endif
+ pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
}
/**
@@ -329,20 +316,6 @@ static void ebpf_shm_free(ebpf_module_t *em)
static void ebpf_shm_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*shm_threads.thread);
- ebpf_shm_free(em);
-}
-
-/**
- * SHM Cleanup
- *
- * Clean up allocated memory.
- *
- * @param ptr thread data.
- */
-static void ebpf_shm_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_shm_free(em);
}
@@ -491,7 +464,7 @@ static void shm_send_global()
*
* Read the table with number of calls for all functions
*/
-static void read_global_table()
+static void ebpf_shm_read_global_table()
{
netdata_idx_t *stored = shm_values;
netdata_idx_t *val = shm_hash_values;
@@ -512,30 +485,6 @@ static void read_global_table()
}
/**
- * Shared memory reader thread.
- *
- * @param ptr It is a NULL value for this thread.
- * @return It always returns NULL.
- */
-void *ebpf_shm_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_shm_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- usec_t step = NETDATA_SHM_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Sum values for all targets.
*/
static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target *root)
@@ -894,28 +843,19 @@ void ebpf_shm_send_cgroup_data(int update_every)
*/
static void shm_collector(ebpf_module_t *em)
{
- shm_threads.thread = mallocz(sizeof(netdata_thread_t));
- shm_threads.start_routine = ebpf_shm_read_hash;
-
- netdata_thread_create(
- shm_threads.thread,
- shm_threads.name,
- NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_shm_read_hash,
- em
- );
-
int cgroups = em->cgroup_charts;
int update_every = em->update_every;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
+ ebpf_shm_read_global_table();
pthread_mutex_lock(&collect_data_mutex);
if (apps) {
read_apps_table();
diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/collectors/ebpf.plugin/ebpf_shm.h
index 4e068819b..b06a4a5d1 100644
--- a/collectors/ebpf.plugin/ebpf_shm.h
+++ b/collectors/ebpf.plugin/ebpf_shm.h
@@ -6,8 +6,6 @@
// Module name
#define NETDATA_EBPF_MODULE_NAME_SHM "shm"
-#define NETDATA_SHM_SLEEP_MS 850000ULL
-
// charts
#define NETDATA_SHM_GLOBAL_CHART "shared_memory_calls"
#define NETDATA_SHMGET_CHART "shmget_call"
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index 3a023e4a4..1954be714 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -62,8 +62,6 @@ ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
static ebpf_bandwidth_t *bandwidth_vector = NULL;
pthread_mutex_t nv_mutex;
-int wait_to_plot = 0;
-
netdata_vector_plot_t inbound_vectors = { .plot = NULL, .next = 0, .last = 0 };
netdata_vector_plot_t outbound_vectors = { .plot = NULL, .next = 0, .last = 0 };
netdata_socket_t *socket_values;
@@ -389,7 +387,7 @@ static void ebpf_socket_adjust_map_size(struct socket_bpf *obj, ebpf_module_t *e
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_socket_load_and_attach(struct socket_bpf *obj, ebpf_module_t *em)
{
@@ -459,6 +457,9 @@ static inline void clean_internal_socket_plot(netdata_socket_plot_t *ptr)
*/
static void clean_allocated_socket_plot()
{
+ if (!network_viewer_opt.enabled)
+ return;
+
uint32_t i;
uint32_t end = inbound_vectors.last;
netdata_socket_plot_t *plot = inbound_vectors.plot;
@@ -647,7 +648,8 @@ static void ebpf_socket_free(ebpf_module_t *em )
static void ebpf_socket_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*socket_threads.thread);
+ if (socket_threads.thread)
+ netdata_thread_cancel(*socket_threads.thread);
ebpf_socket_free(em);
}
@@ -724,7 +726,7 @@ static void ebpf_update_global_publish(
*/
static inline void update_nv_plot_data(netdata_plot_values_t *plot, netdata_socket_t *sock)
{
- if (sock->ct > plot->last_time) {
+ if (sock->ct != plot->last_time) {
plot->last_time = sock->ct;
plot->plot_recv_packets = sock->recv_packets;
plot->plot_sent_packets = sock->sent_packets;
@@ -747,6 +749,7 @@ static inline void update_nv_plot_data(netdata_plot_values_t *plot, netdata_sock
*/
static inline void calculate_nv_plot()
{
+ pthread_mutex_lock(&nv_mutex);
uint32_t i;
uint32_t end = inbound_vectors.next;
for (i = 0; i < end; i++) {
@@ -764,9 +767,12 @@ static inline void calculate_nv_plot()
}
outbound_vectors.max_plot = end;
+ /*
// The 'Other' dimension is always calculated for the chart to have at least one dimension
update_nv_plot_data(&outbound_vectors.plot[outbound_vectors.last].plot,
&outbound_vectors.plot[outbound_vectors.last].sock);
+ */
+ pthread_mutex_unlock(&nv_mutex);
}
/**
@@ -1441,17 +1447,17 @@ static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr, int update_
*
* @return It returns 1 if the IP is inside the range and 0 otherwise
*/
-static int is_specific_ip_inside_range(union netdata_ip_t *cmp, int family)
+static int ebpf_is_specific_ip_inside_range(union netdata_ip_t *cmp, int family)
{
if (!network_viewer_opt.excluded_ips && !network_viewer_opt.included_ips)
return 1;
- uint32_t ipv4_test = ntohl(cmp->addr32[0]);
+ uint32_t ipv4_test = htonl(cmp->addr32[0]);
ebpf_network_viewer_ip_list_t *move = network_viewer_opt.excluded_ips;
while (move) {
if (family == AF_INET) {
- if (ntohl(move->first.addr32[0]) <= ipv4_test &&
- ipv4_test <= ntohl(move->last.addr32[0]) )
+ if (move->first.addr32[0] <= ipv4_test &&
+ ipv4_test <= move->last.addr32[0])
return 0;
} else {
if (memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
@@ -1464,12 +1470,13 @@ static int is_specific_ip_inside_range(union netdata_ip_t *cmp, int family)
move = network_viewer_opt.included_ips;
while (move) {
- if (family == AF_INET) {
- if (ntohl(move->first.addr32[0]) <= ipv4_test &&
- ntohl(move->last.addr32[0]) >= ipv4_test)
+ if (family == AF_INET && move->ver == AF_INET) {
+ if (move->first.addr32[0] <= ipv4_test &&
+ move->last.addr32[0] >= ipv4_test)
return 1;
} else {
- if (memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
+ if (move->ver == AF_INET6 &&
+ memcmp(move->first.addr8, cmp->addr8, sizeof(union netdata_ip_t)) <= 0 &&
memcmp(move->last.addr8, cmp->addr8, sizeof(union netdata_ip_t)) >= 0) {
return 1;
}
@@ -1565,7 +1572,7 @@ int is_socket_allowed(netdata_socket_idx_t *key, int family)
if (!is_port_inside_range(key->dport))
return 0;
- return is_specific_ip_inside_range(&key->daddr, family);
+ return ebpf_is_specific_ip_inside_range(&key->daddr, family);
}
/**
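
The byte-order change above pairs with the ntohl() normalization added to ebpf_fill_ip_list() further down: ranges are converted to host order once at parse time, so membership tests become plain integer comparisons. A self-contained sketch of why that works; the addresses are illustrative:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        // Range 10.0.0.0 - 10.255.255.255 and a probe address, all converted
        // to host byte order once, as ebpf_fill_ip_list() now does.
        uint32_t first = ntohl(inet_addr("10.0.0.0"));
        uint32_t last  = ntohl(inet_addr("10.255.255.255"));
        uint32_t probe = ntohl(inet_addr("10.1.2.3"));

        // Same shape as the comparisons in ebpf_is_specific_ip_inside_range().
        printf("inside: %d\n", first <= probe && probe <= last);  // prints 1
        return 0;
    }
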
@@ -1580,38 +1587,26 @@ int is_socket_allowed(netdata_socket_idx_t *key, int family)
*
 * @return It returns 0 when the values are equal, 1 when a is bigger than b, and -1 when a is smaller than b.
*/
-static int compare_sockets(void *a, void *b)
+static int ebpf_compare_sockets(void *a, void *b)
{
struct netdata_socket_plot *val1 = a;
struct netdata_socket_plot *val2 = b;
- int cmp;
+ int cmp = 0;
// We do not need to compare val2 family, because data inside hash table is always from the same family
if (val1->family == AF_INET) { //IPV4
- if (val1->flags & NETDATA_INBOUND_DIRECTION) {
- if (val1->index.sport == val2->index.sport)
- cmp = 0;
- else {
- cmp = (val1->index.sport > val2->index.sport)?1:-1;
- }
- } else {
+ if (network_viewer_opt.included_port || network_viewer_opt.excluded_port)
cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t));
- if (!cmp) {
- cmp = memcmp(&val1->index.daddr.addr32[0], &val2->index.daddr.addr32[0], sizeof(uint32_t));
- }
+
+ if (!cmp) {
+ cmp = memcmp(&val1->index.daddr.addr32[0], &val2->index.daddr.addr32[0], sizeof(uint32_t));
}
} else {
- if (val1->flags & NETDATA_INBOUND_DIRECTION) {
- if (val1->index.sport == val2->index.sport)
- cmp = 0;
- else {
- cmp = (val1->index.sport > val2->index.sport)?1:-1;
- }
- } else {
+ if (network_viewer_opt.included_port || network_viewer_opt.excluded_port)
cmp = memcmp(&val1->index.dport, &val2->index.dport, sizeof(uint16_t));
- if (!cmp) {
- cmp = memcmp(&val1->index.daddr.addr32, &val2->index.daddr.addr32, 4*sizeof(uint32_t));
- }
+
+ if (!cmp) {
+ cmp = memcmp(&val1->index.daddr.addr32, &val2->index.daddr.addr32, 4*sizeof(uint32_t));
}
}
@@ -1631,12 +1626,15 @@ static int compare_sockets(void *a, void *b)
*
* @return it returns the size of the data copied on success and -1 otherwise.
*/
-static inline int build_outbound_dimension_name(char *dimname, char *hostname, char *service_name,
- char *proto, int family)
+static inline int ebpf_build_outbound_dimension_name(char *dimname, char *hostname, char *service_name,
+ char *proto, int family)
{
- return snprintf(dimname, CONFIG_MAX_NAME - 7, (family == AF_INET)?"%s:%s:%s_":"%s:%s:[%s]_",
- service_name, proto,
- hostname);
+ if (network_viewer_opt.included_port || network_viewer_opt.excluded_port)
+ return snprintf(dimname, CONFIG_MAX_NAME - 7, (family == AF_INET)?"%s:%s:%s_":"%s:%s:[%s]_",
+ service_name, proto, hostname);
+
+ return snprintf(dimname, CONFIG_MAX_NAME - 7, (family == AF_INET)?"%s:%s_":"%s:[%s]_",
+ proto, hostname);
}
/**
@@ -1692,7 +1690,7 @@ static inline void fill_resolved_name(netdata_socket_plot_t *ptr, char *hostname
}
if (is_outbound)
- size = build_outbound_dimension_name(dimname, hostname, service_name, protocol, ptr->family);
+ size = ebpf_build_outbound_dimension_name(dimname, hostname, service_name, protocol, ptr->family);
else
size = build_inbound_dimension_name(dimname,service_name, protocol);
@@ -1850,14 +1848,12 @@ static void fill_last_nv_dimension(netdata_socket_plot_t *ptr, int is_outbound)
*/
static inline void update_socket_data(netdata_socket_t *sock, netdata_socket_t *lvalues)
{
- sock->recv_packets += lvalues->recv_packets;
- sock->sent_packets += lvalues->sent_packets;
- sock->recv_bytes += lvalues->recv_bytes;
- sock->sent_bytes += lvalues->sent_bytes;
- sock->retransmit += lvalues->retransmit;
-
- if (lvalues->ct > sock->ct)
- sock->ct = lvalues->ct;
+ sock->recv_packets = lvalues->recv_packets;
+ sock->sent_packets = lvalues->sent_packets;
+ sock->recv_bytes = lvalues->recv_bytes;
+ sock->sent_bytes = lvalues->sent_bytes;
+ sock->retransmit = lvalues->retransmit;
+ sock->ct = lvalues->ct;
}
/**
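
update_socket_data() switches from accumulating deltas to copying absolute values, and the freshness test becomes ct != last_time rather than ct > last_time; both changes suggest the kernel side now keeps running totals per socket and user space only snapshots them. A minimal sketch of the new semantics, with a hypothetical counters struct:

    #include <stdint.h>
    #include <string.h>

    typedef struct sock_counters {
        uint64_t recv_bytes, sent_bytes, retransmit;
        uint64_t ct;                      // last-change timestamp from the kernel
    } sock_counters;

    static void snapshot(sock_counters *dst, const sock_counters *src)
    {
        if (src->ct == dst->ct)           // any ct change counts as fresh data
            return;
        memcpy(dst, src, sizeof(*dst));   // overwrite with the latest totals
    }
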
@@ -1881,7 +1877,7 @@ static void store_socket_inside_avl(netdata_vector_plot_t *out, netdata_socket_t
ret = (netdata_socket_plot_t *) avl_search_lock(&out->tree, (avl_t *)&test);
if (ret) {
- if (lvalues->ct > ret->plot.last_time) {
+ if (lvalues->ct != ret->plot.last_time) {
update_socket_data(&ret->sock, lvalues);
}
} else {
@@ -1892,7 +1888,7 @@ static void store_socket_inside_avl(netdata_vector_plot_t *out, netdata_socket_t
int resolved;
if (curr == last) {
- if (lvalues->ct > w->plot.last_time) {
+ if (lvalues->ct != w->plot.last_time) {
update_socket_data(&w->sock, lvalues);
}
return;
@@ -1977,6 +1973,9 @@ netdata_vector_plot_t * select_vector_to_store(uint32_t *direction, netdata_sock
*/
static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key, int family, int end)
{
+ if (!network_viewer_opt.enabled || !is_socket_allowed(key, family))
+ return;
+
uint64_t bsent = 0, brecv = 0, psent = 0, precv = 0;
uint16_t retransmit = 0;
int i;
@@ -1994,7 +1993,7 @@ static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key
if (!protocol)
protocol = w->protocol;
- if (w->ct > ct)
+ if (w->ct != ct)
ct = w->ct;
}
@@ -2006,11 +2005,9 @@ static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key
values[0].protocol = (!protocol)?IPPROTO_TCP:protocol;
values[0].ct = ct;
- if (is_socket_allowed(key, family)) {
- uint32_t dir;
- netdata_vector_plot_t *table = select_vector_to_store(&dir, key, protocol);
- store_socket_inside_avl(table, &values[0], key, family, dir);
- }
+ uint32_t dir;
+ netdata_vector_plot_t *table = select_vector_to_store(&dir, key, protocol);
+ store_socket_inside_avl(table, &values[0], key, family, dir);
}
/**
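
hash_accumulator() now rejects sockets up front, instead of summing every per-CPU slot first and filtering only at store time. A sketch of the reordering, using this file's names; the accumulation body is abbreviated:

    static void accumulate(netdata_socket_t *values, netdata_socket_idx_t *key,
                           int family, int end)
    {
        // Cheap rejects first: a disabled viewer or filtered socket costs nothing.
        if (!network_viewer_opt.enabled || !is_socket_allowed(key, family))
            return;

        /* ... fold values[1..end) into values[0] and store in the AVL tree ... */
    }
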
@@ -2018,16 +2015,13 @@ static void hash_accumulator(netdata_socket_t *values, netdata_socket_idx_t *key
*
* Read data from hash tables created on kernel ring.
*
- * @param fd the hash table with data.
- * @param family the family associated to the hash table
+ * @param fd the hash table with data.
+ * @param family the family associated to the hash table
*
* @return it returns 0 on success and -1 otherwise.
*/
-static void read_socket_hash_table(int fd, int family, int network_connection)
+static void ebpf_read_socket_hash_table(int fd, int family)
{
- if (wait_to_plot)
- return;
-
netdata_socket_idx_t key = {};
netdata_socket_idx_t next_key = {};
@@ -2046,9 +2040,7 @@ static void read_socket_hash_table(int fd, int family, int network_connection)
continue;
}
- if (network_connection) {
- hash_accumulator(values, &key, family, end);
- }
+ hash_accumulator(values, &key, family, end);
key = next_key;
}
@@ -2057,12 +2049,12 @@ static void read_socket_hash_table(int fd, int family, int network_connection)
/**
* Fill Network Viewer Port list
*
- * Fill the strcture with values read from /proc or hash table.
+ * Fill the structure with values read from /proc or hash table.
*
* @param out the structure where we will store data.
 * @param value the ports we are listening to.
* @param proto the protocol used for this connection.
- * @param in the strcuture with values read form different sources.
+ * @param in the structure with values read from different sources.
*/
static inline void fill_nv_port_list(ebpf_network_viewer_port_list_t *out, uint16_t value, uint16_t proto,
netdata_passive_connection_t *in)
@@ -2081,7 +2073,7 @@ static inline void fill_nv_port_list(ebpf_network_viewer_port_list_t *out, uint1
*
 * @param value the ports we are listening to.
* @param proto the protocol used with port connection.
- * @param in the strcuture with values read form different sources.
+ * @param in the structure with values read from different sources.
*/
void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *in)
{
@@ -2159,22 +2151,19 @@ static void read_listen_table()
void *ebpf_socket_read_hash(void *ptr)
{
netdata_thread_cleanup_push(ebpf_socket_cleanup, ptr);
- ebpf_module_t *em = (ebpf_module_t *)ptr;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = NETDATA_SOCKET_READ_SLEEP_MS * em->update_every;
int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd;
int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd;
- int network_connection = em->optional;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin)
+ continue;
pthread_mutex_lock(&nv_mutex);
- read_listen_table();
- read_socket_hash_table(fd_ipv4, AF_INET, network_connection);
- read_socket_hash_table(fd_ipv6, AF_INET6, network_connection);
- wait_to_plot = 1;
+ ebpf_read_socket_hash_table(fd_ipv4, AF_INET);
+ ebpf_read_socket_hash_table(fd_ipv6, AF_INET6);
pthread_mutex_unlock(&nv_mutex);
}
@@ -2863,44 +2852,50 @@ static void ebpf_socket_send_cgroup_data(int update_every)
/**
* Main loop for this collector.
*
- * @param step the number of microseconds used with heart beat
* @param em the structure with thread information
*/
-static void socket_collector(usec_t step, ebpf_module_t *em)
+static void socket_collector(ebpf_module_t *em)
{
heartbeat_t hb;
heartbeat_init(&hb);
+ uint32_t network_connection = network_viewer_opt.enabled;
- socket_threads.thread = mallocz(sizeof(netdata_thread_t));
- socket_threads.start_routine = ebpf_socket_read_hash;
+ if (network_connection) {
+ socket_threads.thread = mallocz(sizeof(netdata_thread_t));
+ socket_threads.start_routine = ebpf_socket_read_hash;
- netdata_thread_create(socket_threads.thread, socket_threads.name,
- NETDATA_THREAD_OPTION_DEFAULT, ebpf_socket_read_hash, em);
+ netdata_thread_create(socket_threads.thread, socket_threads.name,
+ NETDATA_THREAD_OPTION_DEFAULT, ebpf_socket_read_hash, em);
+ }
int cgroups = em->cgroup_charts;
if (cgroups)
ebpf_socket_update_cgroup_algorithm();
int socket_global_enabled = em->global_charts;
- int network_connection = em->optional;
int update_every = em->update_every;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
netdata_apps_integration_flags_t socket_apps_enabled = em->apps_charts;
- pthread_mutex_lock(&collect_data_mutex);
- if (socket_global_enabled)
+ if (socket_global_enabled) {
+ read_listen_table();
read_hash_global_tables();
+ }
+ pthread_mutex_lock(&collect_data_mutex);
if (socket_apps_enabled)
ebpf_socket_update_apps_data();
if (cgroups)
ebpf_update_socket_cgroup();
- calculate_nv_plot();
+ if (network_connection)
+ calculate_nv_plot();
pthread_mutex_lock(&lock);
if (socket_global_enabled)
@@ -2925,7 +2920,6 @@ static void socket_collector(usec_t step, ebpf_module_t *em)
ebpf_socket_create_nv_charts(&outbound_vectors, update_every);
fflush(stdout);
ebpf_socket_send_nv_data(&outbound_vectors);
- wait_to_plot = 0;
pthread_mutex_unlock(&nv_mutex);
}
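
Socket is the only module that keeps a dedicated reader thread after this patch, and it is created only when the network viewer is enabled; the matching exit hunk earlier guards netdata_thread_cancel() against a NULL handle. The two halves, condensed:

    /* in socket_collector(): start the reader only for the network viewer */
    if (network_viewer_opt.enabled) {
        socket_threads.thread = mallocz(sizeof(netdata_thread_t));
        socket_threads.start_routine = ebpf_socket_read_hash;
        netdata_thread_create(socket_threads.thread, socket_threads.name,
                              NETDATA_THREAD_OPTION_DEFAULT, ebpf_socket_read_hash, em);
    }

    /* in ebpf_socket_exit(): tolerate the thread never having been created */
    if (socket_threads.thread)
        netdata_thread_cancel(*socket_threads.thread);
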
@@ -2959,8 +2953,10 @@ static void ebpf_socket_allocate_global_vectors(int apps)
bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t));
socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t));
- inbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
- outbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
+ if (network_viewer_opt.enabled) {
+ inbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
+ outbound_vectors.plot = callocz(network_viewer_opt.max_dim, sizeof(netdata_socket_plot_t));
+ }
}
/**
@@ -3219,12 +3215,11 @@ static void get_ipv6_first_addr(union netdata_ip_t *out, union netdata_ip_t *in,
*
* @return It returns 1 if the IP is inside the range and 0 otherwise
*/
-static int is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast,
- union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family)
+static int ebpf_is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast,
+ union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family)
{
if (family == AF_INET) {
- if (ntohl(rfirst->addr32[0]) <= ntohl(cmpfirst->addr32[0]) &&
- ntohl(rlast->addr32[0]) >= ntohl(cmplast->addr32[0]))
+ if ((rfirst->addr32[0] <= cmpfirst->addr32[0]) && (rlast->addr32[0] >= cmplast->addr32[0]))
return 1;
} else {
if (memcmp(rfirst->addr8, cmpfirst->addr8, sizeof(union netdata_ip_t)) <= 0 &&
@@ -3241,16 +3236,22 @@ static int is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rl
*
* @param out a pointer to the link list.
* @param in the structure that will be linked.
+ * @param table the modified table.
*/
-void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table)
+void ebpf_fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table)
{
#ifndef NETDATA_INTERNAL_CHECKS
UNUSED(table);
#endif
+ if (in->ver == AF_INET) { // It is simpler to compare using host order
+ in->first.addr32[0] = ntohl(in->first.addr32[0]);
+ in->last.addr32[0] = ntohl(in->last.addr32[0]);
+ }
if (likely(*out)) {
ebpf_network_viewer_ip_list_t *move = *out, *store = *out;
while (move) {
- if (in->ver == move->ver && is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) {
+ if (in->ver == move->ver &&
+ ebpf_is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) {
info("The range/value (%s) is inside the range/value (%s) already inserted, it will be ignored.",
in->value, move->value);
freez(in->value);
@@ -3267,14 +3268,12 @@ void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_li
}
#ifdef NETDATA_INTERNAL_CHECKS
- char first[512], last[512];
+ char first[256], last[512];
if (in->ver == AF_INET) {
- if (inet_ntop(AF_INET, in->first.addr8, first, INET_ADDRSTRLEN) &&
- inet_ntop(AF_INET, in->last.addr8, last, INET_ADDRSTRLEN))
- info("Adding values %s - %s to %s IP list \"%s\" used on network viewer",
- first, last,
- (*out == network_viewer_opt.included_ips)?"included":"excluded",
- table);
+ info("Adding values %s: (%u - %u) to %s IP list \"%s\" used on network viewer",
+ in->value, in->first.addr32[0], in->last.addr32[0],
+ (*out == network_viewer_opt.included_ips)?"included":"excluded",
+ table);
} else {
if (inet_ntop(AF_INET6, in->first.addr8, first, INET6_ADDRSTRLEN) &&
inet_ntop(AF_INET6, in->last.addr8, last, INET6_ADDRSTRLEN))
@@ -3294,7 +3293,7 @@ void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_li
* @param out a pointer to store the link list
* @param ip the value given as parameter
*/
-static void parse_ip_list(void **out, char *ip)
+static void ebpf_parse_ip_list(void **out, char *ip)
{
ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out;
@@ -3442,7 +3441,7 @@ static void parse_ip_list(void **out, char *ip)
ebpf_network_viewer_ip_list_t *store;
- storethisip:
+storethisip:
store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
store->value = ipdup;
store->hash = simple_hash(ipdup);
@@ -3450,7 +3449,7 @@ static void parse_ip_list(void **out, char *ip)
memcpy(store->first.addr8, first.addr8, sizeof(first.addr8));
memcpy(store->last.addr8, last.addr8, sizeof(last.addr8));
- fill_ip_list(list, store, "socket");
+ ebpf_fill_ip_list(list, store, "socket");
return;
cleanipdup:
@@ -3464,7 +3463,7 @@ cleanipdup:
*
* @param ptr is a pointer with the text to parse.
*/
-static void parse_ips(char *ptr)
+static void ebpf_parse_ips(char *ptr)
{
// No value
if (unlikely(!ptr))
@@ -3491,8 +3490,9 @@ static void parse_ips(char *ptr)
}
if (isascii(*ptr)) { // Parse port
- parse_ip_list((!neg)?(void **)&network_viewer_opt.included_ips:(void **)&network_viewer_opt.excluded_ips,
- ptr);
+ ebpf_parse_ip_list((!neg)?(void **)&network_viewer_opt.included_ips:
+ (void **)&network_viewer_opt.excluded_ips,
+ ptr);
}
ptr = end;
@@ -3762,7 +3762,7 @@ void parse_network_viewer_section(struct config *cfg)
value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION,
"ips", "!127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128");
- parse_ips(value);
+ ebpf_parse_ips(value);
}
/**
@@ -3916,16 +3916,9 @@ void *ebpf_socket_thread(void *ptr)
{
netdata_thread_cleanup_push(ebpf_socket_exit, ptr);
- memset(&inbound_vectors.tree, 0, sizeof(avl_tree_lock));
- memset(&outbound_vectors.tree, 0, sizeof(avl_tree_lock));
- avl_init_lock(&inbound_vectors.tree, compare_sockets);
- avl_init_lock(&outbound_vectors.tree, compare_sockets);
-
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = socket_maps;
- parse_network_viewer_section(&socket_config);
- parse_service_name_section(&socket_config);
parse_table_size_options(&socket_config);
if (pthread_mutex_init(&nv_mutex, NULL)) {
@@ -3935,7 +3928,15 @@ void *ebpf_socket_thread(void *ptr)
}
ebpf_socket_allocate_global_vectors(em->apps_charts);
- initialize_inbound_outbound();
+
+ if (network_viewer_opt.enabled) {
+ memset(&inbound_vectors.tree, 0, sizeof(avl_tree_lock));
+ memset(&outbound_vectors.tree, 0, sizeof(avl_tree_lock));
+ avl_init_lock(&inbound_vectors.tree, ebpf_compare_sockets);
+ avl_init_lock(&outbound_vectors.tree, ebpf_compare_sockets);
+
+ initialize_inbound_outbound();
+ }
if (running_on_kernel < NETDATA_EBPF_KERNEL_5_0)
em->mode = MODE_ENTRY;
@@ -3966,7 +3967,7 @@ void *ebpf_socket_thread(void *ptr)
pthread_mutex_unlock(&lock);
- socket_collector((usec_t)(em->update_every * USEC_PER_SEC), em);
+ socket_collector(em);
endsocket:
ebpf_update_disabled_plugin_stats(em);
diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h
index ca6b193f0..63b1e107b 100644
--- a/collectors/ebpf.plugin/ebpf_socket.h
+++ b/collectors/ebpf.plugin/ebpf_socket.h
@@ -244,6 +244,7 @@ typedef struct ebpf_network_viewer_hostname_list {
#define NETDATA_NV_CAP_VALUE 50L
typedef struct ebpf_network_viewer_options {
+ uint32_t enabled;
uint32_t max_dim; // Store value read from 'maximum dimensions'
uint32_t hostname_resolution_enabled;
@@ -360,7 +361,7 @@ void clean_port_structure(ebpf_network_viewer_port_list_t **clean);
extern ebpf_network_viewer_port_list_t *listen_ports;
void update_listen_table(uint16_t value, uint16_t proto, netdata_passive_connection_t *values);
void parse_network_viewer_section(struct config *cfg);
-void fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table);
+void ebpf_fill_ip_list(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in, char *table);
void parse_service_name_section(struct config *cfg);
extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c
index 3b5d15921..49e9c3051 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/collectors/ebpf.plugin/ebpf_softirq.c
@@ -54,17 +54,6 @@ static softirq_val_t softirq_vals[] = {
// tmp store for soft IRQ values we get from a per-CPU eBPF map.
static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
-static struct netdata_static_thread softirq_threads = {
- .name = "SOFTIRQ KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
/**
* Cachestat Free
*
@@ -75,40 +64,20 @@ static struct netdata_static_thread softirq_threads = {
static void ebpf_softirq_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
- freez(softirq_threads.thread);
-
for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
ebpf_disable_tracepoint(&softirq_tracepoints[i]);
}
freez(softirq_ebpf_vals);
pthread_mutex_lock(&ebpf_exit_cleanup);
- em->thread->enabled = NETDATA_MAIN_THREAD_EXITED;
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
pthread_mutex_unlock(&ebpf_exit_cleanup);
}
/**
- * Exit
- *
- * Cancel thread.
- *
- * @param ptr thread data.
- */
-static void softirq_exit(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*softirq_threads.thread);
- ebpf_softirq_free(em);
-}
-
-/**
* Cleanup
*
* Clean up allocated memory.
@@ -146,28 +115,6 @@ static void softirq_read_latency_map()
}
}
-/**
- * Read eBPF maps for soft IRQ.
- */
-static void *softirq_reader(void *ptr)
-{
- netdata_thread_cleanup_push(softirq_exit, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_SOFTIRQ_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- softirq_read_latency_map();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
static void softirq_create_charts(int update_every)
{
ebpf_create_chart(
@@ -212,17 +159,6 @@ static void softirq_collector(ebpf_module_t *em)
{
softirq_ebpf_vals = callocz(ebpf_nprocs, sizeof(softirq_ebpf_val_t));
- // create reader thread.
- softirq_threads.thread = mallocz(sizeof(netdata_thread_t));
- softirq_threads.start_routine = softirq_reader;
- netdata_thread_create(
- softirq_threads.thread,
- softirq_threads.name,
- NETDATA_THREAD_OPTION_DEFAULT,
- softirq_reader,
- em
- );
-
// create chart and static dims.
pthread_mutex_lock(&lock);
softirq_create_charts(em->update_every);
@@ -233,13 +169,16 @@ static void softirq_collector(ebpf_module_t *em)
// loop and read from published data until ebpf plugin is closed.
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = em->update_every * USEC_PER_SEC;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
//This will be cancelled by its parent
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
+ softirq_read_latency_map();
pthread_mutex_lock(&lock);
// write dims now for all hitherto discovered IRQs.
diff --git a/collectors/ebpf.plugin/ebpf_softirq.h b/collectors/ebpf.plugin/ebpf_softirq.h
index 7dcddbb49..eea2a1841 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.h
+++ b/collectors/ebpf.plugin/ebpf_softirq.h
@@ -20,7 +20,6 @@ typedef struct softirq_ebpf_val {
*****************************************************************/
#define NETDATA_EBPF_MODULE_NAME_SOFTIRQ "softirq"
-#define NETDATA_SOFTIRQ_SLEEP_MS 650000ULL
#define NETDATA_SOFTIRQ_CONFIG_FILE "softirq.conf"
typedef struct sofirq_val {
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 8199573a9..059efb63b 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -34,17 +34,6 @@ static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input
.map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
{.name = NULL, .internal_input = 0, .user_input = 0}};
-struct netdata_static_thread swap_threads = {
- .name = "SWAP KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "swap_writepage", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
@@ -184,7 +173,7 @@ static void ebpf_swap_disable_release_task(struct swap_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t *em)
{
@@ -236,18 +225,13 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t
static void ebpf_swap_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
ebpf_cleanup_publish_syscall(swap_publish_aggregated);
freez(swap_vector);
freez(swap_values);
- freez(swap_threads.thread);
#ifdef LIBBPF_MAJOR_VERSION
if (bpf_obj)
@@ -268,20 +252,6 @@ static void ebpf_swap_free(ebpf_module_t *em)
static void ebpf_swap_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*swap_threads.thread);
- ebpf_swap_free(em);
-}
-
-/**
- * Swap cleanup
- *
- * Clean up allocated memory.
- *
- * @param ptr thread data.
- */
-static void ebpf_swap_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_swap_free(em);
}
@@ -412,7 +382,7 @@ static void swap_send_global()
*
* Read the table with number of calls to all functions
*/
-static void read_global_table()
+static void ebpf_swap_read_global_table()
{
netdata_idx_t *stored = swap_values;
netdata_idx_t *val = swap_hash_values;
@@ -433,33 +403,6 @@ static void read_global_table()
}
/**
- * Swap read hash
- *
- * This is the thread callback.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_swap_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_swap_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
- usec_t step = NETDATA_SWAP_SLEEP_MS * em->update_every;
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Sum PIDs
*
* Sum values for all targets.
@@ -714,23 +657,19 @@ void ebpf_swap_send_cgroup_data(int update_every)
*/
static void swap_collector(ebpf_module_t *em)
{
- swap_threads.thread = mallocz(sizeof(netdata_thread_t));
- swap_threads.start_routine = ebpf_swap_read_hash;
-
- netdata_thread_create(swap_threads.thread, swap_threads.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_swap_read_hash, em);
-
int cgroup = em->cgroup_charts;
int update_every = em->update_every;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = update_every * USEC_PER_SEC;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
+ ebpf_swap_read_global_table();
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_apps_table();
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
index 840497533..7c81c1df3 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/collectors/ebpf.plugin/ebpf_sync.c
@@ -10,17 +10,6 @@ static netdata_publish_syscall_t sync_counter_publish_aggregated[NETDATA_SYNC_ID
static netdata_idx_t sync_hash_values[NETDATA_SYNC_IDX_END];
-struct netdata_static_thread sync_threads = {
- .name = "SYNC KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
static ebpf_local_maps_t sync_maps[] = {{.name = "tbl_sync", .internal_input = NETDATA_SYNC_END,
.user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
.map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
@@ -77,7 +66,7 @@ static inline void ebpf_sync_disable_probe(struct sync_bpf *obj)
}
/**
- * Disable tramppoline
+ * Disable trampoline
*
* Disable trampoline to use another method.
*
@@ -140,7 +129,7 @@ static void ebpf_sync_set_hash_tables(struct sync_bpf *obj, sync_syscalls_index_
* @param target the syscall that we are attaching a tracer.
* @param idx the index for the main structure
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_sync_load_and_attach(struct sync_bpf *obj, ebpf_module_t *em, char *target,
sync_syscalls_index_t idx)
@@ -216,17 +205,12 @@ void ebpf_sync_cleanup_objects()
static void ebpf_sync_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
#ifdef LIBBPF_MAJOR_VERSION
ebpf_sync_cleanup_objects();
#endif
- freez(sync_threads.thread);
pthread_mutex_lock(&ebpf_exit_cleanup);
em->thread->enabled = NETDATA_THREAD_EBPF_STOPPED;
@@ -243,18 +227,6 @@ static void ebpf_sync_free(ebpf_module_t *em)
static void ebpf_sync_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*sync_threads.thread);
- ebpf_sync_free(em);
-}
-
-/**
- * Clean up the main thread.
- *
- * @param ptr thread data.
- */
-static void ebpf_sync_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_sync_free(em);
}
@@ -350,7 +322,7 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
*
* Read the table with number of calls for all functions
*/
-static void read_global_table()
+static void ebpf_sync_read_global_table()
{
netdata_idx_t stored;
uint32_t idx = NETDATA_SYNC_CALL;
@@ -366,34 +338,6 @@ static void read_global_table()
}
/**
- * Sync read hash
- *
- * This is the thread callback.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_sync_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_sync_cleanup, ptr);
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- usec_t step = NETDATA_EBPF_SYNC_SLEEP_MS * em->update_every;
-
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Create Sync charts
*
* Create charts and dimensions according user input.
@@ -452,20 +396,17 @@ static void sync_send_data()
*/
static void sync_collector(ebpf_module_t *em)
{
- sync_threads.thread = mallocz(sizeof(netdata_thread_t));
- sync_threads.start_routine = ebpf_sync_read_hash;
-
- netdata_thread_create(sync_threads.thread, sync_threads.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_sync_read_hash, em);
-
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = em->update_every * USEC_PER_SEC;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
+ ebpf_sync_read_global_table();
pthread_mutex_lock(&lock);
sync_send_data();
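
The sync collector shows the locking discipline the converted loops share: map reads run outside the global chart mutex, and 'lock' is held only while chart data is written to stdout. A sketch of one iteration's ordering; read_tables() and send_data() are hypothetical stand-ins:

    static void collect_once(void)
    {
        read_tables();                 // bpf_map_lookup_elem() walks, no lock held
        pthread_mutex_lock(&lock);     // 'lock' serializes chart output
        send_data();                   // write_begin_chart()/write chart dims
        pthread_mutex_unlock(&lock);
    }
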
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
index ad6de4a07..b3c0ba45d 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/collectors/ebpf.plugin/ebpf_vfs.c
@@ -34,17 +34,6 @@ struct config vfs_config = { .first_section = NULL,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
-struct netdata_static_thread vfs_threads = {
- .name = "VFS KERNEL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "vfs_writev", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = "vfs_read", .mode = EBPF_LOAD_TRAMPOLINE},
@@ -357,7 +346,7 @@ static void ebpf_vfs_disable_release_task(struct vfs_bpf *obj)
* @param obj is the main structure for bpf objects.
* @param em structure with configuration
*
- * @return it returns 0 on succes and -1 otherwise
+ * @return it returns 0 on success and -1 otherwise
*/
static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *em)
{
@@ -409,16 +398,11 @@ static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *e
static void ebpf_vfs_free(ebpf_module_t *em)
{
pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->thread->enabled == NETDATA_THREAD_EBPF_RUNNING) {
- em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
+ em->thread->enabled = NETDATA_THREAD_EBPF_STOPPING;
pthread_mutex_unlock(&ebpf_exit_cleanup);
freez(vfs_hash_values);
freez(vfs_vector);
- freez(vfs_threads.thread);
#ifdef LIBBPF_MAJOR_VERSION
if (bpf_obj)
@@ -440,18 +424,6 @@ static void ebpf_vfs_free(ebpf_module_t *em)
static void ebpf_vfs_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
- netdata_thread_cancel(*vfs_threads.thread);
- ebpf_vfs_free(em);
-}
-
-/**
-* Clean up the main thread.
-*
-* @param ptr thread data.
-**/
-static void ebpf_vfs_cleanup(void *ptr)
-{
- ebpf_module_t *em = (ebpf_module_t *)ptr;
ebpf_vfs_free(em);
}
@@ -518,7 +490,7 @@ static void ebpf_vfs_send_data(ebpf_module_t *em)
/**
* Read the hash table and store data to allocated vectors.
*/
-static void read_global_table()
+static void ebpf_vfs_read_global_table()
{
uint64_t idx;
netdata_idx_t res[NETDATA_VFS_COUNTER];
@@ -874,36 +846,6 @@ static void read_update_vfs_cgroup()
}
/**
- * VFS read hash
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data.
- *
- * @param ptr It is a NULL value for this thread.
- *
- * @return It always returns NULL.
- */
-void *ebpf_vfs_read_hash(void *ptr)
-{
- netdata_thread_cleanup_push(ebpf_vfs_cleanup, ptr);
- heartbeat_t hb;
- heartbeat_init(&hb);
-
- ebpf_module_t *em = (ebpf_module_t *)ptr;
-
- usec_t step = NETDATA_LATENCY_VFS_SLEEP_MS * em->update_every;
- //This will be cancelled by its parent
- while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
-
- read_global_table();
- }
-
- netdata_thread_cleanup_pop(1);
- return NULL;
-}
-
-/**
* Sum PIDs
*
* Sum values for all targets.
@@ -1525,22 +1467,19 @@ static void ebpf_vfs_send_cgroup_data(ebpf_module_t *em)
*/
static void vfs_collector(ebpf_module_t *em)
{
- vfs_threads.thread = mallocz(sizeof(netdata_thread_t));
- vfs_threads.start_routine = ebpf_vfs_read_hash;
-
- netdata_thread_create(vfs_threads.thread, vfs_threads.name, NETDATA_THREAD_OPTION_DEFAULT,
- ebpf_vfs_read_hash, em);
-
int cgroups = em->cgroup_charts;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = em->update_every * USEC_PER_SEC;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!ebpf_exit_plugin) {
- (void)heartbeat_next(&hb, step);
- if (ebpf_exit_plugin)
- break;
+ (void)heartbeat_next(&hb, USEC_PER_SEC);
+ if (ebpf_exit_plugin || ++counter != update_every)
+ continue;
+ counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
+ ebpf_vfs_read_global_table();
pthread_mutex_lock(&collect_data_mutex);
if (apps)
ebpf_vfs_read_apps();
diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/collectors/ebpf.plugin/ebpf_vfs.h
index 2e3c7cc29..d7fc2672f 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.h
+++ b/collectors/ebpf.plugin/ebpf_vfs.h
@@ -8,8 +8,6 @@
#define NETDATA_DIRECTORY_VFS_CONFIG_FILE "vfs.conf"
-#define NETDATA_LATENCY_VFS_SLEEP_MS 750000ULL
-
// Global chart name
#define NETDATA_VFS_FILE_CLEAN_COUNT "vfs_deleted_objects"
#define NETDATA_VFS_FILE_IO_COUNT "vfs_io"