summaryrefslogtreecommitdiffstats
path: root/collectors/ebpf.plugin
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 11:19:16 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-05 12:07:37 +0000
commitb485aab7e71c1625cfc27e0f92c9509f42378458 (patch)
treeae9abe108601079d1679194de237c9a435ae5b55 /collectors/ebpf.plugin
parentAdding upstream version 1.44.3. (diff)
downloadnetdata-b485aab7e71c1625cfc27e0f92c9509f42378458.tar.xz
netdata-b485aab7e71c1625cfc27e0f92c9509f42378458.zip
Adding upstream version 1.45.3+dfsg.upstream/1.45.3+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
-rw-r--r--collectors/ebpf.plugin/Makefile.am42
-rw-r--r--collectors/ebpf.plugin/README.md1071
-rw-r--r--collectors/ebpf.plugin/ebpf.c4126
-rw-r--r--collectors/ebpf.plugin/ebpf.d/swap.conf34
-rw-r--r--collectors/ebpf.plugin/ebpf.h393
-rw-r--r--collectors/ebpf.plugin/ebpf_cgroup.h71
-rw-r--r--collectors/ebpf.plugin/ebpf_functions.c1093
-rw-r--r--collectors/ebpf.plugin/ebpf_functions.h44
-rw-r--r--collectors/ebpf.plugin/metadata.yaml3320
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d.conf (renamed from collectors/ebpf.plugin/ebpf.d.conf)10
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/cachestat.conf (renamed from collectors/ebpf.plugin/ebpf.d/cachestat.conf)2
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/dcstat.conf (renamed from collectors/ebpf.plugin/ebpf.d/dcstat.conf)2
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/disk.conf (renamed from collectors/ebpf.plugin/ebpf.d/disk.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt (renamed from collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/fd.conf (renamed from collectors/ebpf.plugin/ebpf.d/fd.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/filesystem.conf (renamed from collectors/ebpf.plugin/ebpf.d/filesystem.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/functions.conf (renamed from collectors/ebpf.plugin/ebpf.d/functions.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/hardirq.conf (renamed from collectors/ebpf.plugin/ebpf.d/hardirq.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/mdflush.conf (renamed from collectors/ebpf.plugin/ebpf.d/mdflush.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/mount.conf (renamed from collectors/ebpf.plugin/ebpf.d/mount.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/network.conf (renamed from collectors/ebpf.plugin/ebpf.d/network.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/oomkill.conf (renamed from collectors/ebpf.plugin/ebpf.d/oomkill.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/process.conf (renamed from collectors/ebpf.plugin/ebpf.d/process.conf)2
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/shm.conf (renamed from collectors/ebpf.plugin/ebpf.d/shm.conf)1
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/softirq.conf (renamed from collectors/ebpf.plugin/ebpf.d/softirq.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/sync.conf (renamed from collectors/ebpf.plugin/ebpf.d/sync.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf.d/vfs.conf (renamed from collectors/ebpf.plugin/ebpf.d/vfs.conf)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_apps.c (renamed from collectors/ebpf.plugin/ebpf_apps.c)445
-rw-r--r--src/collectors/ebpf.plugin/ebpf_apps.h (renamed from collectors/ebpf.plugin/ebpf_apps.h)110
-rw-r--r--src/collectors/ebpf.plugin/ebpf_cachestat.c (renamed from collectors/ebpf.plugin/ebpf_cachestat.c)481
-rw-r--r--src/collectors/ebpf.plugin/ebpf_cachestat.h (renamed from collectors/ebpf.plugin/ebpf_cachestat.h)20
-rw-r--r--src/collectors/ebpf.plugin/ebpf_cgroup.c (renamed from collectors/ebpf.plugin/ebpf_cgroup.c)25
-rw-r--r--src/collectors/ebpf.plugin/ebpf_dcstat.c (renamed from collectors/ebpf.plugin/ebpf_dcstat.c)585
-rw-r--r--src/collectors/ebpf.plugin/ebpf_dcstat.h (renamed from collectors/ebpf.plugin/ebpf_dcstat.h)16
-rw-r--r--src/collectors/ebpf.plugin/ebpf_disk.c (renamed from collectors/ebpf.plugin/ebpf_disk.c)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_disk.h (renamed from collectors/ebpf.plugin/ebpf_disk.h)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_fd.c (renamed from collectors/ebpf.plugin/ebpf_fd.c)466
-rw-r--r--src/collectors/ebpf.plugin/ebpf_fd.h (renamed from collectors/ebpf.plugin/ebpf_fd.h)14
-rw-r--r--src/collectors/ebpf.plugin/ebpf_filesystem.c (renamed from collectors/ebpf.plugin/ebpf_filesystem.c)2
-rw-r--r--src/collectors/ebpf.plugin/ebpf_filesystem.h (renamed from collectors/ebpf.plugin/ebpf_filesystem.h)3
-rw-r--r--src/collectors/ebpf.plugin/ebpf_hardirq.c (renamed from collectors/ebpf.plugin/ebpf_hardirq.c)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_hardirq.h (renamed from collectors/ebpf.plugin/ebpf_hardirq.h)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_mdflush.c (renamed from collectors/ebpf.plugin/ebpf_mdflush.c)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_mdflush.h (renamed from collectors/ebpf.plugin/ebpf_mdflush.h)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_mount.c (renamed from collectors/ebpf.plugin/ebpf_mount.c)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_mount.h (renamed from collectors/ebpf.plugin/ebpf_mount.h)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_oomkill.c (renamed from collectors/ebpf.plugin/ebpf_oomkill.c)71
-rw-r--r--src/collectors/ebpf.plugin/ebpf_oomkill.h (renamed from collectors/ebpf.plugin/ebpf_oomkill.h)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_process.c (renamed from collectors/ebpf.plugin/ebpf_process.c)335
-rw-r--r--src/collectors/ebpf.plugin/ebpf_process.h (renamed from collectors/ebpf.plugin/ebpf_process.h)11
-rw-r--r--src/collectors/ebpf.plugin/ebpf_shm.c (renamed from collectors/ebpf.plugin/ebpf_shm.c)403
-rw-r--r--src/collectors/ebpf.plugin/ebpf_shm.h (renamed from collectors/ebpf.plugin/ebpf_shm.h)14
-rw-r--r--src/collectors/ebpf.plugin/ebpf_socket.c (renamed from collectors/ebpf.plugin/ebpf_socket.c)515
-rw-r--r--src/collectors/ebpf.plugin/ebpf_socket.h (renamed from collectors/ebpf.plugin/ebpf_socket.h)24
-rw-r--r--src/collectors/ebpf.plugin/ebpf_softirq.c (renamed from collectors/ebpf.plugin/ebpf_softirq.c)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_softirq.h (renamed from collectors/ebpf.plugin/ebpf_softirq.h)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_swap.c (renamed from collectors/ebpf.plugin/ebpf_swap.c)365
-rw-r--r--src/collectors/ebpf.plugin/ebpf_swap.h (renamed from collectors/ebpf.plugin/ebpf_swap.h)14
-rw-r--r--src/collectors/ebpf.plugin/ebpf_sync.c (renamed from collectors/ebpf.plugin/ebpf_sync.c)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_sync.h (renamed from collectors/ebpf.plugin/ebpf_sync.h)4
-rw-r--r--src/collectors/ebpf.plugin/ebpf_unittest.c (renamed from collectors/ebpf.plugin/ebpf_unittest.c)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_unittest.h (renamed from collectors/ebpf.plugin/ebpf_unittest.h)0
-rw-r--r--src/collectors/ebpf.plugin/ebpf_vfs.c (renamed from collectors/ebpf.plugin/ebpf_vfs.c)765
-rw-r--r--src/collectors/ebpf.plugin/ebpf_vfs.h (renamed from collectors/ebpf.plugin/ebpf_vfs.h)35
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md (renamed from collectors/ebpf.plugin/integrations/ebpf_cachestat.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md (renamed from collectors/ebpf.plugin/integrations/ebpf_dcstat.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_disk.md (renamed from collectors/ebpf.plugin/integrations/ebpf_disk.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md (renamed from collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md (renamed from collectors/ebpf.plugin/integrations/ebpf_filesystem.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md (renamed from collectors/ebpf.plugin/integrations/ebpf_hardirq.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md (renamed from collectors/ebpf.plugin/integrations/ebpf_mdflush.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_mount.md (renamed from collectors/ebpf.plugin/integrations/ebpf_mount.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md (renamed from collectors/ebpf.plugin/integrations/ebpf_oomkill.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_process.md (renamed from collectors/ebpf.plugin/integrations/ebpf_process.md)6
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_processes.md (renamed from collectors/ebpf.plugin/integrations/ebpf_processes.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_shm.md (renamed from collectors/ebpf.plugin/integrations/ebpf_shm.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_socket.md (renamed from collectors/ebpf.plugin/integrations/ebpf_socket.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_softirq.md (renamed from collectors/ebpf.plugin/integrations/ebpf_softirq.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_swap.md (renamed from collectors/ebpf.plugin/integrations/ebpf_swap.md)8
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_sync.md (renamed from collectors/ebpf.plugin/integrations/ebpf_sync.md)10
-rw-r--r--src/collectors/ebpf.plugin/integrations/ebpf_vfs.md (renamed from collectors/ebpf.plugin/integrations/ebpf_vfs.md)8
81 files changed, 2441 insertions, 12629 deletions
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
deleted file mode 100644
index 2d5f92a6b..000000000
--- a/collectors/ebpf.plugin/Makefile.am
+++ /dev/null
@@ -1,42 +0,0 @@
-# SPDX-License-Identifier: GPL-3.0-or-later
-
-AUTOMAKE_OPTIONS = subdir-objects
-MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-
-include $(top_srcdir)/build/subst.inc
-SUFFIXES = .in
-
-userebpfconfigdir=$(configdir)/ebpf.d
-
-# Explicitly install directories to avoid permission issues due to umask
-install-exec-local:
- $(INSTALL) -d $(DESTDIR)$(userebpfconfigdir)
-
-dist_noinst_DATA = \
- README.md \
- $(NULL)
-
-ebpfconfigdir=$(libconfigdir)/ebpf.d
-dist_libconfig_DATA = \
- ebpf.d.conf \
- $(NULL)
-
-dist_ebpfconfig_DATA = \
- ebpf.d/ebpf_kernel_reject_list.txt \
- ebpf.d/cachestat.conf \
- ebpf.d/dcstat.conf \
- ebpf.d/disk.conf \
- ebpf.d/fd.conf \
- ebpf.d/filesystem.conf \
- ebpf.d/hardirq.conf \
- ebpf.d/mdflush.conf \
- ebpf.d/mount.conf \
- ebpf.d/network.conf \
- ebpf.d/oomkill.conf \
- ebpf.d/process.conf \
- ebpf.d/shm.conf \
- ebpf.d/softirq.conf \
- ebpf.d/sync.conf \
- ebpf.d/swap.conf \
- ebpf.d/vfs.conf \
- $(NULL)
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
deleted file mode 100644
index 06915ea52..000000000
--- a/collectors/ebpf.plugin/README.md
+++ /dev/null
@@ -1,1071 +0,0 @@
-<!--
-title: "Kernel traces/metrics (eBPF) monitoring with Netdata"
-description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about your complex applications with per-second granularity."
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/README.md"
-sidebar_label: "Kernel traces/metrics (eBPF)"
-learn_status: "Published"
-learn_topic_type: "References"
-learn_rel_path: "Integrations/Monitor/System metrics"
--->
-
-# Kernel traces/metrics (eBPF) collector
-
-The Netdata Agent provides many [eBPF](https://ebpf.io/what-is-ebpf/) programs to help you troubleshoot and debug how applications interact with the Linux kernel. The `ebpf.plugin` uses [tracepoints, trampolines, and kprobes](#how-netdata-collects-data-using-probes-and-tracepoints) to collect a wide array of high value data about the host that would otherwise be impossible to capture.
-
-> ❗ eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`, and all kernels on CentOS 7.6 or later. For kernels older than `4.11.0`, improved support is in active development.
-
-This document provides comprehensive details about the `ebpf.plugin`.
-For hands-on configuration and troubleshooting tips see our [tutorial on troubleshooting apps with eBPF metrics](https://github.com/netdata/netdata/blob/master/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md).
-
-<figure>
- <img src="https://user-images.githubusercontent.com/1153921/74746434-ad6a1e00-5222-11ea-858a-a7882617ae02.png" alt="An example of VFS charts, made possible by the eBPF collector plugin" />
- <figcaption>An example of virtual file system (VFS) charts made possible by the eBPF collector plugin.</figcaption>
-</figure>
-
-## How Netdata collects data using probes and tracepoints
-
-Netdata uses the following features from the Linux kernel to run eBPF programs:
-
-- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when
- both options are available.
-- Trampolines are bridges between kernel functions, and BPF programs. Netdata uses them by default whenever available.
-- Kprobes and return probes (`kretprobe`): Probes can insert virtually into any kernel instruction. When eBPF runs in `entry` mode, it attaches only `kprobes` for internal functions monitoring calls and some arguments every time a function is called. The user can also change configuration to use [`return`](#global-configuration-options) mode, and this will allow users to monitor return from these functions and detect possible failures.
-
-In each case, wherever a normal kprobe, kretprobe, or tracepoint would have run its hook function, an eBPF program is run instead, performing various collection logic before letting the kernel continue its normal control flow.
-
-There are more methods to trigger eBPF programs, such as uprobes, but currently are not supported.
-
-## Configuring ebpf.plugin
-
-The eBPF collector is installed and enabled by default on most new installations of the Agent.
-If your Agent is v1.22 or older, you may need to enable the collector yourself.
-
-### Enable the eBPF collector
-
-To enable or disable the entire eBPF collector:
-
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
- ```bash
- cd /etc/netdata
- ```
-
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit `netdata.conf`.
-
- ```bash
- ./edit-config netdata.conf
- ```
-
-3. Enable the collector by scrolling down to the `[plugins]` section. Uncomment the line `ebpf` (not
- `ebpf_process`) and set it to `yes`.
-
- ```conf
- [plugins]
- ebpf = yes
- ```
-
-### Configure the eBPF collector
-
-You can configure the eBPF collector's behavior to fine-tune which metrics you receive and [optimize performance](#performance-optimization).
-
-To edit the `ebpf.d.conf`:
-
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
- ```bash
- cd /etc/netdata
- ```
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit [`ebpf.d.conf`](https://github.com/netdata/netdata/blob/master/collectors/ebpf.plugin/ebpf.d.conf).
-
- ```bash
- ./edit-config ebpf.d.conf
- ```
-
- You can now edit the behavior of the eBPF collector. The following sections describe each configuration option in detail.
-
-### `[global]` configuration options
-
-The `[global]` section defines settings for the whole eBPF collector.
-
-#### eBPF load mode
-
-The collector uses two different eBPF programs. These programs rely on the same functions inside the kernel, but they
-monitor, process, and display different kinds of information.
-
-By default, this plugin uses the `entry` mode. Changing this mode can create significant overhead on your operating
-system, but also offer valuable information if you are developing or debugging software. The `ebpf load mode` option
-accepts the following values:
-
-- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in
- the sections above, and does not show charts related to errors.
-- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new
- charts for the return of these functions, such as errors. Monitoring function returns can help in debugging software,
- such as failing to close file descriptors or creating zombie processes.
-
-#### Integration with `apps.plugin`
-
-The eBPF collector also creates charts for each running application through an integration with the
-[`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
-interact with the Linux kernel.
-
-If you want to enable `apps.plugin` integration, change the "apps" setting to "yes".
-
-```conf
-[global]
- apps = yes
-```
-
-#### Integration with `cgroups.plugin`
-
-The eBPF collector also creates charts for each cgroup through an integration with the
-[`cgroups.plugin`](https://github.com/netdata/netdata/blob/master/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
-interacts with the Linux kernel.
-
-The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system. If you want to
-_enable_ the integration with `cgroups.plugin`, change the `cgroups` setting to `yes`.
-
-```conf
-[global]
- cgroups = yes
-```
-
-If you do not need to monitor specific metrics for your `cgroups`, you can enable `cgroups` inside
-`ebpf.d.conf`, and then disable the plugin for a specific `thread` by following the steps in the
-[Configuration](#configuring-ebpfplugin) section.
-
-#### Maps per Core
-
-When netdata is running on kernels newer than `4.6` users are allowed to modify how the `ebpf.plugin` creates maps (hash or
-array). When `maps per core` is defined as `yes`, plugin will create a map per core on host, on the other hand,
-when the value is set as `no` only one hash table will be created, this option will use less memory, but it also can
-increase overhead for processes.
-
-#### Collect PID
-
-When one of the previous integrations is enabled, `ebpf.plugin` will use Process Identifier (`PID`) to identify the
-process group for which it needs to plot data.
-
-There are different ways to collect PID, and you can select the way `ebpf.plugin` collects data with the following
-values:
-
-- `real parent`: This is the default mode. Collection will aggregate data for the real parent, the thread that creates
- child threads.
-- `parent`: Parent and real parent are the same when a process starts, but this value can be changed during run time.
-- `all`: This option will store all PIDs that run on the host. Note, this method can be expensive for the host,
- because more memory needs to be allocated and parsed.
-
-The threads that have integration with other collectors have an internal clean up wherein they attach either a
-`trampoline` or a `kprobe` to `release_task` internal function. To avoid `overload` on this function, `ebpf.plugin`
-will only enable these threads integrated with other collectors when the kernel is compiled with
-`CONFIG_DEBUG_INFO_BTF`, unless you enable them manually.
-
-#### Collection period
-
-The plugin uses the option `update every` to define the number of seconds used for eBPF to send data for Netdata. The default value
-is 5 seconds.
-
-#### PID table size
-
-The option `pid table size` defines the maximum number of PIDs stored inside the application hash table. The default value
-is defined according [kernel](https://elixir.bootlin.com/linux/v6.0.19/source/include/linux/threads.h#L28) source code.
-
-#### Integration Dashboard Elements
-
-When an integration is enabled, your dashboard will also show the following cgroups and apps charts using low-level
-Linux metrics:
-
-> Note: The parenthetical accompanying each bulleted item provides the chart name.
-
-- mem
- - Number of processes killed due to out-of-memory conditions. (`oomkills`)
-- process
- - Number of processes created with `do_fork`. (`process_create`)
- - Number of threads created with `do_fork` or `clone (2)`, depending on your system's kernel
- version. (`thread_create`)
- - Number of times that a process called `do_exit`. (`task_exit`)
- - Number of times that a process called `release_task`. (`task_close`)
- - Number of times that an error happened to create thread or process. (`task_error`)
-- swap
- - Number of calls to `swap_readpage`. (`swap_read_call`)
- - Number of calls to `swap_writepage`. (`swap_write_call`)
-- network
- - Number of outbound connections using TCP/IPv4. (`outbound_conn_ipv4`)
- - Number of outbound connections using TCP/IPv6. (`outbound_conn_ipv6`)
- - Number of bytes sent. (`total_bandwidth_sent`)
- - Number of bytes received. (`total_bandwidth_recv`)
- - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`)
- - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`)
- - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`)
- - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`)
- - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`)
-- file access
- - Number of calls to open files. (`file_open`)
- - Number of calls to open files that returned errors. (`open_error`)
- - Number of files closed. (`file_closed`)
- - Number of calls to close files that returned errors. (`file_error_closed`)
-- vfs
- - Number of calls to `vfs_unlink`. (`file_deleted`)
- - Number of calls to `vfs_write`. (`vfs_write_call`)
- - Number of calls to write a file that returned errors. (`vfs_write_error`)
- - Number of calls to `vfs_read`. (`vfs_read_call`)
- - Number of calls to read a file that returned errors. (`vfs_read_error`)
- - Number of bytes written with `vfs_write`. (`vfs_write_bytes`)
- - Number of bytes read with `vfs_read`. (`vfs_read_bytes`)
- - Number of calls to `vfs_fsync`. (`vfs_fsync`)
- - Number of calls to sync file that returned errors. (`vfs_fsync_error`)
- - Number of calls to `vfs_open`. (`vfs_open`)
- - Number of calls to open file that returned errors. (`vfs_open_error`)
- - Number of calls to `vfs_create`. (`vfs_create`)
- - Number of calls to open file that returned errors. (`vfs_create_error`)
-- page cache
- - Ratio of pages accessed. (`cachestat_ratio`)
- - Number of modified pages ("dirty"). (`cachestat_dirties`)
- - Number of accessed pages. (`cachestat_hits`)
- - Number of pages brought from disk. (`cachestat_misses`)
-- directory cache
- - Ratio of files available in directory cache. (`dc_hit_ratio`)
- - Number of files accessed. (`dc_reference`)
- - Number of files accessed that were not in cache. (`dc_not_cache`)
- - Number of files not found. (`dc_not_found`)
-- ipc shm
- - Number of calls to `shm_get`. (`shmget_call`)
- - Number of calls to `shm_at`. (`shmat_call`)
- - Number of calls to `shm_dt`. (`shmdt_call`)
- - Number of calls to `shm_ctl`. (`shmctl_call`)
-
-### `[ebpf programs]` configuration options
-
-The eBPF collector enables and runs the following eBPF programs by default:
-
-- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
- [`apps.plugin`](https://github.com/netdata/netdata/blob/master/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
- for each application.
-- `fd` : This eBPF program creates charts that show information about calls to open files.
-- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
-- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2).
-- `process`: This eBPF program creates charts that show information about process life. When in `return` mode, it also
- creates charts showing errors when these operations are executed.
-- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware
- interrupt requests (hard IRQs).
-- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software
- interrupt requests (soft IRQs).
-- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via
- the `apps.plugin` integration. Note that this program will show application charts regardless of whether apps
- integration is turned on or off.
-
-You can also enable the following eBPF programs:
-
-- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
- `kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and files are
- not found.
-- `disk` : This eBPF program creates charts that show information about disk latency independent of filesystem.
-- `filesystem` : This eBPF program creates charts that show information about some filesystem latency.
-- `swap` : This eBPF program creates charts that show information about swap access.
-- `mdflush`: This eBPF program creates charts that show information about
-  multi-device software flushes.
-- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
-- `socket`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
-  bandwidth consumed by each.
-- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
-
-### Configuring eBPF threads
-
-You can configure each thread of the eBPF data collector. This allows you to overwrite global options defined in `/etc/netdata/ebpf.d.conf` and configure specific options for each thread.
-
-To configure an eBPF thread:
-
-1. Navigate to the [Netdata config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
- ```bash
- cd /etc/netdata
- ```
-2. Use the [`edit-config`](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#use-edit-config-to-edit-configuration-files) script to edit a thread configuration file. The following configuration files are available:
-
- - `network.conf`: Configuration for the [`network` thread](#network-configuration). This config file overwrites the global options and also
- lets you specify which network the eBPF collector monitors.
- - `process.conf`: Configuration for the [`process` thread](#sync-configuration).
-    - `cachestat.conf`: Configuration for the [`cachestat` thread](#filesystem-configuration).
- - `dcstat.conf`: Configuration for the `dcstat` thread.
- - `disk.conf`: Configuration for the `disk` thread.
- - `fd.conf`: Configuration for the `file descriptor` thread.
- - `filesystem.conf`: Configuration for the `filesystem` thread.
- - `hardirq.conf`: Configuration for the `hardirq` thread.
- - `softirq.conf`: Configuration for the `softirq` thread.
- - `sync.conf`: Configuration for the `sync` thread.
- - `vfs.conf`: Configuration for the `vfs` thread.
-
- ```bash
- ./edit-config FILE.conf
- ```
-
-### Network configuration
-
-The network configuration has specific options to configure which network(s) the eBPF collector monitors. These options
-are divided in the following sections:
-
-#### `[network connections]`
-
-You can configure the information shown with function `ebpf_socket` using the settings in this section.
-
-```conf
-[network connections]
- enabled = yes
- resolve hostname ips = no
- resolve service names = yes
- ports = 1-1024 !145 !domain
- hostnames = !example.com
- ips = !127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7
-```
-
-When you define a `ports` setting, Netdata will collect network metrics for that specific port. For example, if you
-write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
-[simple patterns](https://github.com/netdata/netdata/blob/master/libnetdata/simple_pattern/README.md). The `ports`, and `ips` settings accept negation (`!`) to deny
-specific values or asterisk alone to define all values.
-
-In the above example, Netdata will collect metrics for all ports between `1` and `1024`, with the exception of `53` (domain)
-and `145`.
-
-The following options are available:
-
-- `enabled`: Disable network connections monitoring. This can directly affect some function output.
-- `resolve hostname ips`: Enable resolving IPs to hostnames. It is disabled by default because it can be too slow.
-- `resolve service names`: Convert destination ports into service names, for example, port `53` protocol `UDP` becomes `domain`.
- all names are read from /etc/services.
-- `ports`: Define the destination ports for Netdata to monitor.
-- `hostnames`: The list of hostnames that can be resolved to an IP address.
-- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
- range of IPs, or use CIDR values.
-
-By default the traffic table is created using the destination IPs and ports of the sockets. This can be
-changed, so that Netdata uses service names (if possible), by specifying `resolve service names = yes` in the configuration
-section.
-
-#### `[service name]`
-
-Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain
-the name for a particular service you use in your infrastructure, you will need to add it to the `[service name]`
-section.
-
-For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata
-service in network connection charts, and thus see the name of the service instead of its port, define it:
-
-```conf
-[service name]
- 19999 = Netdata
-```
-
-### Sync configuration
-
-The sync configuration has specific options to disable monitoring for syscalls. All syscalls are monitored by default.
-
-```conf
-[syscalls]
- sync = yes
- msync = yes
- fsync = yes
- fdatasync = yes
- syncfs = yes
- sync_file_range = yes
-```
-
-### Filesystem configuration
-
-The filesystem configuration has specific options to disable monitoring for filesystems; by default, all filesystems are
-monitored.
-
-```conf
-[filesystem]
- btrfsdist = yes
- ext4dist = yes
- nfsdist = yes
- xfsdist = yes
- zfsdist = yes
-```
-
-The ebpf program `nfsdist` monitors only `nfs` mount points.
-
-## Troubleshooting
-
-If the eBPF collector does not work, you can troubleshoot it by running the `ebpf.plugin` command and investigating its
-output.
-
-```bash
-cd /usr/libexec/netdata/plugins.d/
-sudo su -s /bin/bash ./ebpf.plugin
-```
-
-You can also use `grep` to search the Agent's `error.log` for messages related to eBPF monitoring.
-
-```bash
-grep -i ebpf /var/log/netdata/error.log
-```
-
-### Confirm kernel compatibility
-
-The eBPF collector only works on Linux systems and with specific Linux kernels. We support all kernels more recent than
-`4.11.0`, and all kernels on CentOS 7.6 or later.
-
-You can run our helper script to determine whether your system can support eBPF monitoring. If it returns no output, your system is ready to compile and run the eBPF collector.
-
-```bash
-curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tools/check-kernel-config.sh | sudo bash
-```
-
-
-If you see a warning about a missing kernel
-configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT`), you will need to recompile your kernel
-to support this configuration. The process of recompiling Linux kernels varies based on your distribution and version.
-Read the documentation for your system's distribution to learn more about the specific workflow for recompiling the
-kernel, ensuring that you set all the necessary configuration options:
-
-- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
-- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
-- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
-- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
-- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
-- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
-
-### Mount `debugfs` and `tracefs`
-
-The eBPF collector also requires both the `tracefs` and `debugfs` filesystems. Try mounting the `tracefs` and `debugfs`
-filesystems using the commands below:
-
-```bash
-sudo mount -t debugfs nodev /sys/kernel/debug
-sudo mount -t tracefs nodev /sys/kernel/tracing
-```
-
-If they are already mounted, you will see an error. You can also configure your system's `/etc/fstab` configuration to
-mount these filesystems on startup. More information can be found in
-the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
-
-## Charts
-
-The eBPF collector creates charts on different menus, like System Overview, Memory, MD arrays, Disks, Filesystem,
-Mount Points, Networking Stack, systemd Services, and Applications.
-
-The collector stores the actual value inside of its process, but charts only show the difference between the values
-collected in the previous and current seconds.
-
-### System overview
-
-Not all charts within the System Overview menu are enabled by default. Charts that rely on `kprobes` are disabled by default because they add around 100ns overhead for each function call. This is a small number from a human's perspective, but the functions are called many times and create an impact
-on host. See the [configuration](#configuring-ebpfplugin) section for details about how to enable them.
-
-#### Processes
-
-Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
-system calls: `fork(2)`, `vfork(2)`, and `clone(2)`. To generate this chart, the eBPF
-collector uses the following `tracepoints` and `kprobe`:
-
-- `sched/sched_process_fork`: Tracepoint called after a call for `fork (2)`, `vfork (2)` and `clone (2)`.
-- `sched/sched_process_exec`: Tracepoint called after an exec-family syscall.
-- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
- routine since kernel `5.10.0` was released.
-- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`
-- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.
-
-#### Process Exit
-
-Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
-system that the task is finishing its work. The second step is to release the kernel information with the internal
-function `release_task`. The difference between the two dimensions can help you discover
-[zombie processes](https://en.wikipedia.org/wiki/Zombie_process). To get the metrics, the collector uses:
-
-- `sched/sched_process_exit`: Tracepoint called after a task exits.
-- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
- descriptor.
-
-#### Task error
-
-The functions responsible for ending tasks do not return values, so this chart contains information about failures on
-process and thread creation only.
-
-#### Swap
-
-Inside the swap submenu the eBPF plugin creates the chart `swapcalls`; this chart shows when processes call
-the functions [`swap_readpage` and `swap_writepage`](https://hzliu123.github.io/linux-kernel/Page%20Cache%20in%20Linux%202.6.pdf),
-which are functions responsible for doing IO in swap memory. To collect the exact moment that an access to swap happens,
-the collector attaches `kprobes` for cited functions.
-
-#### Soft IRQ
-
-The following `tracepoints` are used to measure time usage for soft IRQs:
-
-- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
- before softirq handler
-- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called when
- softirq handler returns.
-
-#### Hard IRQ
-
-The following tracepoints are used to measure the latency of servicing a
-hardware interrupt request (hard IRQ).
-
-- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
- Called immediately before the IRQ action handler.
-- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
- Called immediately after the IRQ action handler returns.
-- `irq_vectors`: These are traces from `irq_handler_entry` and
- `irq_handler_exit` when an IRQ is handled. The following elements from vector
- are triggered:
- - `irq_vectors/local_timer_entry`
- - `irq_vectors/local_timer_exit`
- - `irq_vectors/reschedule_entry`
- - `irq_vectors/reschedule_exit`
- - `irq_vectors/call_function_entry`
- - `irq_vectors/call_function_exit`
- - `irq_vectors/call_function_single_entry`
-  - `irq_vectors/call_function_single_exit`
- - `irq_vectors/irq_work_entry`
- - `irq_vectors/irq_work_exit`
- - `irq_vectors/error_apic_entry`
- - `irq_vectors/error_apic_exit`
- - `irq_vectors/thermal_apic_entry`
- - `irq_vectors/thermal_apic_exit`
- - `irq_vectors/threshold_apic_entry`
- - `irq_vectors/threshold_apic_exit`
- - `irq_vectors/deferred_error_entry`
- - `irq_vectors/deferred_error_exit`
- - `irq_vectors/spurious_apic_entry`
- - `irq_vectors/spurious_apic_exit`
- - `irq_vectors/x86_platform_ipi_entry`
- - `irq_vectors/x86_platform_ipi_exit`
-
-#### IPC shared memory
-
-To monitor shared memory system call counts, Netdata attaches tracing in the following functions:
-
-- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
-- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
-- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
-
-### Memory
-
-In the memory submenu the eBPF plugin creates two submenus **page cache** and **synchronization** with the following
-organization:
-
-- Page Cache
- - Page cache ratio
- - Dirty pages
- - Page cache hits
- - Page cache misses
-- Synchronization
- - File sync
- - Memory map sync
- - File system sync
- - File range sync
-
-#### Page cache hits
-
-When the processor needs to read or write a location in main memory, it checks for a corresponding entry in the page cache.
- If the entry is there, a page cache hit has occurred and the read is from the cache.
-
-A page cache hit is when the page cache is successfully accessed with a read operation. We do not count pages that were
-added relatively recently.
-
-#### Dirty pages
-
-A "dirty page" is a page in the page cache that was modified after being created. Since non-dirty pages in the page cache
- have identical copies in secondary storage (e.g. hard disk drive or solid-state drive), discarding and reusing their space
- is much quicker than paging out application memory, and is often preferred over flushing the dirty pages into secondary storage
- and reusing their space.
-
-On `cachestat_dirties` Netdata demonstrates the number of pages that were modified. This chart shows the number of calls
-to the function `mark_buffer_dirty`.
-
-#### Page cache ratio
-
-When the processor needs to read or write in a specific memory address, it checks for a corresponding entry in the page cache.
-If the processor hits a page cache (`page cache hit`), it reads the entry from the cache. If there is no entry (`page cache miss`),
- the kernel allocates a new entry and copies data from the disk. Netdata calculates the percentage of accessed files that are cached on
- memory. The ratio is calculated counting the accessed cached pages
- (without counting [dirty pages](#dirty-pages) and pages added because of read misses) divided by total access without dirty pages.
-
-> ratio = (Number of accessed cached pages) / (Number of total accessed pages − dirty pages − missed pages)
-
-The chart `cachestat_ratio` shows how processes are accessing page cache. In a normal scenario, we expect values around
-100%, which means that the majority of the work on the machine is processed in memory. To calculate the ratio, Netdata
-attaches `kprobes` for kernel functions:
-
-- `add_to_page_cache_lru`: Page addition.
-- `mark_page_accessed`: Access to cache.
-- `account_page_dirtied`: Dirty (modified) pages.
-- `mark_buffer_dirty`: Writes to page cache.
-
-#### Page cache misses
-
-A page cache miss means that a page was not inside memory when the process tried to access it. This chart shows the
-result of the difference for calls between functions `add_to_page_cache_lru` and `account_page_dirtied`.
-
-#### File sync
-
-This chart shows calls to synchronization methods, [`fsync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html)
-and [`fdatasync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html), to transfer all modified page caches
-for the files on disk devices. These calls block until the disk reports that the transfer has been completed. They flush
-data for specific file descriptors.
-
-#### Memory map sync
-
-The chart shows calls to [`msync(2)`](https://man7.org/linux/man-pages/man2/msync.2.html) syscalls. This syscall flushes
-changes to a file that was mapped into memory using [`mmap(2)`](https://man7.org/linux/man-pages/man2/mmap.2.html).
-
-#### File system sync
-
-This chart monitors calls demonstrating commits from filesystem caches to disk. Netdata attaches `tracing` for
-[`sync(2)`](https://man7.org/linux/man-pages/man2/sync.2.html), and [`syncfs(2)`](https://man7.org/linux/man-pages/man2/sync.2.html).
-
-#### File range sync
-
-This chart shows calls to [`sync_file_range(2)`](https://man7.org/linux/man-pages/man2/sync_file_range.2.html) which
-synchronizes file segments with disk.
-
-> Note: This is the most dangerous syscall to synchronize data, according to its manual.
-
-### Multiple Device (MD) arrays
-
-The eBPF plugin shows multi-device flushes happening in real time. This can be used to explain some spikes happening
-in [disk latency](#disk) charts.
-
-By default, MD flush is disabled. To enable it, configure your
-`/etc/netdata/ebpf.d.conf` file as:
-
-```conf
-[global]
- mdflush = yes
-```
-
-#### MD flush
-
-To collect data related to Linux multi-device (MD) flushing, the following kprobe is used:
-
-- `kprobe/md_flush_request`: called whenever a request for flushing multi-device data is made.
-
-### Disk
-
-The eBPF plugin also shows a chart in the Disk section when the `disk` thread is enabled.
-
-#### Disk Latency
-
-This will create the chart `disk_latency_io` for each disk on the host. The following tracepoints are used:
-
-- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
- IO request operation to a device drive.
-- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
- IO operation completed by device.
-
-Disk Latency is the single most important metric to focus on when it comes to storage performance, under most circumstances.
-For hard drives, an average latency somewhere between 10 to 20 ms can be considered acceptable. For SSD (Solid State Drives),
-in most cases, workloads experience less than 1 ms latency numbers, but workloads should never reach higher than 3 ms.
-The dimensions refer to time intervals.
-
-### Filesystem
-
-This group has charts demonstrating how applications interact with the Linux kernel to open and close file descriptors.
-It also brings latency charts for several different filesystems.
-
-#### Latency Algorithm
-
-We calculate the difference between the calling and return times, spanning disk I/O, file system operations (lock, I/O),
-run queue latency and all events related to the monitored action.
-
-#### ext4
-
-To measure the latency of executing some actions in an
-[ext4](https://elixir.bootlin.com/linux/latest/source/fs/ext4) filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `ext4_file_read_iter`: Function used to measure read latency.
-- `ext4_file_write_iter`: Function used to measure write latency.
-- `ext4_file_open`: Function used to measure open latency.
-- `ext4_sync_file`: Function used to measure sync latency.
-
-#### ZFS
-
-To measure the latency of executing some actions in a zfs filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `zpl_iter_read`: Function used to measure read latency.
-- `zpl_iter_write`: Function used to measure write latency.
-- `zpl_open`: Function used to measure open latency.
-- `zpl_fsync`: Function used to measure sync latency.
-
-#### XFS
-
-To measure the latency of executing some actions in an
-[xfs](https://elixir.bootlin.com/linux/latest/source/fs/xfs) filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `xfs_file_read_iter`: Function used to measure read latency.
-- `xfs_file_write_iter`: Function used to measure write latency.
-- `xfs_file_open`: Function used to measure open latency.
-- `xfs_file_fsync`: Function used to measure sync latency.
-
-#### NFS
-
-To measure the latency of executing some actions in an
-[nfs](https://elixir.bootlin.com/linux/latest/source/fs/nfs) filesystem, the
-collector needs to attach `kprobes` and `kretprobes` for each of the following
-functions:
-
-- `nfs_file_read`: Function used to measure read latency.
-- `nfs_file_write`: Function used to measure write latency.
-- `nfs_file_open`: Functions used to measure open latency.
-- `nfs4_file_open`: Functions used to measure open latency for NFS v4.
-- `nfs_getattr`: Function used to measure sync latency.
-
-#### btrfs
-
-To measure the latency of executing some actions in a [btrfs](https://elixir.bootlin.com/linux/latest/source/fs/btrfs/file.c)
-filesystem, the collector needs to attach `kprobes` and `kretprobes` for each of the following functions:
-
-> Note: We are listing two functions used to measure `read` latency, but we use either `btrfs_file_read_iter` or
-> `generic_file_read_iter`, depending on kernel version.
-
-- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
-- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
-- `btrfs_file_write_iter`: Function used to write data.
-- `btrfs_file_open`: Function used to open files.
-- `btrfs_sync_file`: Function used to synchronize data to filesystem.
-
-#### File descriptor
-
-To give metrics related to `open` and `close` events, instead of attaching kprobes for each syscall used to do these
-events, the collector attaches `kprobes` for the common function used for syscalls:
-
-- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html): Internal function used to
- open files.
-- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
- Function called from `do_sys_open` since version `5.6.0`.
-- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
- descriptor since kernel `5.11.0`.
-- `__close_fd`: Function used to close files before version `5.11.0`.
-
-#### File error
-
-This chart shows the number of times some software tried and failed to open or close a file descriptor.
-
-#### VFS
-
-The Linux Virtual File System (VFS) is an abstraction layer on top of a
-concrete filesystem like the ones listed in the parent section, e.g. `ext4`.
-
-In this section we list the mechanism by which we gather VFS data, and what
-charts are consequently created.
-
-##### VFS eBPF Hooks
-
-To measure the latency and total quantity of executing some VFS-level
-functions, ebpf.plugin needs to attach kprobes and kretprobes for each of the
-following functions:
-
-- `vfs_write`: Function used for monitoring the number of successful & failed
-  filesystem write calls, as well as the total number of written bytes.
-- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
- single write operation using a group of buffers rather than 1).
-- `vfs_read`: Function used for monitoring the number of successful & failed
- filesystem read calls, as well as the total number of read bytes.
-- `vfs_readv`: Same function as `vfs_read` but for vector reads (i.e. a single
-  read operation using a group of buffers rather than 1).
-- `vfs_unlink`: Function used for monitoring the number of successful & failed
- filesystem unlink calls.
-- `vfs_fsync`: Function used for monitoring the number of successful & failed
- filesystem fsync calls.
-- `vfs_open`: Function used for monitoring the number of successful & failed
- filesystem open calls.
-- `vfs_create`: Function used for monitoring the number of successful & failed
- filesystem create calls.
-
-##### VFS Deleted objects
-
-This chart monitors calls to `vfs_unlink`. This function is responsible for removing objects from the file system.
-
-##### VFS IO
-
-This chart shows the number of calls to the functions `vfs_read` and `vfs_write`.
-
-##### VFS IO bytes
-
-This chart also monitors `vfs_read` and `vfs_write` but, instead of the number of calls, it shows the total amount of
-bytes read and written with these functions.
-
-The Agent displays the number of bytes written as negative because they are moving down to disk.
-
-##### VFS IO errors
-
-The Agent counts and shows the number of instances where a running program experiences a read or write error.
-
-##### VFS Create
-
-This chart shows the number of calls to `vfs_create`. This function is responsible for creating files.
-
-##### VFS Synchronization
-
-This chart shows the number of calls to `vfs_fsync`. This function is responsible for calling `fsync(2)` or
-`fdatasync(2)` on a file. You can see more details in the Synchronization section.
-
-##### VFS Open
-
-This chart shows the number of calls to `vfs_open`. This function is responsible for opening files.
-
-#### Directory Cache
-
-Metrics for directory cache are collected using kprobe for `lookup_fast`, because we are interested in the number of
-times this function is accessed. On the other hand, for `d_lookup` we are not only interested in the number of times it
-is accessed, but also in possible errors, so we need to attach a `kretprobe`. For this reason, the following is used:
-
-- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
-- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
- Called when the desired file is not inside the directory cache.
-
-##### Directory Cache Interpretation
-
-When the directory cache shows 100%, it means that every accessed file was present in the directory cache.
-If files are not present in the directory cache, they are either not present in the file system or the files were not
-accessed before.
-
-### Mount Points
-
-The following `tracing` are used to collect `mount` & `unmount` call counts:
-
-- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): mount filesystem on host.
-- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): umount filesystem on host.
-
-### Networking Stack
-
-Netdata monitors socket bandwidth attaching `tracing` for internal functions.
-
-#### TCP outbound connections
-
-This chart demonstrates calls to `tcp_v4_connect` and `tcp_v6_connect` that start connections for IPV4 and IPV6, respectively.
-
-#### TCP inbound connections
-
-This chart demonstrates TCP and UDP connections that the host receives.
-To collect this information, netdata attaches a tracing to `inet_csk_accept`.
-
-#### TCP bandwidth functions
-
-This chart demonstrates calls to functions `tcp_sendmsg`, `tcp_cleanup_rbuf`, and `tcp_close`; these functions are used
-to send & receive data and to close connections when `TCP` protocol is used.
-
-#### TCP bandwidth
-
-This chart demonstrates calls to functions:
-
-- `tcp_sendmsg`: Function responsible to send data for a specified destination.
-- `tcp_cleanup_rbuf`: We use this function instead of `tcp_recvmsg`, because the last one misses `tcp_read_sock` traffic
- and we would also need to add more `tracing` to get the socket and package size.
-- `tcp_close`: Function responsible to close connection.
-
-#### TCP retransmit
-
-This chart demonstrates calls to the function `tcp_retransmit_skb`, which is responsible for executing TCP retransmission when the
-receiver did not return the packet during the expected time.
-
-#### UDP functions
-
-This chart demonstrates calls to functions `udp_sendmsg` and `udp_recvmsg`, which are responsible for sending &
-receiving data for connections when the `UDP` protocol is used.
-
-#### UDP bandwidth
-
-Like the previous chart, this one also monitors `udp_sendmsg` and `udp_recvmsg`, but instead of showing the number of
-calls, it monitors the number of bytes sent and received.
-
-### Apps
-
-#### OOM Killing
-
-These are tracepoints related to [OOM](https://en.wikipedia.org/wiki/Out_of_memory) killing processes.
-
-- `oom/mark_victim`: Monitors when an oomkill event happens.
-
-## Known issues
-
-### Performance optimization
-
-eBPF monitoring is complex and produces a large volume of metrics. We've discovered scenarios where the eBPF plugin
-significantly increases kernel memory usage by several hundred MB.
-
-When the integration with apps or cgroup is enabled, the eBPF collector allocates memory for each process running. If your
-node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart, consider:
-
-- Modify [maps per core](#maps-per-core) to use only one map.
-- Disable [integration with apps](#integration-with-appsplugin).
-- Disable [integration with cgroup](#integration-with-cgroupsplugin).
-
-If with these changes you still suspect eBPF using too much memory, and there is no obvious culprit to be found
-in the `apps.mem` chart, consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuring-ebpfplugin).
-Next, [restart Netdata](https://github.com/netdata/netdata/blob/master/docs/configure/start-stop-restart.md) with
-`sudo systemctl restart netdata` to see if system memory usage (see the `system.ram` chart) has dropped significantly.
-
-Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#pid-table-size)
-in `ebpf.conf`.
-
-The total memory usage is a well known [issue](https://lore.kernel.org/all/167821082315.1693.6957546778534183486.git-patchwork-notify@kernel.org/)
-for eBPF; this is not a bug present in the plugin.
-
-### SELinux
-
-When [SELinux](https://www.redhat.com/en/topics/linux/what-is-selinux) is enabled, it may prevent `ebpf.plugin` from
-starting correctly. Check the Agent's `error.log` file for errors like the ones below:
-
-```bash
-2020-06-14 15:32:08: ebpf.plugin ERROR : EBPF PROCESS : Cannot load program: /usr/libexec/netdata/plugins.d/pnetdata_ebpf_process.3.10.0.o (errno 13, Permission denied)
-2020-06-14 15:32:19: netdata ERROR : PLUGINSD[ebpf] : read failed: end of file (errno 9, Bad file descriptor)
-```
-
-You can also check for errors related to `ebpf.plugin` inside `/var/log/audit/audit.log`:
-
-```bash
-type=AVC msg=audit(1586260134.952:97): avc: denied { map_create } for pid=1387 comm="ebpf.pl" scontext=system_u:system_r:unconfined_service_t:s0 tcontext=system_u:system_r:unconfined_service_t:s0 tclass=bpf permissive=0
-type=SYSCALL msg=audit(1586260134.952:97): arch=c000003e syscall=321 success=no exit=-13 a0=0 a1=7ffe6b36f000 a2=70 a3=0 items=0 ppid=1135 pid=1387 auid=4294967295 uid=994 gid=990 euid=0 suid=0 fsuid=0 egid=990 sgid=990 fsgid=990 tty=(none) ses=4294967295 comm="ebpf_proc
-ess.pl" exe="/usr/libexec/netdata/plugins.d/ebpf.plugin" subj=system_u:system_r:unconfined_service_t:s0 key=(null)
-```
-
-If you see similar errors, you will have to adjust SELinux's policies to enable the eBPF collector.
-
-#### Creation of bpf policies
-
-To enable `ebpf.plugin` to run on a distribution with SELinux enabled, it will be necessary to take the following
-actions.
-
-First, stop the Netdata Agent.
-
-```bash
-# systemctl stop netdata
-```
-
-Next, create a policy with the `audit.log` file you examined earlier.
-
-```bash
-# grep ebpf.plugin /var/log/audit/audit.log | audit2allow -M netdata_ebpf
-```
-
-This will create two new files: `netdata_ebpf.te` and `netdata_ebpf.mod`.
-
-Edit the `netdata_ebpf.te` file to change the options `class` and `allow`. You should have the following at the end of
-the `netdata_ebpf.te` file.
-
-```conf
-module netdata_ebpf 1.0;
-require {
- type unconfined_service_t;
- class bpf { map_create map_read map_write prog_load prog_run };
-}
-#============= unconfined_service_t ==============
-allow unconfined_service_t self:bpf { map_create map_read map_write prog_load prog_run };
-```
-
-Then compile your `netdata_ebpf.te` file with the following commands to create a binary that loads the new policies:
-
-```bash
-# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te
-# semodule_package -o netdata_ebpf.pp -m netdata_ebpf.mod
-```
-
-Finally, you can load the new policy and start the Netdata agent again:
-
-```bash
-# semodule -i netdata_ebpf.pp
-# systemctl start netdata
-```
-
-### Linux kernel lockdown
-
-Beginning with [version 5.4](https://www.zdnet.com/article/linux-to-get-kernel-lockdown-feature/), the Linux kernel has
-a feature called "lockdown," which may affect `ebpf.plugin` depending how the kernel was compiled. The following table
-shows how the lockdown module impacts `ebpf.plugin` based on the selected options:
-
-| Enforcing kernel lockdown | Enable lockdown LSM early in init | Default lockdown mode | Can `ebpf.plugin` run with this? |
-| :------------------------ | :-------------------------------- | :-------------------- | :------------------------------- |
-| YES | NO | NO | YES |
-| YES | Yes | None | YES |
-| YES | Yes | Integrity | YES |
-| YES | Yes | Confidentiality | NO |
-
-If you or your distribution compiled the kernel with the last combination, your system cannot load shared libraries
-required to run `ebpf.plugin`.
-
-## Functions
-
-### ebpf_thread
-
-The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named
-`ebpf_thread` that controls its internal threads and helps to reduce the overhead on host. Using the function you
-can run the plugin with all threads disabled and enable them only when you want to take a look in specific areas.
-
-#### List threads
-
-To list all threads status you can query directly the endpoint function:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread`
-
-It is also possible to query a specific thread adding keyword `thread` and thread name:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20thread:mount`
-
-#### Enable thread
-
-It is possible to enable a specific thread using the keyword `enable`:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20enable:mount`
-
-this will run thread `mount` during 300 seconds (5 minutes). You can specify a specific period by appending the period
-after the thread name:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20enable:mount:600`
-
-in this example thread `mount` will run during 600 seconds (10 minutes).
-
-#### Disable thread
-
-It is also possible to stop any thread running using the keyword `disable`. For example, to disable `cachestat` you can
-request:
-
-`http://localhost:19999/api/v1/function?function=ebpf_thread%20disable:cachestat`
-
-#### Debugging threads
-
-You can verify the impact of threads on the host by running the
-[ebpf_thread_function.sh](https://github.com/netdata/netdata/blob/master/tests/ebpf/ebpf_thread_function.sh)
-script on your environment.
-
-You can check the results of having threads running on your environment in the Netdata monitoring section on your
-dashboard.
-
-<img src="https://github.com/netdata/netdata/assets/49162938/91823573-114c-4c16-b634-cc46f7bb1bcf" alt="Threads running." />
-
-### ebpf_socket
-
-The eBPF plugin has a [function](https://github.com/netdata/netdata/blob/master/docs/cloud/netdata-functions.md) named
-`ebpf_socket` that shows the current status of open sockets on host.
-
-#### Families
-
-The plugin shows by default sockets for IPV4 and IPV6, but it is possible to select a specific family by passing the
-family as an argument:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20family:IPV4`
-
-#### Resolve
-
-The plugin resolves ports to service names by default. You can show the port number by disabling the name resolution:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20resolve:NO`
-
-#### CIDR
-
-The plugin shows connections for all possible destination IPs by default. You can limit the range by specifying the CIDR:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20cidr:192.168.1.0/24`
-
-#### PORT
-
-The plugin shows connections for all possible ports by default. You can limit the range by specifying a port or range
-of ports:
-
-`http://localhost:19999/api/v1/function?function=ebpf_socket%20port:1-1024`
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
deleted file mode 100644
index a8e621643..000000000
--- a/collectors/ebpf.plugin/ebpf.c
+++ /dev/null
@@ -1,4126 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <ifaddrs.h>
-
-#include "ebpf.h"
-#include "ebpf_socket.h"
-#include "ebpf_unittest.h"
-#include "libnetdata/required_dummies.h"
-
-/*****************************************************************
- *
- * GLOBAL VARIABLES
- *
- *****************************************************************/
-
-char *ebpf_plugin_dir = PLUGINS_DIR;
-static char *ebpf_configured_log_dir = LOG_DIR;
-
-char *ebpf_algorithms[] = {"absolute", "incremental"};
-struct config collector_config = { .first_section = NULL,
- .last_section = NULL,
- .mutex = NETDATA_MUTEX_INITIALIZER,
- .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
- .rwlock = AVL_LOCK_INITIALIZER } };
-
-int running_on_kernel = 0;
-int ebpf_nprocs;
-int isrh = 0;
-int main_thread_id = 0;
-int process_pid_fd = -1;
-static size_t global_iterations_counter = 1;
-bool publish_internal_metrics = true;
-
-pthread_mutex_t lock;
-pthread_mutex_t ebpf_exit_cleanup;
-pthread_mutex_t collect_data_mutex;
-
-struct netdata_static_thread cgroup_integration_thread = {
- .name = "EBPF CGROUP INT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
-};
-
-ebpf_module_t ebpf_modules[] = {
- { .info = {.thread_name = "process",
- .config_name = "process",
- .thread_description = NETDATA_EBPF_MODULE_PROCESS_DESC},
- .functions = {.start_routine = ebpf_process_thread,
- .apps_routine = ebpf_process_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config,
- .config_file = NETDATA_PROCESS_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10 |
- NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0 },
- { .info = {.thread_name = "socket",
- .config_name = "socket",
- .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC},
- .functions = {.start_routine = ebpf_socket_thread,
- .apps_routine = ebpf_socket_create_apps_charts,
- .fnct_routine = ebpf_socket_read_open_connections,
- .fcnt_name = EBPF_FUNCTION_SOCKET,
- .fcnt_desc = EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION,
- .fcnt_thread_chart_name = NULL,
- .fcnt_thread_lifetime_name = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config,
- .config_file = NETDATA_NETWORK_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = socket_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "cachestat", .config_name = "cachestat", .thread_description = NETDATA_EBPF_CACHESTAT_MODULE_DESC},
- .functions = {.start_routine = ebpf_cachestat_thread,
- .apps_routine = ebpf_cachestat_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = cachestat_maps, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config,
- .config_file = NETDATA_CACHESTAT_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18|
- NETDATA_V5_4 | NETDATA_V5_14 | NETDATA_V5_15 | NETDATA_V5_16,
- .load = EBPF_LOAD_LEGACY, .targets = cachestat_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "sync",
- .config_name = "sync",
- .thread_description = NETDATA_EBPF_SYNC_MODULE_DESC},
- .functions = {.start_routine = ebpf_sync_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .maps = NULL,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config,
- .config_file = NETDATA_SYNC_CONFIG_FILE,
- // All syscalls have the same kernels
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = sync_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "dc",
- .config_name = "dc",
- .thread_description = NETDATA_EBPF_DC_MODULE_DESC},
- .functions = {.start_routine = ebpf_dcstat_thread,
- .apps_routine = ebpf_dcstat_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = dcstat_maps,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
- .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = dc_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "swap", .config_name = "swap", .thread_description = NETDATA_EBPF_SWAP_MODULE_DESC},
- .functions = {.start_routine = ebpf_swap_thread,
- .apps_routine = ebpf_swap_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
- .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = swap_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "vfs",
- .config_name = "vfs",
- .thread_description = NETDATA_EBPF_VFS_MODULE_DESC},
- .functions = {.start_routine = ebpf_vfs_thread,
- .apps_routine = ebpf_vfs_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
- .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = vfs_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "filesystem", .config_name = "filesystem", .thread_description = NETDATA_EBPF_FS_MODULE_DESC},
- .functions = {.start_routine = ebpf_filesystem_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
- .config_file = NETDATA_FILESYSTEM_CONFIG_FILE,
- //We are setting kernels as zero, because we load eBPF programs according the kernel running.
- .kernels = 0, .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "disk",
- .config_name = "disk",
- .thread_description = NETDATA_EBPF_DISK_MODULE_DESC},
- .functions = {.start_routine = ebpf_disk_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config,
- .config_file = NETDATA_DISK_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "mount",
- .config_name = "mount",
- .thread_description = NETDATA_EBPF_MOUNT_MODULE_DESC},
- .functions = {.start_routine = ebpf_mount_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config,
- .config_file = NETDATA_MOUNT_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = mount_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "fd",
- .config_name = "fd",
- .thread_description = NETDATA_EBPF_FD_MODULE_DESC},
- .functions = {.start_routine = ebpf_fd_thread,
- .apps_routine = ebpf_fd_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config,
- .config_file = NETDATA_FD_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_11 |
- NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = fd_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "hardirq",
- .config_name = "hardirq",
- .thread_description = NETDATA_EBPF_HARDIRQ_MODULE_DESC},
- .functions = {.start_routine = ebpf_hardirq_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config,
- .config_file = NETDATA_HARDIRQ_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "softirq",
- .config_name = "softirq",
- .thread_description = NETDATA_EBPF_SOFTIRQ_MODULE_DESC},
- .functions = {.start_routine = ebpf_softirq_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL },
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config,
- .config_file = NETDATA_SOFTIRQ_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "oomkill",
- .config_name = "oomkill",
- .thread_description = NETDATA_EBPF_OOMKILL_MODULE_DESC},
- .functions = {.start_routine = ebpf_oomkill_thread,
- .apps_routine = ebpf_oomkill_create_apps_charts,
- .fnct_routine = NULL},.enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config,
- .config_file = NETDATA_OOMKILL_CONFIG_FILE,
- .kernels = NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = "shm",
- .config_name = "shm",
- .thread_description = NETDATA_EBPF_SHM_MODULE_DESC},
- .functions = {.start_routine = ebpf_shm_thread,
- .apps_routine = ebpf_shm_create_apps_charts,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config,
- .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = shm_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "mdflush",
- .config_name = "mdflush",
- .thread_description = NETDATA_EBPF_MD_MODULE_DESC},
- .functions = {.start_routine = ebpf_mdflush_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config,
- .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = mdflush_targets, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = { .thread_name = "functions",
- .config_name = "functions",
- .thread_description = NETDATA_EBPF_FUNCTIONS_MODULE_DESC},
- .functions = {.start_routine = ebpf_function_thread,
- .apps_routine = NULL,
- .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_RUNNING,
- .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
- .apps_level = NETDATA_APPS_NOT_SET, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
- .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = NULL,
- .config_file = NETDATA_DIRECTORY_FUNCTIONS_CONFIG_FILE,
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_14,
- .load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
- .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0},
- { .info = {.thread_name = NULL, .config_name = NULL},
- .functions = {.start_routine = NULL, .apps_routine = NULL, .fnct_routine = NULL},
- .enabled = NETDATA_THREAD_EBPF_NOT_RUNNING, .update_every = EBPF_DEFAULT_UPDATE_EVERY,
- .global_charts = 0, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO, .apps_level = NETDATA_APPS_NOT_SET,
- .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .maps = NULL,
- .pid_map_size = 0, .names = NULL, .cfg = NULL, .kernels = 0, .load = EBPF_LOAD_LEGACY,
- .targets = NULL, .probe_links = NULL, .objects = NULL, .thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES},
-};
-
-struct netdata_static_thread ebpf_threads[] = {
- {
- .name = "EBPF PROCESS",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SOCKET",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF CACHESTAT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SYNC",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF DCSTAT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SWAP",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF VFS",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF FILESYSTEM",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF DISK",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF MOUNT",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF FD",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF HARDIRQ",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SOFTIRQ",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF OOMKILL",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF SHM",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF MDFLUSH",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 1,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = "EBPF FUNCTIONS",
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
-#ifdef NETDATA_DEV_MODE
- .enabled = 1,
-#else
- .enabled = 0,
-#endif
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
- {
- .name = NULL,
- .config_section = NULL,
- .config_name = NULL,
- .env_name = NULL,
- .enabled = 0,
- .thread = NULL,
- .init_routine = NULL,
- .start_routine = NULL
- },
-};
-
-ebpf_filesystem_partitions_t localfs[] =
- {{.filesystem = "ext4",
- .optional_filesystem = NULL,
- .family = "ext4",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "ext4_file_read_iter",
- "ext4_file_write_iter",
- "ext4_file_open",
- "ext4_sync_file",
- NULL }},
- {.filesystem = "xfs",
- .optional_filesystem = NULL,
- .family = "xfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "xfs_file_read_iter",
- "xfs_file_write_iter",
- "xfs_file_open",
- "xfs_file_fsync",
- NULL }},
- {.filesystem = "nfs",
- .optional_filesystem = "nfs4",
- .family = "nfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_ATTR_CHARTS,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "nfs_file_read",
- "nfs_file_write",
- "nfs_open",
- "nfs_getattr",
- NULL }}, // // "nfs4_file_open" - not present on all kernels
- {.filesystem = "zfs",
- .optional_filesystem = NULL,
- .family = "zfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "zpl_iter_read",
- "zpl_iter_write",
- "zpl_open",
- "zpl_fsync",
- NULL }},
- {.filesystem = "btrfs",
- .optional_filesystem = NULL,
- .family = "btrfs",
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = "btrfs_file_operations", .addr = 0},
- .kernels = NETDATA_V3_10 | NETDATA_V4_14 | NETDATA_V4_16 | NETDATA_V4_18 | NETDATA_V5_4 | NETDATA_V5_10,
- .fs_maps = NULL,
- .fs_obj = NULL,
- .functions = { "btrfs_file_read_iter",
- "btrfs_file_write_iter",
- "btrfs_file_open",
- "btrfs_sync_file",
- NULL }},
- {.filesystem = NULL,
- .optional_filesystem = NULL,
- .family = NULL,
- .objects = NULL,
- .probe_links = NULL,
- .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
- .enabled = CONFIG_BOOLEAN_YES,
- .addresses = {.function = NULL, .addr = 0},
- .kernels = 0, .fs_maps = NULL, .fs_obj = NULL}};
-
-ebpf_sync_syscalls_t local_syscalls[] = {
- {.syscall = NETDATA_SYSCALLS_SYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_SYNCFS, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_MSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_FSYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_FDATASYNC, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NETDATA_SYSCALLS_SYNC_FILE_RANGE, .enabled = CONFIG_BOOLEAN_YES, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- },
- {.syscall = NULL, .enabled = CONFIG_BOOLEAN_NO, .objects = NULL, .probe_links = NULL,
-#ifdef LIBBPF_MAJOR_VERSION
- .sync_obj = NULL,
-#endif
- .sync_maps = NULL
- }
-};
-
-
-// Link with cgroup.plugin
-netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup = {NULL, NULL};
-int shm_fd_ebpf_cgroup = -1;
-sem_t *shm_sem_ebpf_cgroup = SEM_FAILED;
-pthread_mutex_t mutex_cgroup_shm;
-
-//Network viewer
-ebpf_network_viewer_options_t network_viewer_opt;
-
-// Statistic
-ebpf_plugin_stats_t plugin_statistics = {.core = 0, .legacy = 0, .running = 0, .threads = 0, .tracepoints = 0,
- .probes = 0, .retprobes = 0, .trampolines = 0, .memlock_kern = 0,
- .hash_tables = 0};
-netdata_ebpf_judy_pid_t ebpf_judy_pid = {.pid_table = NULL, .index = {.JudyLArray = NULL}};
-bool ebpf_plugin_exit = false;
-
-#ifdef LIBBPF_MAJOR_VERSION
-struct btf *default_btf = NULL;
-struct cachestat_bpf *cachestat_bpf_obj = NULL;
-struct dc_bpf *dc_bpf_obj = NULL;
-struct disk_bpf *disk_bpf_obj = NULL;
-struct fd_bpf *fd_bpf_obj = NULL;
-struct hardirq_bpf *hardirq_bpf_obj = NULL;
-struct mdflush_bpf *mdflush_bpf_obj = NULL;
-struct mount_bpf *mount_bpf_obj = NULL;
-struct shm_bpf *shm_bpf_obj = NULL;
-struct socket_bpf *socket_bpf_obj = NULL;
-struct swap_bpf *bpf_obj = NULL;
-struct vfs_bpf *vfs_bpf_obj = NULL;
-#else
-void *default_btf = NULL;
-#endif
-char *btf_path = NULL;
-
-/*****************************************************************
- *
- * FUNCTIONS USED TO MANIPULATE JUDY ARRAY
- *
- *****************************************************************/
-
-/**
- * Hashtable insert unsafe
- *
- * Find or create a value associated to the index
- *
- * @return The lsocket = 0 when new item added to the array otherwise the existing item value is returned in *lsocket
- * we return a pointer to a pointer, so that the caller can put anything needed at the value of the index.
- * The pointer to pointer we return has to be used before any other operation that may change the index (insert/delete).
- *
- */
-void **ebpf_judy_insert_unsafe(PPvoid_t arr, Word_t key)
-{
- JError_t J_Error;
- Pvoid_t *idx = JudyLIns(arr, key, &J_Error);
- if (unlikely(idx == PJERR)) {
- netdata_log_error("Cannot add PID to JudyL, JU_ERRNO_* == %u, ID == %d",
- JU_ERRNO(&J_Error), JU_ERRID(&J_Error));
- }
-
- return idx;
-}
-
-/**
- * Get PID from judy
- *
- * Get a pointer for the `pid` from judy_array;
- *
- * @param judy_array a judy array where PID is the primary key
- * @param pid pid stored.
- */
-netdata_ebpf_judy_pid_stats_t *ebpf_get_pid_from_judy_unsafe(PPvoid_t judy_array, uint32_t pid)
-{
- netdata_ebpf_judy_pid_stats_t **pid_pptr =
- (netdata_ebpf_judy_pid_stats_t **)ebpf_judy_insert_unsafe(judy_array, pid);
- netdata_ebpf_judy_pid_stats_t *pid_ptr = *pid_pptr;
- if (likely(*pid_pptr == NULL)) {
- // a new PID added to the index
- *pid_pptr = aral_mallocz(ebpf_judy_pid.pid_table);
-
- pid_ptr = *pid_pptr;
-
- pid_ptr->cmdline = NULL;
- pid_ptr->socket_stats.JudyLArray = NULL;
- rw_spinlock_init(&pid_ptr->socket_stats.rw_spinlock);
- }
-
- return pid_ptr;
-}
-
-/*****************************************************************
- *
- * FUNCTIONS USED TO ALLOCATE APPS/CGROUP MEMORIES (ARAL)
- *
- *****************************************************************/
-
-/**
- * Allocate PID ARAL
- *
- * Allocate memory using ARAL functions to speed up processing.
- *
- * @param name the internal name used for allocated region.
- * @param size size of each element inside allocated space
- *
- * @return It returns the address on success and NULL otherwise.
- */
-ARAL *ebpf_allocate_pid_aral(char *name, size_t size)
-{
- static size_t max_elements = NETDATA_EBPF_ALLOC_MAX_PID;
- if (max_elements < NETDATA_EBPF_ALLOC_MIN_ELEMENTS) {
- netdata_log_error("Number of elements given is too small, adjusting it for %d", NETDATA_EBPF_ALLOC_MIN_ELEMENTS);
- max_elements = NETDATA_EBPF_ALLOC_MIN_ELEMENTS;
- }
-
- return aral_create(name, size,
- 0, max_elements,
- NULL, NULL, NULL, false, false);
-}
-
-/*****************************************************************
- *
- * FUNCTIONS USED TO CLEAN MEMORY AND OPERATE SYSTEM FILES
- *
- *****************************************************************/
-
-/**
- * Wait to avoid possible coredumps while process is closing.
- */
-static inline void ebpf_check_before2go()
-{
- int i = EBPF_OPTION_ALL_CHARTS;
- usec_t max = USEC_PER_SEC, step = 200000;
- while (i && max) {
- max -= step;
- sleep_usec(step);
- i = 0;
- int j;
- pthread_mutex_lock(&ebpf_exit_cleanup);
- for (j = 0; ebpf_modules[j].info.thread_name != NULL; j++) {
- if (ebpf_modules[j].enabled < NETDATA_THREAD_EBPF_STOPPING)
- i++;
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-
- if (i) {
- netdata_log_error("eBPF cannot unload all threads on time, but it will go away");
- }
-}
-
-/**
- * Close the collector gracefully
- */
-static void ebpf_exit()
-{
-#ifdef LIBBPF_MAJOR_VERSION
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (default_btf) {
- btf__free(default_btf);
- default_btf = NULL;
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-#endif
-
- char filename[FILENAME_MAX + 1];
- ebpf_pid_file(filename, FILENAME_MAX);
- if (unlink(filename))
- netdata_log_error("Cannot remove PID file %s", filename);
-
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_error("Good bye world! I was PID %d", main_thread_id);
-#endif
- fprintf(stdout, "EXIT\n");
- fflush(stdout);
-
- ebpf_check_before2go();
- pthread_mutex_lock(&mutex_cgroup_shm);
- if (shm_ebpf_cgroup.header) {
- ebpf_unmap_cgroup_shared_memory();
- shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
- }
- pthread_mutex_unlock(&mutex_cgroup_shm);
-
- exit(0);
-}
-
-/**
- * Unload loegacy code
- *
- * @param objects objects loaded from eBPF programs
- * @param probe_links links from loader
- */
-void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links)
-{
- if (!probe_links || !objects)
- return;
-
- struct bpf_program *prog;
- size_t j = 0 ;
- bpf_object__for_each_program(prog, objects) {
- bpf_link__destroy(probe_links[j]);
- j++;
- }
- freez(probe_links);
- if (objects)
- bpf_object__close(objects);
-}
-
/**
 * Unload Unique maps
 *
 * This function unload all BPF maps from threads using one unique BPF object.
 *
 * Currently only the socket thread is handled here; all other modules are
 * cleaned by their dedicated functions (ebpf_unload_filesystems,
 * ebpf_unload_sync, ...).
 */
static void ebpf_unload_unique_maps()
{
    int i;
    for (i = 0; ebpf_modules[i].info.thread_name; i++) {
        // These threads are cleaned with other functions: skip everything
        // except the socket module.
        if (i != EBPF_MODULE_SOCKET_IDX)
            continue;

        if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_STOPPED) {
            // Report only when the thread actually ran and has not stopped;
            // a module that never started has nothing to unload.
            if (ebpf_modules[i].enabled != NETDATA_THREAD_EBPF_NOT_RUNNING)
                netdata_log_error("Cannot unload maps for thread %s, because it is not stopped.",
                                  ebpf_modules[i].info.thread_name);

            continue;
        }

        // Legacy loader: free probe links and the BPF object directly.
        if (ebpf_modules[i].load == EBPF_LOAD_LEGACY) {
            ebpf_unload_legacy_code(ebpf_modules[i].objects, ebpf_modules[i].probe_links);
            continue;
        }

#ifdef LIBBPF_MAJOR_VERSION
        // CO-RE skeleton: destroy through the generated API.
        if (socket_bpf_obj)
            socket_bpf__destroy(socket_bpf_obj);
#endif
    }
}
-
-/**
- * Unload filesystem maps
- *
- * This function unload all BPF maps from filesystem thread.
- */
-static void ebpf_unload_filesystems()
-{
- if (ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
- ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].enabled < NETDATA_THREAD_EBPF_STOPPING ||
- ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].load != EBPF_LOAD_LEGACY)
- return;
-
- int i;
- for (i = 0; localfs[i].filesystem != NULL; i++) {
- if (!localfs[i].objects)
- continue;
-
- ebpf_unload_legacy_code(localfs[i].objects, localfs[i].probe_links);
- }
-}
-
-/**
- * Unload sync maps
- *
- * This function unload all BPF maps from sync thread.
- */
-static void ebpf_unload_sync()
-{
- if (ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled == NETDATA_THREAD_EBPF_NOT_RUNNING ||
- ebpf_modules[EBPF_MODULE_SYNC_IDX].enabled < NETDATA_THREAD_EBPF_STOPPING)
- return;
-
- int i;
- for (i = 0; local_syscalls[i].syscall != NULL; i++) {
- if (!local_syscalls[i].enabled)
- continue;
-
-#ifdef LIBBPF_MAJOR_VERSION
- if (local_syscalls[i].sync_obj) {
- sync_bpf__destroy(local_syscalls[i].sync_obj);
- continue;
- }
-#endif
- ebpf_unload_legacy_code(local_syscalls[i].objects, local_syscalls[i].probe_links);
- }
-}
-
/**
 * Close the collector gracefully
 *
 * Shutdown entry point: cancels every worker thread, waits for them to
 * stop, unloads BPF maps, then calls ebpf_exit() (which never returns).
 * Only the main thread executes the body, and only once.
 *
 * @param sig is the signal number used to close the collector (unused)
 */
void ebpf_stop_threads(int sig)
{
    UNUSED(sig);
    static int only_one = 0;    // guards against re-entry on repeated signals

    // Child thread should be closed by itself.
    pthread_mutex_lock(&ebpf_exit_cleanup);
    if (main_thread_id != gettid() || only_one) {
        pthread_mutex_unlock(&ebpf_exit_cleanup);
        return;
    }
    only_one = 1;
    // Request cancellation of every module thread still running.
    int i;
    for (i = 0; ebpf_modules[i].info.thread_name != NULL; i++) {
        if (ebpf_modules[i].enabled < NETDATA_THREAD_EBPF_STOPPING) {
            netdata_thread_cancel(*ebpf_modules[i].thread->thread);
#ifdef NETDATA_DEV_MODE
            netdata_log_info("Sending cancel for thread %s", ebpf_modules[i].info.thread_name);
#endif
        }
    }
    pthread_mutex_unlock(&ebpf_exit_cleanup);

    // The cgroup integration thread lives outside ebpf_modules and is
    // cancelled separately under its own lock.
    pthread_mutex_lock(&mutex_cgroup_shm);
    netdata_thread_cancel(*cgroup_integration_thread.thread);
#ifdef NETDATA_DEV_MODE
    netdata_log_info("Sending cancel for thread %s", cgroup_integration_thread.name);
#endif
    pthread_mutex_unlock(&mutex_cgroup_shm);

    ebpf_plugin_exit = true;

    // Wait (bounded) for threads to reach the stopped state before
    // touching their maps.
    ebpf_check_before2go();

    pthread_mutex_lock(&ebpf_exit_cleanup);
    ebpf_unload_unique_maps();
    ebpf_unload_filesystems();
    ebpf_unload_sync();
    pthread_mutex_unlock(&ebpf_exit_cleanup);

    ebpf_exit();
}
-
-/*****************************************************************
- *
- * FUNCTIONS TO CREATE CHARTS
- *
- *****************************************************************/
-
-/**
- * Create apps for module
- *
- * Create apps chart that will be used with specific module
- *
- * @param em the module main structure.
- * @param root a pointer for the targets.
- */
-static inline void ebpf_create_apps_for_module(ebpf_module_t *em, struct ebpf_target *root) {
- if (em->enabled < NETDATA_THREAD_EBPF_STOPPING && em->apps_charts && em->functions.apps_routine)
- em->functions.apps_routine(em, root);
-}
-
-/**
- * Create apps charts
- *
- * Call ebpf_create_chart to create the charts on apps submenu.
- *
- * @param root a pointer for the targets.
- */
-static void ebpf_create_apps_charts(struct ebpf_target *root)
-{
- if (unlikely(!ebpf_all_pids))
- return;
-
- struct ebpf_target *w;
- int newly_added = 0;
-
- for (w = root; w; w = w->next) {
- if (w->target)
- continue;
-
- if (unlikely(w->processes && (debug_enabled || w->debug_enabled))) {
- struct ebpf_pid_on_target *pid_on_target;
-
- fprintf(
- stderr, "ebpf.plugin: target '%s' has aggregated %u process%s:", w->name, w->processes,
- (w->processes == 1) ? "" : "es");
-
- for (pid_on_target = w->root_pid; pid_on_target; pid_on_target = pid_on_target->next) {
- fprintf(stderr, " %d", pid_on_target->pid);
- }
-
- fputc('\n', stderr);
- }
-
- if (!w->exposed && w->processes) {
- newly_added++;
- w->exposed = 1;
- if (debug_enabled || w->debug_enabled)
- debug_log_int("%s just added - regenerating charts.", w->name);
- }
- }
-
- int i;
- if (!newly_added) {
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
- ebpf_module_t *current = &ebpf_modules[i];
- if (current->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- continue;
-
- ebpf_create_apps_for_module(current, root);
- }
- return;
- }
-
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
- ebpf_module_t *current = &ebpf_modules[i];
- ebpf_create_apps_for_module(current, root);
- }
-}
-
-/**
- * Get a value from a structure.
- *
- * @param basis it is the first address of the structure
- * @param offset it is the offset of the data you want to access.
- * @return
- */
-collected_number get_value_from_structure(char *basis, size_t offset)
-{
- collected_number *value = (collected_number *)(basis + offset);
-
- collected_number ret = (collected_number)llabs(*value);
- // this reset is necessary to avoid keep a constant value while processing is not executing a task
- *value = 0;
-
- return ret;
-}
-
/**
 * Write set command on standard output
 *
 * Emit a plugin-protocol SET line for one dimension.
 *
 * @param dim the dimension name
 * @param value the value for the dimension
 */
void write_chart_dimension(char *dim, long long value)
{
    fprintf(stdout, "SET %s = %lld\n", dim, value);
}
-
-/**
- * Call the necessary functions to create a chart.
- *
- * @param name the chart name
- * @param family the chart family
- * @param move the pointer with the values that will be published
- * @param end the number of values that will be written on standard output
- *
- * @return It returns a variable that maps the charts that did not have zero values.
- */
-void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end)
-{
- ebpf_write_begin_chart(family, name, "");
-
- uint32_t i = 0;
- while (move && i < end) {
- write_chart_dimension(move->name, move->ncall);
-
- move = move->next;
- i++;
- }
-
- ebpf_write_end_chart();
-}
-
-/**
- * Call the necessary functions to create a chart.
- *
- * @param name the chart name
- * @param family the chart family
- * @param move the pointer with the values that will be published
- * @param end the number of values that will be written on standard output
- */
-void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end)
-{
- ebpf_write_begin_chart(family, name, "");
-
- int i = 0;
- while (move && i < end) {
- write_chart_dimension(move->name, move->nerr);
-
- move = move->next;
- i++;
- }
-
- ebpf_write_end_chart();
-}
-
/**
 * Write charts
 *
 * Publish a single-dimension chart update (BEGIN/SET/END sequence).
 *
 * @param family chart family
 * @param chart chart id
 * @param dim dimension name
 * @param v1 value.
 */
void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1)
{
    ebpf_write_begin_chart(family, chart, "");

    write_chart_dimension(dim, v1);

    ebpf_write_end_chart();
}
-
/**
 * Write an I/O chart update
 *
 * Publish a two-dimension (write/read) chart update between BEGIN/END
 * markers. (Returns nothing; the original doc comment claimed otherwise.)
 *
 * @param chart the chart name
 * @param family the chart family
 * @param dwrite the write dimension name
 * @param vwrite the value for the write dimension
 * @param dread the read dimension name
 * @param vread the value for the read dimension
 */
void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread)
{
    ebpf_write_begin_chart(family, chart, "");

    write_chart_dimension(dwrite, vwrite);
    write_chart_dimension(dread, vread);

    ebpf_write_end_chart();
}
-
/**
 * Write chart cmd on standard output
 *
 * Emit a plugin-protocol CHART line that (re)defines a chart. Field order:
 *   CHART type.id<suffix> name title units family context charttype
 *         priority update_every options plugin module
 *
 * @param type chart type (part before the dot)
 * @param id chart id (the apps group name).
 * @param suffix suffix to differentiate charts
 * @param title chart title
 * @param units units label
 * @param family group name used to attach the chart on dashboard; may be NULL
 * @param charttype chart drawing type; may be NULL
 * @param context chart context; may be NULL
 * @param order chart order
 * @param update_every update interval used by plugin
 * @param module chart module name, this is the eBPF thread.
 */
void ebpf_write_chart_cmd(char *type, char *id, char *suffix, char *title, char *units, char *family,
                          char *charttype, char *context, int order, int update_every, char *module)
{
    // NULL family/context/charttype collapse to empty strings below.
    printf("CHART %s.%s%s '' '%s' '%s' '%s' '%s' '%s' %d %d '' 'ebpf.plugin' '%s'\n",
           type,
           id,
           suffix,
           title,
           units,
           (family)?family:"",
           (context)?context:"",
           (charttype)?charttype:"",
           order,
           update_every,
           module);
}
-
/**
 * Write obsolete chart cmd on standard output
 *
 * Emit a CHART line carrying the 'obsolete' option, telling the daemon
 * to retire the chart.
 *
 * @param type chart type
 * @param id chart id
 * @param suffix add suffix to obsolete charts.
 * @param title chart title
 * @param units units label
 * @param family group name used to attach the chart on dashboard; may be NULL
 * @param charttype chart drawing type; may be NULL
 * @param context chart context; may be NULL
 * @param order chart order
 * @param update_every value to overwrite the update frequency set by the server.
 */
void ebpf_write_chart_obsolete(char *type, char *id, char *suffix, char *title, char *units, char *family,
                               char *charttype, char *context, int order, int update_every)
{
    // NULL family/context/charttype collapse to empty strings below.
    printf("CHART %s.%s%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n",
           type,
           id,
           suffix,
           title,
           units,
           (family)?family:"",
           (context)?context:"",
           (charttype)?charttype:"",
           order,
           update_every);
}
-
/**
 * Write the dimension command on standard output
 *
 * @param name the dimension name
 * @param id the dimension id
 * @param algorithm the dimension algorithm (was misnamed "algo" in the old doc)
 */
void ebpf_write_global_dimension(char *name, char *id, char *algorithm)
{
    // Multiplier and divisor are always 1 for these dimensions.
    printf("DIMENSION %s %s %s 1 1\n", name, id, algorithm);
}
-
-/**
- * Call ebpf_write_global_dimension to create the dimensions for a specific chart
- *
- * @param ptr a pointer to a structure of the type netdata_publish_syscall_t
- * @param end the number of dimensions for the structure ptr
- */
-void ebpf_create_global_dimension(void *ptr, int end)
-{
- netdata_publish_syscall_t *move = ptr;
-
- int i = 0;
- while (move && i < end) {
- ebpf_write_global_dimension(move->name, move->dimension, move->algorithm);
-
- move = move->next;
- i++;
- }
-}
-
/**
 * Call write_chart_cmd to create the charts
 *
 * Emit the CHART command and then, when a dimension callback is given,
 * emit the DIMENSION lines through it.
 *
 * @param type chart type
 * @param id chart id
 * @param title chart title
 * @param units axis label
 * @param family group name used to attach the chart on dashboard
 * @param context chart context
 * @param charttype chart type
 * @param order order number of the specified chart
 * @param ncd a pointer to a function called to create dimensions
 * @param move a pointer for a structure that has the dimensions
 * @param end number of dimensions for the chart created
 * @param update_every update interval used with chart.
 * @param module chart module name, this is the eBPF thread.
 */
void ebpf_create_chart(char *type,
                       char *id,
                       char *title,
                       char *units,
                       char *family,
                       char *context,
                       char *charttype,
                       int order,
                       void (*ncd)(void *, int),
                       void *move,
                       int end,
                       int update_every,
                       char *module)
{
    ebpf_write_chart_cmd(type, id, "", title, units, family, charttype, context, order, update_every, module);

    if (!ncd)
        return;

    ncd(move, end);
}
-
-/**
- * Call the necessary functions to create a name.
- *
- * @param family family name
- * @param name chart name
- * @param hist0 histogram values
- * @param dimensions dimension values.
- * @param end number of bins that will be sent to Netdata.
- *
- * @return It returns a variable that maps the charts that did not have zero values.
- */
-void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end)
-{
- ebpf_write_begin_chart(family, name, "");
-
- uint32_t i;
- for (i = 0; i < end; i++) {
- write_chart_dimension(dimensions[i], (long long) hist[i]);
- }
-
- ebpf_write_end_chart();
-
- fflush(stdout);
-}
-
/**
 * ARAL Charts
 *
 * Add charts to monitor ARAL usage: one for allocated bytes and one for
 * allocation calls. Caller must call this function with mutex locked.
 *
 * @param name the name used to create aral
 * @param em a pointer to the structure with the default values.
 *
 * @return the priority assigned to the first of the two charts, so the
 *         caller can later pass it to ebpf_statistic_obsolete_aral_chart().
 */
int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em)
{
    // Shared across calls: every new ARAL chart pair takes the next two
    // priorities after the previous pair.
    static int priority = NETATA_EBPF_ORDER_STAT_ARAL_BEGIN;
    char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
    char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };

    // Store the chart ids in the module; they are reused when sending
    // data and when obsoleting the charts.
    snprintfz(em->memory_usage, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_size", name);
    snprintfz(em->memory_allocations, NETDATA_EBPF_CHART_MEM_LENGTH -1, "aral_%s_alloc", name);

    // Chart 1: bytes currently allocated by this ARAL.
    ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
                         em->memory_usage,
                         "",
                         "Bytes allocated for ARAL.",
                         "bytes",
                         NETDATA_EBPF_FAMILY,
                         NETDATA_EBPF_CHART_TYPE_STACKED,
                         "netdata.ebpf_aral_stat_size",
                         priority++,
                         em->update_every,
                         NETDATA_EBPF_MODULE_NAME_PROCESS);

    ebpf_write_global_dimension(mem,
                                mem,
                                ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);

    // Chart 2: number of allocation calls.
    ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
                         em->memory_allocations,
                         "",
                         "Calls to allocate memory.",
                         "calls",
                         NETDATA_EBPF_FAMILY,
                         NETDATA_EBPF_CHART_TYPE_STACKED,
                         "netdata.ebpf_aral_stat_alloc",
                         priority++,
                         em->update_every,
                         NETDATA_EBPF_MODULE_NAME_PROCESS);

    ebpf_write_global_dimension(aral,
                                aral,
                                ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);

    // Return the priority of the first chart created above.
    return priority - 2;
}
-
-/**
- * ARAL Charts
- *
- * Add chart to monitor ARAL usage
- * Caller must call this function with mutex locked.
- *
- * @param em a pointer to the structure with the default values.
- * @param prio the initial priority used to disable charts.
- */
-void ebpf_statistic_obsolete_aral_chart(ebpf_module_t *em, int prio)
-{
- ebpf_write_chart_obsolete(NETDATA_MONITORING_FAMILY,
- em->memory_allocations,
- "",
- "Calls to allocate memory.",
- "calls",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "netdata.ebpf_aral_stat_alloc",
- prio++,
- em->update_every);
-
- ebpf_write_chart_obsolete(NETDATA_MONITORING_FAMILY,
- em->memory_allocations,
- "",
- "Calls to allocate memory.",
- "calls",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- "netdata.ebpf_aral_stat_alloc",
- prio++,
- em->update_every);
-}
-
-/**
- * Send data from aral chart
- *
- * Send data for eBPF plugin
- *
- * @param memory a pointer to the allocated address
- * @param em a pointer to the structure with the default values.
- */
-void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
-{
- char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
- char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
-
- struct aral_statistics *stats = aral_statistics(memory);
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_usage, "");
- write_chart_dimension(mem, (long long)stats->structures.allocated_bytes);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, em->memory_allocations, "");
- write_chart_dimension(aral, (long long)stats->structures.allocations);
- ebpf_write_end_chart();
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO READ GLOBAL HASH TABLES
- *
- *****************************************************************/
-
-/**
- * Read Global Table Stats
- *
- * Read data from specified table (map_fd) using array allocated inside thread(values) and storing
- * them in stats vector starting from the first position.
- *
- * For PID tables is recommended to use a function to parse the specific data.
- *
- * @param stats vector used to store data
- * @param values helper to read data from hash tables.
- * @param map_fd table that has data
- * @param maps_per_core Is necessary to read data from all cores?
- * @param begin initial value to query hash table
- * @param end last value that will not be used.
- */
-void ebpf_read_global_table_stats(netdata_idx_t *stats,
- netdata_idx_t *values,
- int map_fd,
- int maps_per_core,
- uint32_t begin,
- uint32_t end)
-{
- uint32_t idx, order;
-
- for (idx = begin, order = 0; idx < end; idx++, order++) {
- if (!bpf_map_lookup_elem(map_fd, &idx, values)) {
- int i;
- int before = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < before; i++)
- total += values[i];
-
- stats[order] = total;
- }
- }
-}
-
-/*****************************************************************
- *
- * FUNCTIONS USED WITH SOCKET
- *
- *****************************************************************/
-
/**
 * Netmask
 *
 * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
 *
 * @param prefix create the netmask based in the CIDR value.
 *
 * @return the IPv4 netmask for the prefix, in host byte order.
 */
static inline in_addr_t ebpf_netmask(int prefix) {

    // /0 matches everything: empty mask. (Original returned ~((in_addr_t)-1),
    // which is the same value written less clearly.)
    if (prefix == 0)
        return 0;

    // Use an unsigned constant: the original `1 << (32 - prefix)` is
    // undefined behavior for prefix == 1 (signed shift into the sign bit).
    return (in_addr_t)(~((1U << (32 - prefix)) - 1));
}
-
/**
 * Broadcast
 *
 * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
 *
 * @param addr is the ip address
 * @param prefix is the CIDR value.
 *
 * @return It returns the last address of the range
 */
static inline in_addr_t ebpf_broadcast(in_addr_t addr, int prefix)
{
    // Set every host bit (the complement of the netmask).
    in_addr_t host_bits = ~ebpf_netmask(prefix);
    return addr | host_bits;
}
-
/**
 * Network
 *
 * Copied from iprange (https://github.com/firehol/iprange/blob/master/iprange.h)
 *
 * @param addr is the ip address
 * @param prefix is the CIDR value.
 *
 * @return It returns the first address of the range.
 */
static inline in_addr_t ebpf_ipv4_network(in_addr_t addr, int prefix)
{
    // Keep only the network bits.
    in_addr_t net_bits = ebpf_netmask(prefix);
    return addr & net_bits;
}
-
/**
 * Calculate ipv6 first address
 *
 * Compute the first (network) address of an IPv6 CIDR block by clearing
 * every bit after `prefix`. The address is processed as two big-endian
 * 64-bit halves.
 *
 * @param out the address to store the first address.
 * @param in the address used to do the math.
 * @param prefix number of bits used to calculate the address
 */
static void get_ipv6_first_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
{
    uint64_t mask,tmp;
    uint64_t ret[2];

    memcpy(ret, in->addr32, sizeof(union netdata_ip_t));

    if (prefix == 128) {
        // /128 is a single host: the first address is the input itself.
        memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
        return;
    } else if (!prefix) {
        // /0 covers everything: the first address is all zeros.
        ret[0] = ret[1] = 0;
        memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
        return;
    } else if (prefix <= 64) {
        // Prefix lies in the high half: clear the low half entirely,
        // then mask the high half in host order.
        ret[1] = 0ULL;

        tmp = be64toh(ret[0]);
        mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
        tmp &= mask;
        ret[0] = htobe64(tmp);
    } else {
        // Prefix lies in the low half: keep the high half, mask the low.
        mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
        tmp = be64toh(ret[1]);
        tmp &= mask;
        ret[1] = htobe64(tmp);
    }

    memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
}
-
/**
 * Get IPV6 Last Address
 *
 * Compute the last (broadcast-like) address of an IPv6 CIDR block by
 * setting every bit after `prefix`. The address is processed as two
 * big-endian 64-bit halves.
 *
 * @param out the address to store the last address.
 * @param in the address used to do the math.
 * @param prefix number of bits used to calculate the address
 */
static void get_ipv6_last_addr(union netdata_ip_t *out, union netdata_ip_t *in, uint64_t prefix)
{
    uint64_t mask,tmp;
    uint64_t ret[2];
    memcpy(ret, in->addr32, sizeof(union netdata_ip_t));

    if (prefix == 128) {
        // /128 is a single host: the last address is the input itself.
        memcpy(out->addr32, in->addr32, sizeof(union netdata_ip_t));
        return;
    } else if (!prefix) {
        // /0 covers everything: the last address is all ones.
        ret[0] = ret[1] = 0xFFFFFFFFFFFFFFFF;
        memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
        return;
    } else if (prefix <= 64) {
        // Prefix lies in the high half: fill the low half entirely,
        // then set the host bits of the high half in host order.
        ret[1] = 0xFFFFFFFFFFFFFFFFULL;

        tmp = be64toh(ret[0]);
        mask = 0xFFFFFFFFFFFFFFFFULL << (64 - prefix);
        tmp |= ~mask;
        ret[0] = htobe64(tmp);
    } else {
        // Prefix lies in the low half: keep the high half, set the
        // remaining host bits of the low half.
        mask = 0xFFFFFFFFFFFFFFFFULL << (128 - prefix);
        tmp = be64toh(ret[1]);
        tmp |= ~mask;
        ret[1] = htobe64(tmp);
    }

    memcpy(out->addr32, ret, sizeof(union netdata_ip_t));
}
-
/**
 * IP to network long
 *
 * Convert a textual address to its binary (network order) form.
 *
 * @param dst the vector to store the result
 * @param ip the source ip given by our users.
 * @param domain the ip domain (IPV4 or IPV6)
 * @param source the original string
 *
 * @return it returns 0 on success and -1 otherwise.
 */
static inline int ebpf_ip2nl(uint8_t *dst, char *ip, int domain, char *source)
{
    if (inet_pton(domain, ip, dst) > 0)
        return 0;

    netdata_log_error("The address specified (%s) is invalid ", source);
    return -1;
}
-
-/**
- * Clean port Structure
- *
- * Clean the allocated list.
- *
- * @param clean the list that will be cleaned
- */
-void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean)
-{
- ebpf_network_viewer_port_list_t *move = *clean;
- while (move) {
- ebpf_network_viewer_port_list_t *next = move->next;
- freez(move->value);
- freez(move);
-
- move = next;
- }
- *clean = NULL;
-}
-
-/**
- * Clean IP structure
- *
- * Clean the allocated list.
- *
- * @param clean the list that will be cleaned
- */
-void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean)
-{
- ebpf_network_viewer_ip_list_t *move = *clean;
- while (move) {
- ebpf_network_viewer_ip_list_t *next = move->next;
- freez(move->value);
- freez(move);
-
- move = next;
- }
- *clean = NULL;
-}
-
/**
 * Parse IP List
 *
 * Parse IP list and link it.
 *
 * Accepted formats: "*" (everything), a single IPv4/IPv6 address,
 * "addr/CIDR", or "addr-addr" ranges. Leading '!' (exclusion) is handled
 * by the caller, not here.
 *
 * @param out a pointer to store the link list
 * @param ip the value given as parameter
 */
static void ebpf_parse_ip_list_unsafe(void **out, char *ip)
{
    ebpf_network_viewer_ip_list_t **list = (ebpf_network_viewer_ip_list_t **)out;

    char *ipdup = strdupz(ip);
    union netdata_ip_t first = { };
    union netdata_ip_t last = { };
    char *is_ipv6;
    if (*ip == '*' && *(ip+1) == '\0') {
        // "*" matches everything: store the full range and drop any
        // previously stored entries, which become redundant.
        memset(first.addr8, 0, sizeof(first.addr8));
        memset(last.addr8, 0xFF, sizeof(last.addr8));

        is_ipv6 = ip;

        ebpf_clean_ip_structure(list);
        goto storethisip;
    }

    char *end = ip;
    // Move while I cannot find a separator
    while (*end && *end != '/' && *end != '-') end++;

    // We will use only the classic IPV6 for while, but we could consider the base 85 in a near future
    // https://tools.ietf.org/html/rfc1924
    is_ipv6 = strchr(ip, ':');

    int select;
    if (*end && !is_ipv6) { // IPV4 range
        // select == 0 for CIDR notation ('/'), 1 for dash ranges ('-').
        select = (*end == '/') ? 0 : 1;
        *end++ = '\0';
        if (*end == '!') {
            netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
            goto cleanipdup;
        }

        if (!select) { // CIDR
            select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup);
            if (select)
                goto cleanipdup;

            select = (int) str2i(end);
            if (select < NETDATA_MINIMUM_IPV4_CIDR || select > NETDATA_MAXIMUM_IPV4_CIDR) {
                netdata_log_info("The specified CIDR %s is not valid, the IP %s will be ignored.", end, ip);
                goto cleanipdup;
            }

            last.addr32[0] = htonl(ebpf_broadcast(ntohl(first.addr32[0]), select));
            // This was added to remove
            // https://app.codacy.com/manual/netdata/netdata/pullRequest?prid=5810941&bid=19021977
            UNUSED(last.addr32[0]);

            // Normalize the base address to the network address of the CIDR,
            // warning the user when the given address had host bits set.
            uint32_t ipv4_test = htonl(ebpf_ipv4_network(ntohl(first.addr32[0]), select));
            if (first.addr32[0] != ipv4_test) {
                first.addr32[0] = ipv4_test;
                struct in_addr ipv4_convert;
                ipv4_convert.s_addr = ipv4_test;
                char ipv4_msg[INET_ADDRSTRLEN];
                if(inet_ntop(AF_INET, &ipv4_convert, ipv4_msg, INET_ADDRSTRLEN))
                    netdata_log_info("The network value of CIDR %s was updated for %s .", ipdup, ipv4_msg);
            }
        } else { // Range
            select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup);
            if (select)
                goto cleanipdup;

            select = ebpf_ip2nl(last.addr8, end, AF_INET, ipdup);
            if (select)
                goto cleanipdup;
        }

        if (htonl(first.addr32[0]) > htonl(last.addr32[0])) {
            netdata_log_info("The specified range %s is invalid, the second address is smallest than the first, it will be ignored.",
                ipdup);
            goto cleanipdup;
        }
    } else if (is_ipv6) { // IPV6
        if (!*end) { // Unique
            select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup);
            if (select)
                goto cleanipdup;

            memcpy(last.addr8, first.addr8, sizeof(first.addr8));
        } else if (*end == '-') {
            *end++ = 0x00;
            if (*end == '!') {
                netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
                goto cleanipdup;
            }

            select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup);
            if (select)
                goto cleanipdup;

            select = ebpf_ip2nl(last.addr8, end, AF_INET6, ipdup);
            if (select)
                goto cleanipdup;
        } else { // CIDR
            *end++ = 0x00;
            if (*end == '!') {
                netdata_log_info("The exclusion cannot be in the second part of the range %s, it will be ignored.", ipdup);
                goto cleanipdup;
            }

            select = str2i(end);
            if (select < 0 || select > 128) {
                netdata_log_info("The CIDR %s is not valid, the address %s will be ignored.", end, ip);
                goto cleanipdup;
            }

            uint64_t prefix = (uint64_t)select;
            select = ebpf_ip2nl(first.addr8, ip, AF_INET6, ipdup);
            if (select)
                goto cleanipdup;

            get_ipv6_last_addr(&last, &first, prefix);

            // Normalize the base address to the network address of the CIDR,
            // warning the user when the given address had host bits set.
            union netdata_ip_t ipv6_test;
            get_ipv6_first_addr(&ipv6_test, &first, prefix);

            if (memcmp(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t)) != 0) {
                memcpy(first.addr8, ipv6_test.addr8, sizeof(union netdata_ip_t));

                struct in6_addr ipv6_convert;
                memcpy(ipv6_convert.s6_addr, ipv6_test.addr8, sizeof(union netdata_ip_t));

                char ipv6_msg[INET6_ADDRSTRLEN];
                if(inet_ntop(AF_INET6, &ipv6_convert, ipv6_msg, INET6_ADDRSTRLEN))
                    netdata_log_info("The network value of CIDR %s was updated for %s .", ipdup, ipv6_msg);
            }
        }

        // Reject ranges whose end precedes the start, comparing the two
        // 64-bit halves of the addresses in big-endian order.
        if ((be64toh(*(uint64_t *)&first.addr32[2]) > be64toh(*(uint64_t *)&last.addr32[2]) &&
            !memcmp(first.addr32, last.addr32, 2*sizeof(uint32_t))) ||
            (be64toh(*(uint64_t *)&first.addr32) > be64toh(*(uint64_t *)&last.addr32)) ) {
            netdata_log_info("The specified range %s is invalid, the second address is smallest than the first, it will be ignored.",
                ipdup);
            goto cleanipdup;
        }
    } else { // Unique ip
        select = ebpf_ip2nl(first.addr8, ip, AF_INET, ipdup);
        if (select)
            goto cleanipdup;

        memcpy(last.addr8, first.addr8, sizeof(first.addr8));
    }

    ebpf_network_viewer_ip_list_t *store;

storethisip:
    // The node takes ownership of ipdup.
    store = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
    store->value = ipdup;
    store->hash = simple_hash(ipdup);
    // The cast binds to (!is_ipv6), then the ternary selects the family.
    store->ver = (uint8_t)(!is_ipv6)?AF_INET:AF_INET6;
    memcpy(store->first.addr8, first.addr8, sizeof(first.addr8));
    memcpy(store->last.addr8, last.addr8, sizeof(last.addr8));

    ebpf_fill_ip_list_unsafe(list, store, "socket");
    return;

cleanipdup:
    freez(ipdup);
}
-
-/**
- * Parse IP Range
- *
- * Parse the IP ranges given and create Network Viewer IP Structure
- *
- * @param ptr is a pointer with the text to parse.
- */
-void ebpf_parse_ips_unsafe(char *ptr)
-{
- // No value
- if (unlikely(!ptr))
- return;
-
- while (likely(ptr)) {
- // Move forward until next valid character
- while (isspace(*ptr)) ptr++;
-
- // No valid value found
- if (unlikely(!*ptr))
- return;
-
- // Find space that ends the list
- char *end = strchr(ptr, ' ');
- if (end) {
- *end++ = '\0';
- }
-
- int neg = 0;
- if (*ptr == '!') {
- neg++;
- ptr++;
- }
-
- if (isascii(*ptr)) { // Parse port
- ebpf_parse_ip_list_unsafe(
- (!neg) ? (void **)&network_viewer_opt.included_ips : (void **)&network_viewer_opt.excluded_ips, ptr);
- }
-
- ptr = end;
- }
-}
-
/**
 * Fill Port list
 *
 * Insert a port range into the linked list: drop the new range when an
 * existing one already covers it, replace an existing range the new one
 * covers, otherwise append at the tail.
 *
 * @param out a pointer to the link list.
 * @param in the structure that will be linked.
 */
static inline void fill_port_list(ebpf_network_viewer_port_list_t **out, ebpf_network_viewer_port_list_t *in)
{
    if (likely(*out)) {
        ebpf_network_viewer_port_list_t *move = *out, *store = *out;
        // NOTE(review): ntohs() here assumes first/last are stored in
        // network byte order, but ebpf_parse_port_list() appears to store
        // host-order values — verify the intended byte order.
        uint16_t first = ntohs(in->first);
        uint16_t last = ntohs(in->last);
        while (move) {
            uint16_t cmp_first = ntohs(move->first);
            uint16_t cmp_last = ntohs(move->last);
            if (cmp_first <= first && first <= cmp_last &&
                cmp_first <= last && last <= cmp_last ) {
                // New range entirely inside an existing one: discard it.
                netdata_log_info("The range/value (%u, %u) is inside the range/value (%u, %u) already inserted, it will be ignored.",
                    first, last, cmp_first, cmp_last);
                freez(in->value);
                freez(in);
                return;
            } else if (first <= cmp_first && cmp_first <= last &&
                       first <= cmp_last && cmp_last <= last) {
                // New range covers an existing node: replace it in place.
                netdata_log_info("The range (%u, %u) is bigger than previous range (%u, %u) already inserted, the previous will be ignored.",
                    first, last, cmp_first, cmp_last);
                freez(move->value);
                move->value = in->value;
                move->first = in->first;
                move->last = in->last;
                freez(in);
                return;
            }

            store = move;
            move = move->next;
        }

        // No overlap found: append at the tail.
        store->next = in;
    } else {
        *out = in;
    }

#ifdef NETDATA_INTERNAL_CHECKS
    netdata_log_info("Adding values %s( %u, %u) to %s port list used on network viewer",
        in->value, in->first, in->last,
        (*out == network_viewer_opt.included_port)?"included":"excluded");
#endif
}
-
-/**
- * Parse Service List
- *
- * @param out a pointer to store the link list
- * @param service the service used to create the structure that will be linked.
- */
-static void ebpf_parse_service_list(void **out, char *service)
-{
- ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;
- struct servent *serv = getservbyname((const char *)service, "tcp");
- if (!serv)
- serv = getservbyname((const char *)service, "udp");
-
- if (!serv) {
- netdata_log_info("Cannot resolve the service '%s' with protocols TCP and UDP, it will be ignored", service);
- return;
- }
-
- ebpf_network_viewer_port_list_t *w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
- w->value = strdupz(service);
- w->hash = simple_hash(service);
-
- w->first = w->last = (uint16_t)serv->s_port;
-
- fill_port_list(list, w);
-}
-
/**
 * Parse port list
 *
 * Parse an allocated port list with the range given. Accepted formats:
 * "*" (all ports), a single port "N", or a range "N:M" / "N-M".
 *
 * @param out a pointer to store the link list
 * @param range the informed range for the user.
 */
static void ebpf_parse_port_list(void **out, char *range)
{
    int first, last;
    ebpf_network_viewer_port_list_t **list = (ebpf_network_viewer_port_list_t **)out;

    char *copied = strdupz(range);
    if (*range == '*' && *(range+1) == '\0') {
        // "*" matches every port; previously stored entries become redundant.
        first = 1;
        last = 65535;

        ebpf_clean_port_structure(list);
        goto fillenvpl;
    }

    char *end = range;
    //Move while I cannot find a separator
    while (*end && *end != ':' && *end != '-') end++;

    //It has a range
    if (likely(*end)) {
        *end++ = '\0';
        if (*end == '!') {
            netdata_log_info("The exclusion cannot be in the second part of the range, the range %s will be ignored.", copied);
            freez(copied);
            return;
        }
        last = str2i((const char *)end);
    } else {
        last = 0;   // no upper bound given: single port, fixed up below
    }

    first = str2i((const char *)range);
    if (first < NETDATA_MINIMUM_PORT_VALUE || first > NETDATA_MAXIMUM_PORT_VALUE) {
        netdata_log_info("The first port %d of the range \"%s\" is invalid and it will be ignored!", first, copied);
        freez(copied);
        return;
    }

    if (!last)
        last = first;

    if (last < NETDATA_MINIMUM_PORT_VALUE || last > NETDATA_MAXIMUM_PORT_VALUE) {
        netdata_log_info("The second port %d of the range \"%s\" is invalid and the whole range will be ignored!", last, copied);
        freez(copied);
        return;
    }

    if (first > last) {
        netdata_log_info("The specified order %s is wrong, the smallest value is always the first, it will be ignored!", copied);
        freez(copied);
        return;
    }

    ebpf_network_viewer_port_list_t *w;
fillenvpl:
    // The node takes ownership of the copied string.
    w = callocz(1, sizeof(ebpf_network_viewer_port_list_t));
    w->value = copied;
    w->hash = simple_hash(copied);
    // NOTE(review): first/last are stored in host byte order here, while
    // fill_port_list() compares them after ntohs() — verify intended order.
    w->first = (uint16_t)first;
    w->last = (uint16_t)last;
    w->cmp_first = (uint16_t)first;
    w->cmp_last = (uint16_t)last;

    fill_port_list(list, w);
}
-
-/**
- * Parse Port Range
- *
- * Parse the port ranges given and create Network Viewer Port Structure
- *
- * @param ptr is a pointer with the text to parse.
- */
-void ebpf_parse_ports(char *ptr)
-{
- // No value
- if (unlikely(!ptr))
- return;
-
- while (likely(ptr)) {
- // Move forward until next valid character
- while (isspace(*ptr)) ptr++;
-
- // No valid value found
- if (unlikely(!*ptr))
- return;
-
- // Find space that ends the list
- char *end = strchr(ptr, ' ');
- if (end) {
- *end++ = '\0';
- }
-
- int neg = 0;
- if (*ptr == '!') {
- neg++;
- ptr++;
- }
-
- if (isdigit(*ptr)) { // Parse port
- ebpf_parse_port_list(
- (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr);
- } else if (isalpha(*ptr)) { // Parse service
- ebpf_parse_service_list(
- (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr);
- } else if (*ptr == '*') { // All
- ebpf_parse_port_list(
- (!neg) ? (void **)&network_viewer_opt.included_port : (void **)&network_viewer_opt.excluded_port, ptr);
- }
-
- ptr = end;
- }
-}
-
-/*****************************************************************
- *
- * FUNCTIONS TO DEFINE OPTIONS
- *
- *****************************************************************/
-
-/**
- * Define labels used to generate charts
- *
- * @param is structure with information about number of calls made for a function.
- * @param pio structure used to generate charts.
- * @param dim a pointer for the dimensions name
- * @param name a pointer for the tensor with the name of the functions.
- * @param algorithm a vector with the algorithms used to make the charts
- * @param end the number of elements in the previous 4 arguments.
- */
-void ebpf_global_labels(netdata_syscall_stat_t *is, netdata_publish_syscall_t *pio, char **dim,
- char **name, int *algorithm, int end)
-{
- int i;
-
- netdata_syscall_stat_t *prev = NULL;
- netdata_publish_syscall_t *publish_prev = NULL;
- for (i = 0; i < end; i++) {
- if (prev) {
- prev->next = &is[i];
- }
- prev = &is[i];
-
- pio[i].dimension = dim[i];
- pio[i].name = name[i];
- pio[i].algorithm = ebpf_algorithms[algorithm[i]];
- if (publish_prev) {
- publish_prev->next = &pio[i];
- }
- publish_prev = &pio[i];
- }
-}
-
-/**
- * Define thread mode for all ebpf program.
- *
- * @param lmode the mode that will be used for them.
- */
-static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode)
-{
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_modules[i].mode = lmode;
- }
-}
-
-/**
- * Enable specific charts selected by user.
- *
- * @param em the structure that will be changed
- * @param disable_cgroup the status about the cgroups charts.
- */
-static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disable_cgroup)
-{
- em->enabled = NETDATA_THREAD_EBPF_RUNNING;
-
- if (!disable_cgroup) {
- em->cgroup_charts = CONFIG_BOOLEAN_YES;
- }
-
- em->global_charts = CONFIG_BOOLEAN_YES;
-}
-
-/**
- * Disable all Global charts
- *
- * Disable charts
- */
-static inline void disable_all_global_charts()
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].enabled = NETDATA_THREAD_EBPF_NOT_RUNNING;
- ebpf_modules[i].global_charts = 0;
- }
-}
-
-/**
- * Enable the specified chart group
- *
- * @param idx the index of ebpf_modules that I am enabling
- */
-static inline void ebpf_enable_chart(int idx, int disable_cgroup)
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- if (i == idx) {
- ebpf_enable_specific_chart(&ebpf_modules[i], disable_cgroup);
- break;
- }
- }
-}
-
-/**
- * Disable Cgroups
- *
- * Disable charts for apps loading only global charts.
- */
-static inline void ebpf_disable_cgroups()
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].cgroup_charts = 0;
- }
-}
-
-/**
- * Update Disabled Plugins
- *
- * This function calls ebpf_update_stats to update statistics for collector.
- *
- * @param em a pointer to `struct ebpf_module`
- */
-void ebpf_update_disabled_plugin_stats(ebpf_module_t *em)
-{
- pthread_mutex_lock(&lock);
- ebpf_update_stats(&plugin_statistics, em);
- pthread_mutex_unlock(&lock);
-}
-
-/**
- * Print help on standard error for user knows how to use the collector.
- */
-void ebpf_print_help()
-{
- const time_t t = time(NULL);
- struct tm ct;
- struct tm *test = localtime_r(&t, &ct);
- int year;
- if (test)
- year = ct.tm_year;
- else
- year = 0;
-
- fprintf(stderr,
- "\n"
- " Netdata ebpf.plugin %s\n"
- " Copyright (C) 2016-%d Costa Tsaousis <costa@tsaousis.gr>\n"
- " Released under GNU General Public License v3 or later.\n"
- " All rights reserved.\n"
- "\n"
- " This eBPF.plugin is a data collector plugin for netdata.\n"
- "\n"
- " This plugin only accepts long options with one or two dashes. The available command line options are:\n"
- "\n"
- " SECONDS Set the data collection frequency.\n"
- "\n"
- " [-]-help Show this help.\n"
- "\n"
- " [-]-version Show software version.\n"
- "\n"
- " [-]-global Disable charts per application and cgroup.\n"
- "\n"
- " [-]-all Enable all chart groups (global, apps, and cgroup), unless -g is also given.\n"
- "\n"
- " [-]-cachestat Enable charts related to process run time.\n"
- "\n"
- " [-]-dcstat Enable charts related to directory cache.\n"
- "\n"
- " [-]-disk Enable charts related to disk monitoring.\n"
- "\n"
- " [-]-filesystem Enable chart related to filesystem run time.\n"
- "\n"
- " [-]-hardirq Enable chart related to hard IRQ latency.\n"
- "\n"
- " [-]-mdflush Enable charts related to multi-device flush.\n"
- "\n"
- " [-]-mount Enable charts related to mount monitoring.\n"
- "\n"
- " [-]-net Enable network viewer charts.\n"
- "\n"
- " [-]-oomkill Enable chart related to OOM kill tracking.\n"
- "\n"
- " [-]-process Enable charts related to process run time.\n"
- "\n"
- " [-]-return Run the collector in return mode.\n"
- "\n"
- " [-]-shm Enable chart related to shared memory tracking.\n"
- "\n"
- " [-]-softirq Enable chart related to soft IRQ latency.\n"
- "\n"
- " [-]-sync Enable chart related to sync run time.\n"
- "\n"
- " [-]-swap Enable chart related to swap run time.\n"
- "\n"
- " [-]-vfs Enable chart related to vfs run time.\n"
- "\n"
- " [-]-legacy Load legacy eBPF programs.\n"
- "\n"
- " [-]-core Use CO-RE when available(Working in progress).\n"
- "\n",
- VERSION,
- (year >= 116) ? year + 1900 : 2020);
-}
-
-/*****************************************************************
- *
- * TRACEPOINT MANAGEMENT FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * Enable a tracepoint.
- *
- * @return 0 on success, -1 on error.
- */
-int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp)
-{
- int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
-
- // err?
- if (test == -1) {
- return -1;
- }
- // disabled?
- else if (test == 0) {
- // enable it then.
- if (ebpf_enable_tracing_values(tp->class, tp->event)) {
- return -1;
- }
- }
-
- // enabled now or already was.
- tp->enabled = true;
-
- return 0;
-}
-
-/**
- * Disable a tracepoint if it's enabled.
- *
- * @return 0 on success, -1 on error.
- */
-int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp)
-{
- int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
-
- // err?
- if (test == -1) {
- return -1;
- }
- // enabled?
- else if (test == 1) {
- // disable it then.
- if (ebpf_disable_tracing_values(tp->class, tp->event)) {
- return -1;
- }
- }
-
- // disable now or already was.
- tp->enabled = false;
-
- return 0;
-}
-
-/**
- * Enable multiple tracepoints on a list of tracepoints which end when the
- * class is NULL.
- *
- * @return the number of successful enables.
- */
-uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps)
-{
- uint32_t cnt = 0;
- for (int i = 0; tps[i].class != NULL; i++) {
- if (ebpf_enable_tracepoint(&tps[i]) == -1) {
- netdata_log_error("Failed to enable tracepoint %s:%s", tps[i].class, tps[i].event);
- }
- else {
- cnt += 1;
- }
- }
- return cnt;
-}
-
-/*****************************************************************
- *
- * AUXILIARY FUNCTIONS USED DURING INITIALIZATION
- *
- *****************************************************************/
-
-/**
- * Is ip inside the range
- *
- * Check if the ip is inside a IP range
- *
- * @param rfirst the first ip address of the range
- * @param rlast the last ip address of the range
- * @param cmpfirst the first ip to compare
- * @param cmplast the last ip to compare
- * @param family the IP family
- *
- * @return It returns 1 if the IP is inside the range and 0 otherwise
- */
-static int ebpf_is_ip_inside_range(union netdata_ip_t *rfirst, union netdata_ip_t *rlast,
- union netdata_ip_t *cmpfirst, union netdata_ip_t *cmplast, int family)
-{
- if (family == AF_INET) {
- if ((rfirst->addr32[0] <= cmpfirst->addr32[0]) && (rlast->addr32[0] >= cmplast->addr32[0]))
- return 1;
- } else {
- if (memcmp(rfirst->addr8, cmpfirst->addr8, sizeof(union netdata_ip_t)) <= 0 &&
- memcmp(rlast->addr8, cmplast->addr8, sizeof(union netdata_ip_t)) >= 0) {
- return 1;
- }
-
- }
- return 0;
-}
-
-/**
- * Fill IP list
- *
- * @param out a pointer to the link list.
- * @param in the structure that will be linked.
- * @param table the modified table.
- */
-void ebpf_fill_ip_list_unsafe(ebpf_network_viewer_ip_list_t **out, ebpf_network_viewer_ip_list_t *in,
- char *table __maybe_unused)
-{
- if (in->ver == AF_INET) { // It is simpler to compare using host order
- in->first.addr32[0] = ntohl(in->first.addr32[0]);
- in->last.addr32[0] = ntohl(in->last.addr32[0]);
- }
- if (likely(*out)) {
- ebpf_network_viewer_ip_list_t *move = *out, *store = *out;
- while (move) {
- if (in->ver == move->ver &&
- ebpf_is_ip_inside_range(&move->first, &move->last, &in->first, &in->last, in->ver)) {
-#ifdef NETDATA_DEV_MODE
- netdata_log_info("The range/value (%s) is inside the range/value (%s) already inserted, it will be ignored.",
- in->value, move->value);
-#endif
- freez(in->value);
- freez(in);
- return;
- }
- store = move;
- move = move->next;
- }
-
- store->next = in;
- } else {
- *out = in;
- }
-
-#ifdef NETDATA_DEV_MODE
- char first[256], last[512];
- if (in->ver == AF_INET) {
- netdata_log_info("Adding values %s: (%u - %u) to %s IP list \"%s\" used on network viewer",
- in->value, in->first.addr32[0], in->last.addr32[0],
- (*out == network_viewer_opt.included_ips)?"included":"excluded",
- table);
- } else {
- if (inet_ntop(AF_INET6, in->first.addr8, first, INET6_ADDRSTRLEN) &&
- inet_ntop(AF_INET6, in->last.addr8, last, INET6_ADDRSTRLEN))
- netdata_log_info("Adding values %s - %s to %s IP list \"%s\" used on network viewer",
- first, last,
- (*out == network_viewer_opt.included_ips)?"included":"excluded",
- table);
- }
-#endif
-}
-
-/**
- * Link hostname
- *
- * @param out is the output link list
- * @param in the hostname to add to list.
- */
-static void ebpf_link_hostname(ebpf_network_viewer_hostname_list_t **out, ebpf_network_viewer_hostname_list_t *in)
-{
- if (likely(*out)) {
- ebpf_network_viewer_hostname_list_t *move = *out;
- for (; move->next ; move = move->next ) {
- if (move->hash == in->hash && !strcmp(move->value, in->value)) {
- netdata_log_info("The hostname %s was already inserted, it will be ignored.", in->value);
- freez(in->value);
- simple_pattern_free(in->value_pattern);
- freez(in);
- return;
- }
- }
-
- move->next = in;
- } else {
- *out = in;
- }
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("Adding value %s to %s hostname list used on network viewer",
- in->value,
- (*out == network_viewer_opt.included_hostnames)?"included":"excluded");
-#endif
-}
-
-/**
- * Link Hostnames
- *
- * Parse the list of hostnames to create the link list.
- * This is not associated with the IP, because simple patterns like *example* cannot be resolved to IP.
- *
- * @param out is the output link list
- * @param parse is a pointer with the text to parser.
- */
-static void ebpf_link_hostnames(char *parse)
-{
- // No value
- if (unlikely(!parse))
- return;
-
- while (likely(parse)) {
- // Find the first valid value
- while (isspace(*parse)) parse++;
-
- // No valid value found
- if (unlikely(!*parse))
- return;
-
- // Find space that ends the list
- char *end = strchr(parse, ' ');
- if (end) {
- *end++ = '\0';
- }
-
- int neg = 0;
- if (*parse == '!') {
- neg++;
- parse++;
- }
-
- ebpf_network_viewer_hostname_list_t *hostname = callocz(1 , sizeof(ebpf_network_viewer_hostname_list_t));
- hostname->value = strdupz(parse);
- hostname->hash = simple_hash(parse);
- hostname->value_pattern = simple_pattern_create(parse, NULL, SIMPLE_PATTERN_EXACT, true);
-
- ebpf_link_hostname((!neg) ? &network_viewer_opt.included_hostnames :
- &network_viewer_opt.excluded_hostnames,
- hostname);
-
- parse = end;
- }
-}
-
-/**
- * Parse network viewer section
- *
- * @param cfg the configuration structure
- */
-void parse_network_viewer_section(struct config *cfg)
-{
- network_viewer_opt.hostname_resolution_enabled = appconfig_get_boolean(cfg,
- EBPF_NETWORK_VIEWER_SECTION,
- EBPF_CONFIG_RESOLVE_HOSTNAME,
- CONFIG_BOOLEAN_NO);
-
- network_viewer_opt.service_resolution_enabled = appconfig_get_boolean(cfg,
- EBPF_NETWORK_VIEWER_SECTION,
- EBPF_CONFIG_RESOLVE_SERVICE,
- CONFIG_BOOLEAN_YES);
-
- char *value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_PORTS, NULL);
- ebpf_parse_ports(value);
-
- if (network_viewer_opt.hostname_resolution_enabled) {
- value = appconfig_get(cfg, EBPF_NETWORK_VIEWER_SECTION, EBPF_CONFIG_HOSTNAMES, NULL);
- ebpf_link_hostnames(value);
- } else {
- netdata_log_info("Name resolution is disabled, collector will not parse \"hostnames\" list.");
- }
-
- value = appconfig_get(cfg,
- EBPF_NETWORK_VIEWER_SECTION,
- "ips",
- NULL);
- //"ips", "!127.0.0.1/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16 fc00::/7 !::1/128");
- ebpf_parse_ips_unsafe(value);
-}
-
-/**
- * Read Local Ports
- *
- * Parse /proc/net/{tcp,udp} and get the ports Linux is listening.
- *
- * @param filename the proc file to parse.
- * @param proto is the magic number associated to the protocol file we are reading.
- */
-static void read_local_ports(char *filename, uint8_t proto)
-{
- procfile *ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
- if (!ff)
- return;
-
- ff = procfile_readall(ff);
- if (!ff)
- return;
-
- size_t lines = procfile_lines(ff), l;
- netdata_passive_connection_t values = {.counter = 0, .tgid = 0, .pid = 0};
- for(l = 0; l < lines ;l++) {
- size_t words = procfile_linewords(ff, l);
- // This is header or end of file
- if (unlikely(words < 14))
- continue;
-
- // https://elixir.bootlin.com/linux/v5.7.8/source/include/net/tcp_states.h
- // 0A = TCP_LISTEN
- if (strcmp("0A", procfile_lineword(ff, l, 5)))
- continue;
-
- // Read local port
- uint16_t port = (uint16_t)strtol(procfile_lineword(ff, l, 2), NULL, 16);
- update_listen_table(htons(port), proto, &values);
- }
-
- procfile_close(ff);
-}
-
-/**
- * Read Local addresseses
- *
- * Read the local address from the interfaces.
- */
-void ebpf_read_local_addresses_unsafe()
-{
- struct ifaddrs *ifaddr, *ifa;
- if (getifaddrs(&ifaddr) == -1) {
- netdata_log_error("Cannot get the local IP addresses, it is no possible to do separation between inbound and outbound connections");
- return;
- }
-
- char *notext = { "No text representation" };
- for (ifa = ifaddr; ifa != NULL; ifa = ifa->ifa_next) {
- if (ifa->ifa_addr == NULL)
- continue;
-
- if ((ifa->ifa_addr->sa_family != AF_INET) && (ifa->ifa_addr->sa_family != AF_INET6))
- continue;
-
- ebpf_network_viewer_ip_list_t *w = callocz(1, sizeof(ebpf_network_viewer_ip_list_t));
-
- int family = ifa->ifa_addr->sa_family;
- w->ver = (uint8_t) family;
- char text[INET6_ADDRSTRLEN];
- if (family == AF_INET) {
- struct sockaddr_in *in = (struct sockaddr_in*) ifa->ifa_addr;
-
- w->first.addr32[0] = in->sin_addr.s_addr;
- w->last.addr32[0] = in->sin_addr.s_addr;
-
- if (inet_ntop(AF_INET, w->first.addr8, text, INET_ADDRSTRLEN)) {
- w->value = strdupz(text);
- w->hash = simple_hash(text);
- } else {
- w->value = strdupz(notext);
- w->hash = simple_hash(notext);
- }
- } else {
- struct sockaddr_in6 *in6 = (struct sockaddr_in6*) ifa->ifa_addr;
-
- memcpy(w->first.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
- memcpy(w->last.addr8, (void *)&in6->sin6_addr, sizeof(struct in6_addr));
-
- if (inet_ntop(AF_INET6, w->first.addr8, text, INET_ADDRSTRLEN)) {
- w->value = strdupz(text);
- w->hash = simple_hash(text);
- } else {
- w->value = strdupz(notext);
- w->hash = simple_hash(notext);
- }
- }
-
- ebpf_fill_ip_list_unsafe(
- (family == AF_INET) ? &network_viewer_opt.ipv4_local_ip : &network_viewer_opt.ipv6_local_ip, w, "selector");
- }
-
- freeifaddrs(ifaddr);
-}
-
-/**
- * Start Pthread Variable
- *
- * This function starts all pthread variables.
- */
-void ebpf_start_pthread_variables()
-{
- pthread_mutex_init(&lock, NULL);
- pthread_mutex_init(&ebpf_exit_cleanup, NULL);
- pthread_mutex_init(&collect_data_mutex, NULL);
- pthread_mutex_init(&mutex_cgroup_shm, NULL);
- rw_spinlock_init(&ebpf_judy_pid.index.rw_spinlock);
-}
-
-/**
- * Allocate the vectors used for all threads.
- */
-static void ebpf_allocate_common_vectors()
-{
- ebpf_judy_pid.pid_table = ebpf_allocate_pid_aral(NETDATA_EBPF_PID_SOCKET_ARAL_TABLE_NAME,
- sizeof(netdata_ebpf_judy_pid_stats_t));
- ebpf_all_pids = callocz((size_t)pid_max, sizeof(struct ebpf_pid_stat *));
- ebpf_aral_init();
-}
-
-/**
- * Define how to load the ebpf programs
- *
- * @param ptr the option given by users
- */
-static inline void ebpf_how_to_load(char *ptr)
-{
- if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_RETURN))
- ebpf_set_thread_mode(MODE_RETURN);
- else if (!strcasecmp(ptr, EBPF_CFG_LOAD_MODE_DEFAULT))
- ebpf_set_thread_mode(MODE_ENTRY);
- else
- netdata_log_error("the option %s for \"ebpf load mode\" is not a valid option.", ptr);
-}
-
-/**
- * Define whether we should have charts for apps
- *
- * @param lmode the mode that will be used for them.
- */
-static inline void ebpf_set_apps_mode(netdata_apps_integration_flags_t value)
-{
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_modules[i].apps_charts = value;
- }
-}
-
-
-/**
- * Update interval
- *
- * Update default interval with value from user
- *
- * @param update_every value to overwrite the update frequency set by the server.
- */
-static void ebpf_update_interval(int update_every)
-{
- int i;
- int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY,
- update_every);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].update_every = value;
- }
-}
-
-/**
- * Update PID table size
- *
- * Update default size with value from user
- */
-static void ebpf_update_table_size()
-{
- int i;
- uint32_t value = (uint32_t) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION,
- EBPF_CFG_PID_SIZE, ND_EBPF_DEFAULT_PID_SIZE);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].pid_map_size = value;
- }
-}
-
-/**
- * Update lifetime
- *
- * Update the period of time that specific thread will run
- */
-static void ebpf_update_lifetime()
-{
- int i;
- uint32_t value = (uint32_t) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION,
- EBPF_CFG_LIFETIME, EBPF_DEFAULT_LIFETIME);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].lifetime = value;
- }
-}
-
-/**
- * Set Load mode
- *
- * @param origin specify the configuration file loaded
- */
-static inline void ebpf_set_load_mode(netdata_ebpf_load_mode_t load, netdata_ebpf_load_mode_t origin)
-{
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].load &= ~NETDATA_EBPF_LOAD_METHODS;
- ebpf_modules[i].load |= load | origin ;
- }
-}
-
-/**
- * Update mode
- *
- * @param str value read from configuration file.
- * @param origin specify the configuration file loaded
- */
-static inline void epbf_update_load_mode(char *str, netdata_ebpf_load_mode_t origin)
-{
- netdata_ebpf_load_mode_t load = epbf_convert_string_to_load_mode(str);
-
- ebpf_set_load_mode(load, origin);
-}
-
-/**
- * Update Map per core
- *
- * Define the map type used with some hash tables.
- */
-static void ebpf_update_map_per_core()
-{
- int i;
- int value = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION,
- EBPF_CFG_MAPS_PER_CORE, CONFIG_BOOLEAN_YES);
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_modules[i].maps_per_core = value;
- }
-}
-
-/**
- * Read collector values
- *
- * @param disable_cgroups variable to store information related to cgroups.
- * @param update_every value to overwrite the update frequency set by the server.
- * @param origin specify the configuration file loaded
- */
-static void read_collector_values(int *disable_cgroups,
- int update_every, netdata_ebpf_load_mode_t origin)
-{
- // Read global section
- char *value;
- if (appconfig_exists(&collector_config, EBPF_GLOBAL_SECTION, "load")) // Backward compatibility
- value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, "load",
- EBPF_CFG_LOAD_MODE_DEFAULT);
- else
- value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_LOAD_MODE,
- EBPF_CFG_LOAD_MODE_DEFAULT);
-
- ebpf_how_to_load(value);
-
- btf_path = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_PROGRAM_PATH,
- EBPF_DEFAULT_BTF_PATH);
-
-#ifdef LIBBPF_MAJOR_VERSION
- default_btf = ebpf_load_btf_file(btf_path, EBPF_DEFAULT_BTF_FILE);
-#endif
-
- value = appconfig_get(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_TYPE_FORMAT, EBPF_CFG_DEFAULT_PROGRAM);
-
- epbf_update_load_mode(value, origin);
-
- ebpf_update_interval(update_every);
-
- ebpf_update_table_size();
-
- ebpf_update_lifetime();
-
- // This is kept to keep compatibility
- uint32_t enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, "disable apps",
- CONFIG_BOOLEAN_NO);
- if (!enabled) {
- // Apps is a positive sentence, so we need to invert the values to disable apps.
- enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_APPLICATION,
- CONFIG_BOOLEAN_YES);
- enabled = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
- }
-
- ebpf_set_apps_mode(!enabled);
-
- // Cgroup is a positive sentence, so we need to invert the values to disable apps.
- // We are using the same pattern for cgroup and apps
- enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_CGROUP, CONFIG_BOOLEAN_NO);
- *disable_cgroups = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
-
- ebpf_update_map_per_core();
-
- // Read ebpf programs section
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
- ebpf_modules[EBPF_MODULE_PROCESS_IDX].info.config_name, CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_cgroups);
- }
-
- // This is kept to keep compatibility
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network viewer",
- CONFIG_BOOLEAN_NO);
- if (!enabled)
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
- ebpf_modules[EBPF_MODULE_SOCKET_IDX].info.config_name,
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_cgroups);
- }
-
- // This is kept to keep compatibility
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connection monitoring",
- CONFIG_BOOLEAN_YES);
- if (!enabled)
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections",
- CONFIG_BOOLEAN_YES);
-
- network_viewer_opt.enabled = enabled;
- if (enabled) {
- if (!ebpf_modules[EBPF_MODULE_SOCKET_IDX].enabled)
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_cgroups);
-
- // Read network viewer section if network viewer is enabled
- // This is kept here to keep backward compatibility
- parse_network_viewer_section(&collector_config);
- ebpf_parse_service_name_section(&collector_config);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "cachestat",
- CONFIG_BOOLEAN_NO);
-
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "sync",
- CONFIG_BOOLEAN_YES);
-
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "dcstat",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "swap",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SWAP_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "vfs",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_VFS_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "filesystem",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_FILESYSTEM_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "disk",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_DISK_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mount",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_MOUNT_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "fd",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_FD_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "hardirq",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_HARDIRQ_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "softirq",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SOFTIRQ_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "oomkill",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_OOMKILL_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "shm",
- CONFIG_BOOLEAN_YES);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SHM_IDX, *disable_cgroups);
- }
-
- enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mdflush",
- CONFIG_BOOLEAN_NO);
- if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_MDFLUSH_IDX, *disable_cgroups);
- }
-}
-
-/**
- * Load collector config
- *
- * @param path the path where the file ebpf.conf is stored.
- * @param disable_cgroups variable to store the information about cgroups plugin status.
- * @param update_every value to overwrite the update frequency set by the server.
- *
- * @return 0 on success and -1 otherwise.
- */
-static int ebpf_load_collector_config(char *path, int *disable_cgroups, int update_every)
-{
- char lpath[4096];
- netdata_ebpf_load_mode_t origin;
-
- snprintf(lpath, 4095, "%s/%s", path, NETDATA_EBPF_CONFIG_FILE);
- if (!appconfig_load(&collector_config, lpath, 0, NULL)) {
- snprintf(lpath, 4095, "%s/%s", path, NETDATA_EBPF_OLD_CONFIG_FILE);
- if (!appconfig_load(&collector_config, lpath, 0, NULL)) {
- return -1;
- }
- origin = EBPF_LOADED_FROM_STOCK;
- } else
- origin = EBPF_LOADED_FROM_USER;
-
- read_collector_values(disable_cgroups, update_every, origin);
-
- return 0;
-}
-
-/**
- * Set global variables reading environment variables
- */
-void set_global_variables()
-{
- // Get environment variables
- ebpf_plugin_dir = getenv("NETDATA_PLUGINS_DIR");
- if (!ebpf_plugin_dir)
- ebpf_plugin_dir = PLUGINS_DIR;
-
- ebpf_user_config_dir = getenv("NETDATA_USER_CONFIG_DIR");
- if (!ebpf_user_config_dir)
- ebpf_user_config_dir = CONFIG_DIR;
-
- ebpf_stock_config_dir = getenv("NETDATA_STOCK_CONFIG_DIR");
- if (!ebpf_stock_config_dir)
- ebpf_stock_config_dir = LIBCONFIG_DIR;
-
- ebpf_configured_log_dir = getenv("NETDATA_LOG_DIR");
- if (!ebpf_configured_log_dir)
- ebpf_configured_log_dir = LOG_DIR;
-
- ebpf_nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
- if (ebpf_nprocs < 0) {
- ebpf_nprocs = NETDATA_MAX_PROCESSOR;
- netdata_log_error("Cannot identify number of process, using default value %d", ebpf_nprocs);
- }
-
- isrh = get_redhat_release();
- pid_max = get_system_pid_max();
- running_on_kernel = ebpf_get_kernel_version();
-}
-
-/**
- * Load collector config
- */
-static inline void ebpf_load_thread_config()
-{
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_update_module(&ebpf_modules[i], default_btf, running_on_kernel, isrh);
- }
-}
-
-/**
- * Check Conditions
- *
- * This function checks kernel that plugin is running and permissions.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-int ebpf_check_conditions()
-{
- if (!has_condition_to_run(running_on_kernel)) {
- netdata_log_error("The current collector cannot run on this kernel.");
- return -1;
- }
-
- if (!am_i_running_as_root()) {
- netdata_log_error(
- "ebpf.plugin should either run as root (now running with uid %u, euid %u) or have special capabilities..",
- (unsigned int)getuid(), (unsigned int)geteuid());
- return -1;
- }
-
- return 0;
-}
-
-/**
- * Adjust memory
- *
- * Adjust memory values to load eBPF programs.
- *
- * @return It returns 0 on success and -1 otherwise
- */
-int ebpf_adjust_memory_limit()
-{
- struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
- netdata_log_error("Setrlimit(RLIMIT_MEMLOCK)");
- return -1;
- }
-
- return 0;
-}
-
-/**
- * Parse arguments given from user.
- *
- * @param argc the number of arguments
- * @param argv the pointer to the arguments
- */
-static void ebpf_parse_args(int argc, char **argv)
-{
- int disable_cgroups = 1;
- int freq = 0;
- int option_index = 0;
- uint64_t select_threads = 0;
- static struct option long_options[] = {
- {"process", no_argument, 0, 0 },
- {"net", no_argument, 0, 0 },
- {"cachestat", no_argument, 0, 0 },
- {"sync", no_argument, 0, 0 },
- {"dcstat", no_argument, 0, 0 },
- {"swap", no_argument, 0, 0 },
- {"vfs", no_argument, 0, 0 },
- {"filesystem", no_argument, 0, 0 },
- {"disk", no_argument, 0, 0 },
- {"mount", no_argument, 0, 0 },
- {"filedescriptor", no_argument, 0, 0 },
- {"hardirq", no_argument, 0, 0 },
- {"softirq", no_argument, 0, 0 },
- {"oomkill", no_argument, 0, 0 },
- {"shm", no_argument, 0, 0 },
- {"mdflush", no_argument, 0, 0 },
- /* INSERT NEW THREADS BEFORE THIS COMMENT TO KEEP COMPATIBILITY WITH enum ebpf_module_indexes */
- {"all", no_argument, 0, 0 },
- {"version", no_argument, 0, 0 },
- {"help", no_argument, 0, 0 },
- {"global", no_argument, 0, 0 },
- {"return", no_argument, 0, 0 },
- {"legacy", no_argument, 0, 0 },
- {"core", no_argument, 0, 0 },
- {"unittest", no_argument, 0, 0 },
- {0, 0, 0, 0}
- };
-
- memset(&network_viewer_opt, 0, sizeof(network_viewer_opt));
- rw_spinlock_init(&network_viewer_opt.rw_spinlock);
-
- if (argc > 1) {
- int n = (int)str2l(argv[1]);
- if (n > 0) {
- freq = n;
- }
- }
-
- if (!freq)
- freq = EBPF_DEFAULT_UPDATE_EVERY;
-
- //rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- if (ebpf_load_collector_config(ebpf_user_config_dir, &disable_cgroups, freq)) {
- netdata_log_info(
- "Does not have a configuration file inside `%s/ebpf.d.conf. It will try to load stock file.",
- ebpf_user_config_dir);
- if (ebpf_load_collector_config(ebpf_stock_config_dir, &disable_cgroups, freq)) {
- netdata_log_info("Does not have a stock file. It is starting with default options.");
- }
- }
-
- ebpf_load_thread_config();
- //rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- while (1) {
- int c = getopt_long_only(argc, argv, "", long_options, &option_index);
- if (c == -1)
- break;
-
- switch (option_index) {
- case EBPF_MODULE_PROCESS_IDX: {
- select_threads |= 1<<EBPF_MODULE_PROCESS_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"PROCESS\" charts, because it was started with the option \"[-]-process\".");
-#endif
- break;
- }
- case EBPF_MODULE_SOCKET_IDX: {
- select_threads |= 1<<EBPF_MODULE_SOCKET_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"NET\" charts, because it was started with the option \"[-]-net\".");
-#endif
- break;
- }
- case EBPF_MODULE_CACHESTAT_IDX: {
- select_threads |= 1<<EBPF_MODULE_CACHESTAT_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"CACHESTAT\" charts, because it was started with the option \"[-]-cachestat\".");
-#endif
- break;
- }
- case EBPF_MODULE_SYNC_IDX: {
- select_threads |= 1<<EBPF_MODULE_SYNC_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SYNC\" chart, because it was started with the option \"[-]-sync\".");
-#endif
- break;
- }
- case EBPF_MODULE_DCSTAT_IDX: {
- select_threads |= 1<<EBPF_MODULE_DCSTAT_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"DCSTAT\" charts, because it was started with the option \"[-]-dcstat\".");
-#endif
- break;
- }
- case EBPF_MODULE_SWAP_IDX: {
- select_threads |= 1<<EBPF_MODULE_SWAP_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SWAP\" chart, because it was started with the option \"[-]-swap\".");
-#endif
- break;
- }
- case EBPF_MODULE_VFS_IDX: {
- select_threads |= 1<<EBPF_MODULE_VFS_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"VFS\" chart, because it was started with the option \"[-]-vfs\".");
-#endif
- break;
- }
- case EBPF_MODULE_FILESYSTEM_IDX: {
- select_threads |= 1<<EBPF_MODULE_FILESYSTEM_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"FILESYSTEM\" chart, because it was started with the option \"[-]-filesystem\".");
-#endif
- break;
- }
- case EBPF_MODULE_DISK_IDX: {
- select_threads |= 1<<EBPF_MODULE_DISK_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"DISK\" chart, because it was started with the option \"[-]-disk\".");
-#endif
- break;
- }
- case EBPF_MODULE_MOUNT_IDX: {
- select_threads |= 1<<EBPF_MODULE_MOUNT_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"MOUNT\" chart, because it was started with the option \"[-]-mount\".");
-#endif
- break;
- }
- case EBPF_MODULE_FD_IDX: {
- select_threads |= 1<<EBPF_MODULE_FD_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"FILEDESCRIPTOR\" chart, because it was started with the option \"[-]-filedescriptor\".");
-#endif
- break;
- }
- case EBPF_MODULE_HARDIRQ_IDX: {
- select_threads |= 1<<EBPF_MODULE_HARDIRQ_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"HARDIRQ\" chart, because it was started with the option \"[-]-hardirq\".");
-#endif
- break;
- }
- case EBPF_MODULE_SOFTIRQ_IDX: {
- select_threads |= 1<<EBPF_MODULE_SOFTIRQ_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SOFTIRQ\" chart, because it was started with the option \"[-]-softirq\".");
-#endif
- break;
- }
- case EBPF_MODULE_OOMKILL_IDX: {
- select_threads |= 1<<EBPF_MODULE_OOMKILL_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"OOMKILL\" chart, because it was started with the option \"[-]-oomkill\".");
-#endif
- break;
- }
- case EBPF_MODULE_SHM_IDX: {
- select_threads |= 1<<EBPF_MODULE_SHM_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"SHM\" chart, because it was started with the option \"[-]-shm\".");
-#endif
- break;
- }
- case EBPF_MODULE_MDFLUSH_IDX: {
- select_threads |= 1<<EBPF_MODULE_MDFLUSH_IDX;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF enabling \"MDFLUSH\" chart, because it was started with the option \"[-]-mdflush\".");
-#endif
- break;
- }
- case EBPF_OPTION_ALL_CHARTS: {
- ebpf_set_apps_mode(NETDATA_EBPF_APPS_FLAG_YES);
- disable_cgroups = 0;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with all chart groups, because it was started with the option \"[-]-all\".");
-#endif
- break;
- }
- case EBPF_OPTION_VERSION: {
- printf("ebpf.plugin %s\n", VERSION);
- exit(0);
- }
- case EBPF_OPTION_HELP: {
- ebpf_print_help();
- exit(0);
- }
- case EBPF_OPTION_GLOBAL_CHART: {
- disable_cgroups = 1;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with global chart group, because it was started with the option \"[-]-global\".");
-#endif
- break;
- }
- case EBPF_OPTION_RETURN_MODE: {
- ebpf_set_thread_mode(MODE_RETURN);
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running in \"RETURN\" mode, because it was started with the option \"[-]-return\".");
-#endif
- break;
- }
- case EBPF_OPTION_LEGACY: {
- ebpf_set_load_mode(EBPF_LOAD_LEGACY, EBPF_LOADED_FROM_USER);
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with \"LEGACY\" code, because it was started with the option \"[-]-legacy\".");
-#endif
- break;
- }
- case EBPF_OPTION_CORE: {
- ebpf_set_load_mode(EBPF_LOAD_CORE, EBPF_LOADED_FROM_USER);
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("EBPF running with \"CO-RE\" code, because it was started with the option \"[-]-core\".");
-#endif
- break;
- }
- case EBPF_OPTION_UNITTEST: {
- // if we cannot run until the end, we will cancel the unittest
- int exit_code = ECANCELED;
- if (ebpf_check_conditions())
- goto unittest;
-
- if (ebpf_adjust_memory_limit())
- goto unittest;
-
- // Load binary in entry mode
- ebpf_ut_initialize_structure(MODE_ENTRY);
- if (ebpf_ut_load_real_binary())
- goto unittest;
-
- ebpf_ut_cleanup_memory();
-
- // Do not load a binary in entry mode
- ebpf_ut_initialize_structure(MODE_ENTRY);
- if (ebpf_ut_load_fake_binary())
- goto unittest;
-
- ebpf_ut_cleanup_memory();
-
- exit_code = 0;
-unittest:
- exit(exit_code);
- }
- default: {
- break;
- }
- }
- }
-
- if (disable_cgroups) {
- ebpf_disable_cgroups();
- }
-
- if (select_threads) {
- disable_all_global_charts();
- uint64_t idx;
- for (idx = 0; idx < EBPF_OPTION_ALL_CHARTS; idx++) {
- if (select_threads & 1<<idx)
- ebpf_enable_specific_chart(&ebpf_modules[idx], disable_cgroups);
- }
- }
-
- // Load apps_groups.conf
- if (ebpf_read_apps_groups_conf(
- &apps_groups_default_target, &apps_groups_root_target, ebpf_user_config_dir, "groups")) {
- netdata_log_info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
- ebpf_user_config_dir, ebpf_stock_config_dir);
- if (ebpf_read_apps_groups_conf(
- &apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) {
- netdata_log_error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
- ebpf_stock_config_dir);
- ebpf_exit();
- }
- } else
- netdata_log_info("Loaded config file '%s/apps_groups.conf'", ebpf_user_config_dir);
-}
-
-/*****************************************************************
- *
- * Collector charts
- *
- *****************************************************************/
-
-static char *load_event_stat[NETDATA_EBPF_LOAD_STAT_END] = {"legacy", "co-re"};
-static char *memlock_stat = {"memory_locked"};
-static char *hash_table_stat = {"hash_table"};
-static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique"};
-
-/**
- * Send Hash Table PID data
- *
- * Send all information associated with a specific pid table.
- *
- * @param chart chart id
- * @param idx index position in hash_table_stats
- */
-static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx)
-{
- int i;
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, chart, "");
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (wem->functions.apps_routine)
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
- wem->hash_table_stats[idx]:
- 0);
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Send Global Hash Table data
- *
- * Send all information associated with a specific pid table.
- *
- */
-static inline void ebpf_send_global_hash_table_data()
-{
- int i;
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS, "");
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? NETDATA_CONTROLLER_END: 0);
- }
- ebpf_write_end_chart();
-}
-
-/**
- * Send Statistic Data
- *
- * Send statistic information to netdata.
- */
-void ebpf_send_statistic_data()
-{
- if (!publish_internal_metrics)
- return;
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_THREADS, "");
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (wem->functions.fnct_routine)
- continue;
-
- write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 1 : 0);
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LIFE_TIME, "");
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX ; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- // Threads like VFS is slow to load and this can create an invalid number, this is the motive
- // we are also testing wem->lifetime value.
- if (wem->functions.fnct_routine)
- continue;
-
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->lifetime && wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
- (long long) (wem->lifetime - wem->running_time):
- 0) ;
- }
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_LOAD_METHOD, "");
- write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY], (long long)plugin_statistics.legacy);
- write_chart_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE], (long long)plugin_statistics.core);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_KERNEL_MEMORY, "");
- write_chart_dimension(memlock_stat, (long long)plugin_statistics.memlock_kern);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_LOADED, "");
- write_chart_dimension(hash_table_stat, (long long)plugin_statistics.hash_tables);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_PER_CORE, "");
- write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu);
- write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique);
- ebpf_write_end_chart();
-
- ebpf_send_global_hash_table_data();
-
- ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD);
- ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL);
-
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (!wem->functions.fnct_routine)
- continue;
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, (char *)wem->functions.fcnt_thread_chart_name, "");
- write_chart_dimension((char *)wem->info.thread_name, (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? 1 : 0);
- ebpf_write_end_chart();
-
- ebpf_write_begin_chart(NETDATA_MONITORING_FAMILY, (char *)wem->functions.fcnt_thread_lifetime_name, "");
- write_chart_dimension((char *)wem->info.thread_name,
- (wem->lifetime && wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
- (long long) (wem->lifetime - wem->running_time):
- 0) ;
- ebpf_write_end_chart();
- }
-}
-
-/**
- * Update Internal Metric variable
- *
- * By default eBPF.plugin sends internal metrics for netdata, but user can
- * disable this.
- *
- * The function updates the variable used to send charts.
- */
-static void update_internal_metric_variable()
-{
- const char *s = getenv("NETDATA_INTERNALS_MONITORING");
- if (s && *s && strcmp(s, "NO") == 0)
- publish_internal_metrics = false;
-}
-
-/**
- * Create Thread Chart
- *
- * Write to standard output current values for threads charts.
- *
- * @param name is the chart name
- * @param title chart title.
- * @param units chart units
- * @param order is the chart order
- * @param update_every time used to update charts
- * @param module a module to create a specific chart.
- */
-static void ebpf_create_thread_chart(char *name,
- char *title,
- char *units,
- int order,
- int update_every,
- ebpf_module_t *module)
-{
- // common call for specific and all charts.
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- name,
- "",
- title,
- units,
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order,
- update_every,
- "main");
-
- if (module) {
- ebpf_write_global_dimension((char *)module->info.thread_name,
- (char *)module->info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- return;
- }
-
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *em = &ebpf_modules[i];
- if (em->functions.fnct_routine)
- continue;
-
- ebpf_write_global_dimension((char *)em->info.thread_name,
- (char *)em->info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- }
-}
-
-/**
- * Create chart for Load Thread
- *
- * Write to standard output current values for load mode.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_load_chart(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_LOAD_METHOD,
- "",
- "Load info.",
- "methods",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_LOAD_METHOD,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY],
- load_event_stat[NETDATA_EBPF_LOAD_STAT_LEGACY],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_global_dimension(load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE],
- load_event_stat[NETDATA_EBPF_LOAD_STAT_CORE],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Create chart for Kernel Memory
- *
- * Write to standard output current values for allocated memory.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_kernel_memory(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_KERNEL_MEMORY,
- "",
- "Memory allocated for hash tables.",
- "bytes",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_KERNEL_MEMORY,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(memlock_stat,
- memlock_stat,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Create chart Hash Table
- *
- * Write to standard output number of hash tables used with this software.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_hash_tables(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_HASH_TABLES_LOADED,
- "",
- "Number of hash tables loaded.",
- "hash tables",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_HASH_TABLES,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(hash_table_stat,
- hash_table_stat,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Create chart for percpu stats
- *
- * Write to standard output current values for threads.
- *
- * @param update_every time used to update charts
- */
-static inline void ebpf_create_statistic_hash_per_core(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_HASH_TABLES_PER_CORE,
- "",
- "How threads are loading hash/array tables.",
- "threads",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_HASH_CORE,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
- hash_table_core[NETDATA_EBPF_THREAD_PER_CORE],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-
- ebpf_write_global_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
- hash_table_core[NETDATA_EBPF_THREAD_UNIQUE],
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
-}
-
-/**
- * Hash table global elements
- *
- * Write to standard output current values inside global tables.
- *
- * @param update_every time used to update charts
- */
-static void ebpf_create_statistic_hash_global_elements(int update_every)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS,
- "",
- "Controllers inside global table",
- "rows",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_write_global_dimension((char *)ebpf_modules[i].info.thread_name,
- (char *)ebpf_modules[i].info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
- }
-}
-
-/**
- * Hash table global elements
- *
- * Write to standard output current values inside global tables.
- *
- * @param update_every time used to update charts
- * @param id chart id
- * @param title chart title
- * @param order ordder chart will be shown on dashboard.
- */
-static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, char *title, int order)
-{
- ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
- id,
- "",
- title,
- "rows",
- NETDATA_EBPF_FAMILY,
- NETDATA_EBPF_CHART_TYPE_LINE,
- NULL,
- order,
- update_every,
- NETDATA_EBPF_MODULE_NAME_PROCESS);
-
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *wem = &ebpf_modules[i];
- if (wem->functions.apps_routine)
- ebpf_write_global_dimension((char *)wem->info.thread_name,
- (char *)wem->info.thread_name,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
- }
-}
-
-/**
- * Create Statistics Charts
- *
- * Create charts that will show statistics related to eBPF plugin.
- *
- * @param update_every time used to update charts
- */
-static void ebpf_create_statistic_charts(int update_every)
-{
- static char create_charts = 1;
- update_internal_metric_variable();
- if (!publish_internal_metrics)
- return;
-
- if (!create_charts)
- return;
-
- create_charts = 0;
-
- ebpf_create_thread_chart(NETDATA_EBPF_THREADS,
- "Threads running.",
- "boolean",
- NETDATA_EBPF_ORDER_STAT_THREADS,
- update_every,
- NULL);
- /*
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(EBPF_FUNCTION_THREAD, EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION);
-#endif
- */
-
- ebpf_create_thread_chart(NETDATA_EBPF_LIFE_TIME,
- "Time remaining for thread.",
- "seconds",
- NETDATA_EBPF_ORDER_STAT_LIFE_TIME,
- update_every,
- NULL);
- /*
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(EBPF_FUNCTION_THREAD, EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION);
-#endif
- */
-
- int i,j;
- char name[256];
- for (i = 0, j = NETDATA_EBPF_ORDER_FUNCTION_PER_THREAD; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- ebpf_module_t *em = &ebpf_modules[i];
- if (!em->functions.fnct_routine)
- continue;
-
- em->functions.order_thread_chart = j;
- snprintfz(name, sizeof(name) - 1, "%s_%s", NETDATA_EBPF_THREADS, em->info.thread_name);
- em->functions.fcnt_thread_chart_name = strdupz(name);
- ebpf_create_thread_chart(name,
- "Threads running.",
- "boolean",
- j++,
- update_every,
- em);
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(em->functions.fcnt_name, em->functions.fcnt_desc);
-#endif
-
- em->functions.order_thread_lifetime = j;
- snprintfz(name, sizeof(name) - 1, "%s_%s", NETDATA_EBPF_LIFE_TIME, em->info.thread_name);
- em->functions.fcnt_thread_lifetime_name = strdupz(name);
- ebpf_create_thread_chart(name,
- "Time remaining for thread.",
- "seconds",
- j++,
- update_every,
- em);
-#ifdef NETDATA_DEV_MODE
- EBPF_PLUGIN_FUNCTIONS(em->functions.fcnt_name, em->functions.fcnt_desc);
-#endif
- }
-
- ebpf_create_statistic_load_chart(update_every);
-
- ebpf_create_statistic_kernel_memory(update_every);
-
- ebpf_create_statistic_hash_tables(update_every);
-
- ebpf_create_statistic_hash_per_core(update_every);
-
- ebpf_create_statistic_hash_global_elements(update_every);
-
- ebpf_create_statistic_hash_pid_table(update_every,
- NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS,
- "Elements inserted into PID table",
- NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED);
-
- ebpf_create_statistic_hash_pid_table(update_every,
- NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS,
- "Elements removed from PID table",
- NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED);
-
- fflush(stdout);
-}
-
-/*****************************************************************
- *
- * COLLECTOR ENTRY POINT
- *
- *****************************************************************/
-
-/**
- * Update PID file
- *
- * Update the content of PID file
- *
- * @param filename is the full name of the file.
- * @param pid that identifies the process
- */
-static void ebpf_update_pid_file(char *filename, pid_t pid)
-{
- FILE *fp = fopen(filename, "w");
- if (!fp)
- return;
-
- fprintf(fp, "%d", pid);
- fclose(fp);
-}
-
-/**
- * Get Process Name
- *
- * Get process name from /proc/PID/status
- *
- * @param pid that identifies the process
- */
-static char *ebpf_get_process_name(pid_t pid)
-{
- char *name = NULL;
- char filename[FILENAME_MAX + 1];
- snprintfz(filename, FILENAME_MAX, "/proc/%d/status", pid);
-
- procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
- if(unlikely(!ff)) {
- netdata_log_error("Cannot open %s", filename);
- return name;
- }
-
- ff = procfile_readall(ff);
- if(unlikely(!ff))
- return name;
-
- unsigned long i, lines = procfile_lines(ff);
- for(i = 0; i < lines ; i++) {
- char *cmp = procfile_lineword(ff, i, 0);
- if (!strcmp(cmp, "Name:")) {
- name = strdupz(procfile_lineword(ff, i, 1));
- break;
- }
- }
-
- procfile_close(ff);
-
- return name;
-}
-
-/**
- * Read Previous PID
- *
- * @param filename is the full name of the file.
- *
- * @return It returns the PID used during previous execution on success or 0 otherwise
- */
-static pid_t ebpf_read_previous_pid(char *filename)
-{
- FILE *fp = fopen(filename, "r");
- if (!fp)
- return 0;
-
- char buffer[64];
- size_t length = fread(buffer, sizeof(*buffer), 63, fp);
- pid_t old_pid = 0;
- if (length) {
- if (length > 63)
- length = 63;
-
- buffer[length] = '\0';
- old_pid = (pid_t) str2uint32_t(buffer, NULL);
- }
- fclose(fp);
-
- return old_pid;
-}
-
-/**
- * Kill previous process
- *
- * Kill previous process whether it was not closed.
- *
- * @param filename is the full name of the file.
- * @param pid that identifies the process
- */
-static void ebpf_kill_previous_process(char *filename, pid_t pid)
-{
- pid_t old_pid = ebpf_read_previous_pid(filename);
- if (!old_pid)
- return;
-
- // Process is not running
- char *prev_name = ebpf_get_process_name(old_pid);
- if (!prev_name)
- return;
-
- char *current_name = ebpf_get_process_name(pid);
-
- if (!strcmp(prev_name, current_name))
- kill(old_pid, SIGKILL);
-
- freez(prev_name);
- freez(current_name);
-
- // wait few microseconds before start new plugin
- sleep_usec(USEC_PER_MS * 300);
-}
-
-/**
- * PID file
- *
- * Write the filename for PID inside the given vector.
- *
- * @param filename vector where we will store the name.
- * @param length number of bytes available in filename vector
- */
-void ebpf_pid_file(char *filename, size_t length)
-{
- snprintfz(filename, length, "%s/var/run/ebpf.pid", netdata_configured_host_prefix);
-}
-
-/**
- * Manage PID
- *
- * This function kills another instance of eBPF whether it is necessary and update the file content.
- *
- * @param pid that identifies the process
- */
-static void ebpf_manage_pid(pid_t pid)
-{
- char filename[FILENAME_MAX + 1];
- ebpf_pid_file(filename, FILENAME_MAX);
-
- ebpf_kill_previous_process(filename, pid);
- ebpf_update_pid_file(filename, pid);
-}
-
-/**
- * Set start routine
- *
- * Set static routine before threads to be created.
- */
- static void ebpf_set_static_routine()
- {
- int i;
- for (i = 0; ebpf_modules[i].info.thread_name; i++) {
- ebpf_threads[i].start_routine = ebpf_modules[i].functions.start_routine;
- }
- }
-
-/**
- * Entry point
- *
- * @param argc the number of arguments
- * @param argv the pointer to the arguments
- *
- * @return it returns 0 on success and another integer otherwise
- */
-int main(int argc, char **argv)
-{
- clocks_init();
- nd_log_initialize_for_external_plugins("ebpf.plugin");
-
- main_thread_id = gettid();
-
- set_global_variables();
- ebpf_parse_args(argc, argv);
- ebpf_manage_pid(getpid());
-
- if (ebpf_check_conditions())
- return 2;
-
- if (ebpf_adjust_memory_limit())
- return 3;
-
- signal(SIGINT, ebpf_stop_threads);
- signal(SIGQUIT, ebpf_stop_threads);
- signal(SIGTERM, ebpf_stop_threads);
- signal(SIGPIPE, ebpf_stop_threads);
-
- ebpf_start_pthread_variables();
-
- netdata_configured_host_prefix = getenv("NETDATA_HOST_PREFIX");
- if(verify_netdata_host_prefix(true) == -1) ebpf_exit(6);
-
- ebpf_allocate_common_vectors();
-
-#ifdef LIBBPF_MAJOR_VERSION
- libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
-#endif
-
- ebpf_read_local_addresses_unsafe();
- read_local_ports("/proc/net/tcp", IPPROTO_TCP);
- read_local_ports("/proc/net/tcp6", IPPROTO_TCP);
- read_local_ports("/proc/net/udp", IPPROTO_UDP);
- read_local_ports("/proc/net/udp6", IPPROTO_UDP);
-
- ebpf_set_static_routine();
-
- cgroup_integration_thread.thread = mallocz(sizeof(netdata_thread_t));
- cgroup_integration_thread.start_routine = ebpf_cgroup_integration;
-
- netdata_thread_create(cgroup_integration_thread.thread, cgroup_integration_thread.name,
- NETDATA_THREAD_OPTION_DEFAULT, ebpf_cgroup_integration, NULL);
-
- int i;
- for (i = 0; ebpf_threads[i].name != NULL; i++) {
- struct netdata_static_thread *st = &ebpf_threads[i];
-
- ebpf_module_t *em = &ebpf_modules[i];
- em->thread = st;
- em->thread_id = i;
- if (em->enabled != NETDATA_THREAD_EBPF_NOT_RUNNING) {
- st->thread = mallocz(sizeof(netdata_thread_t));
- em->enabled = NETDATA_THREAD_EBPF_RUNNING;
- em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
- netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
- } else {
- em->lifetime = EBPF_DEFAULT_LIFETIME;
- }
- }
-
- usec_t step = USEC_PER_SEC;
- heartbeat_t hb;
- heartbeat_init(&hb);
- int update_apps_every = (int) EBPF_CFG_UPDATE_APPS_EVERY_DEFAULT;
- int update_apps_list = update_apps_every - 1;
- int process_maps_per_core = ebpf_modules[EBPF_MODULE_PROCESS_IDX].maps_per_core;
- //Plugin will be killed when it receives a signal
- for ( ; !ebpf_plugin_exit; global_iterations_counter++) {
- (void)heartbeat_next(&hb, step);
-
- if (global_iterations_counter % EBPF_DEFAULT_UPDATE_EVERY == 0) {
- pthread_mutex_lock(&lock);
- ebpf_create_statistic_charts(EBPF_DEFAULT_UPDATE_EVERY);
-
- ebpf_send_statistic_data();
- pthread_mutex_unlock(&lock);
- fflush(stdout);
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- pthread_mutex_lock(&collect_data_mutex);
- if (++update_apps_list == update_apps_every) {
- update_apps_list = 0;
- cleanup_exited_pids();
- collect_data_for_all_processes(process_pid_fd, process_maps_per_core);
-
- pthread_mutex_lock(&lock);
- ebpf_create_apps_charts(apps_groups_root_target);
- pthread_mutex_unlock(&lock);
- }
- pthread_mutex_unlock(&collect_data_mutex);
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- }
-
- ebpf_stop_threads(0);
-
- return 0;
-}
diff --git a/collectors/ebpf.plugin/ebpf.d/swap.conf b/collectors/ebpf.plugin/ebpf.d/swap.conf
deleted file mode 100644
index 29d9b4204..000000000
--- a/collectors/ebpf.plugin/ebpf.d/swap.conf
+++ /dev/null
@@ -1,34 +0,0 @@
-# The `ebpf load mode` option accepts the following values :
-# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
-# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
-# new charts for the return of these functions, such as errors.
-#
-# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
-# or `cgroups.plugin`.
-# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
-# the setting `apps` and `cgroups` to 'no'.
-#
-# The `ebpf type format` option accepts the following values :
-# `auto` : The eBPF collector will investigate hardware and select between the two next options.
-# `legacy`: The eBPF collector will load the legacy code. Note: This has a bigger overload.
-# `co-re` : The eBPF collector will use latest tracing method. Note: This is not available on all platforms.
-#
-# The `ebpf co-re tracing` option accepts the following values:
-# `trampoline`: This is the default mode used by the eBPF collector, due the small overhead added to host.
-# `probe` : This is the same as legacy code.
-#
-# The `maps per core` defines if hash tables will be per core or not. This option is ignored on kernels older than 4.6.
-#
-# The `lifetime` defines the time length a thread will run when it is enabled by a function.
-#
-# Uncomment lines to define specific options for thread.
-[global]
-# ebpf load mode = entry
-# apps = yes
-# cgroups = no
-# update every = 10
-# pid table size = 32768
- ebpf type format = auto
- ebpf co-re tracing = trampoline
-# maps per core = yes
- lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
deleted file mode 100644
index ad7c5a94c..000000000
--- a/collectors/ebpf.plugin/ebpf.h
+++ /dev/null
@@ -1,393 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_COLLECTOR_EBPF_H
-#define NETDATA_COLLECTOR_EBPF_H 1
-
-#ifndef __FreeBSD__
-#include <linux/perf_event.h>
-#endif
-#include <stdint.h>
-#include <errno.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-#include <dlfcn.h>
-
-#include <fcntl.h>
-#include <ctype.h>
-#include <dirent.h>
-
-// From libnetdata.h
-#include "libnetdata/threads/threads.h"
-#include "libnetdata/locks/locks.h"
-#include "libnetdata/avl/avl.h"
-#include "libnetdata/clocks/clocks.h"
-#include "libnetdata/config/appconfig.h"
-#include "libnetdata/ebpf/ebpf.h"
-#include "libnetdata/procfile/procfile.h"
-#include "collectors/cgroups.plugin/sys_fs_cgroup.h"
-#include "daemon/main.h"
-
-#include "ebpf_apps.h"
-#include "ebpf_functions.h"
-#include "ebpf_cgroup.h"
-
-#define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf"
-#define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
-
-#ifdef LIBBPF_MAJOR_VERSION // BTF code
-#include "includes/cachestat.skel.h"
-#include "includes/dc.skel.h"
-#include "includes/disk.skel.h"
-#include "includes/fd.skel.h"
-#include "includes/hardirq.skel.h"
-#include "includes/mdflush.skel.h"
-#include "includes/mount.skel.h"
-#include "includes/shm.skel.h"
-#include "includes/socket.skel.h"
-#include "includes/swap.skel.h"
-#include "includes/vfs.skel.h"
-
-extern struct cachestat_bpf *cachestat_bpf_obj;
-extern struct dc_bpf *dc_bpf_obj;
-extern struct disk_bpf *disk_bpf_obj;
-extern struct fd_bpf *fd_bpf_obj;
-extern struct hardirq_bpf *hardirq_bpf_obj;
-extern struct mount_bpf *mount_bpf_obj;
-extern struct mdflush_bpf *mdflush_bpf_obj;
-extern struct shm_bpf *shm_bpf_obj;
-extern struct socket_bpf *socket_bpf_obj;
-extern struct swap_bpf *bpf_obj;
-extern struct vfs_bpf *vfs_bpf_obj;
-#endif
-
-typedef struct netdata_syscall_stat {
- unsigned long bytes; // total number of bytes
- uint64_t call; // total number of calls
- uint64_t ecall; // number of calls that returned error
- struct netdata_syscall_stat *next; // Link list
-} netdata_syscall_stat_t;
-
-typedef struct netdata_publish_syscall {
- char *dimension;
- char *name;
- char *algorithm;
- unsigned long nbyte;
- unsigned long pbyte;
- uint64_t ncall;
- uint64_t pcall;
- uint64_t nerr;
- uint64_t perr;
- struct netdata_publish_syscall *next;
-} netdata_publish_syscall_t;
-
-typedef struct netdata_publish_vfs_common {
- long write;
- long read;
-
- long running;
- long zombie;
-} netdata_publish_vfs_common_t;
-
-typedef struct netdata_error_report {
- char comm[16];
- __u32 pid;
-
- int type;
- int err;
-} netdata_error_report_t;
-
-typedef struct netdata_ebpf_judy_pid {
- ARAL *pid_table;
-
- // Index for PIDs
- struct { // support for multiple indexing engines
- Pvoid_t JudyLArray; // the hash table
- RW_SPINLOCK rw_spinlock; // protect the index
- } index;
-} netdata_ebpf_judy_pid_t;
-
-typedef struct netdata_ebpf_judy_pid_stats {
- char *cmdline;
-
- // Index for Socket timestamp
- struct { // support for multiple indexing engines
- Pvoid_t JudyLArray; // the hash table
- RW_SPINLOCK rw_spinlock; // protect the index
- } socket_stats;
-} netdata_ebpf_judy_pid_stats_t;
-
-extern ebpf_module_t ebpf_modules[];
-enum ebpf_main_index {
- EBPF_MODULE_PROCESS_IDX,
- EBPF_MODULE_SOCKET_IDX,
- EBPF_MODULE_CACHESTAT_IDX,
- EBPF_MODULE_SYNC_IDX,
- EBPF_MODULE_DCSTAT_IDX,
- EBPF_MODULE_SWAP_IDX,
- EBPF_MODULE_VFS_IDX,
- EBPF_MODULE_FILESYSTEM_IDX,
- EBPF_MODULE_DISK_IDX,
- EBPF_MODULE_MOUNT_IDX,
- EBPF_MODULE_FD_IDX,
- EBPF_MODULE_HARDIRQ_IDX,
- EBPF_MODULE_SOFTIRQ_IDX,
- EBPF_MODULE_OOMKILL_IDX,
- EBPF_MODULE_SHM_IDX,
- EBPF_MODULE_MDFLUSH_IDX,
- EBPF_MODULE_FUNCTION_IDX,
- /* THREADS MUST BE INCLUDED BEFORE THIS COMMENT */
- EBPF_OPTION_ALL_CHARTS,
- EBPF_OPTION_VERSION,
- EBPF_OPTION_HELP,
- EBPF_OPTION_GLOBAL_CHART,
- EBPF_OPTION_RETURN_MODE,
- EBPF_OPTION_LEGACY,
- EBPF_OPTION_CORE,
- EBPF_OPTION_UNITTEST
-};
-
-typedef struct ebpf_tracepoint {
- bool enabled;
- char *class;
- char *event;
-} ebpf_tracepoint_t;
-
-// Copied from musl header
-#ifndef offsetof
-#if __GNUC__ > 3
-#define offsetof(type, member) __builtin_offsetof(type, member)
-#else
-#define offsetof(type, member) ((size_t)((char *)&(((type *)0)->member) - (char *)0))
-#endif
-#endif
-
-// Messages
-#define NETDATA_EBPF_DEFAULT_FNT_NOT_FOUND "Cannot find the necessary functions to monitor"
-
-// Chart definitions
-#define NETDATA_EBPF_FAMILY "ebpf"
-#define NETDATA_EBPF_IP_FAMILY "ip"
-#define NETDATA_FILESYSTEM_FAMILY "filesystem"
-#define NETDATA_EBPF_MOUNT_GLOBAL_FAMILY "mount_points"
-#define NETDATA_EBPF_CHART_TYPE_LINE "line"
-#define NETDATA_EBPF_CHART_TYPE_STACKED "stacked"
-#define NETDATA_EBPF_MEMORY_GROUP "mem"
-#define NETDATA_EBPF_SYSTEM_GROUP "system"
-#define NETDATA_SYSTEM_SWAP_SUBMENU "swap"
-#define NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU "swap (eBPF)"
-#define NETDATA_SYSTEM_IPC_SHM_SUBMENU "ipc shared memory"
-#define NETDATA_MONITORING_FAMILY "netdata"
-
-// Statistics charts
-#define NETDATA_EBPF_THREADS "ebpf_threads"
-#define NETDATA_EBPF_LIFE_TIME "ebpf_life_time"
-#define NETDATA_EBPF_LOAD_METHOD "ebpf_load_methods"
-#define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory"
-#define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count"
-#define NETDATA_EBPF_HASH_TABLES_PER_CORE "ebpf_hash_tables_per_core"
-#define NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS "ebpf_hash_tables_global_elements"
-#define NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS "ebpf_hash_tables_insert_pid_elements"
-#define NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS "ebpf_hash_tables_remove_pid_elements"
-
-// Log file
-#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
-
-// Maximum number of processors monitored on perf events
-#define NETDATA_MAX_PROCESSOR 512
-
-// Kernel versions calculated with the formula:
-// R = MAJOR*65536 + MINOR*256 + PATCH
-#define NETDATA_KERNEL_V5_3 328448
-#define NETDATA_KERNEL_V4_15 265984
-
-#define EBPF_SYS_CLONE_IDX 11
-#define EBPF_MAX_MAPS 32
-
-#define EBPF_DEFAULT_UPDATE_EVERY 10
-
-enum ebpf_algorithms_list {
- NETDATA_EBPF_ABSOLUTE_IDX,
- NETDATA_EBPF_INCREMENTAL_IDX
-};
-
-// Threads
-void *ebpf_process_thread(void *ptr);
-void *ebpf_socket_thread(void *ptr);
-
-// Common variables
-extern pthread_mutex_t lock;
-extern pthread_mutex_t ebpf_exit_cleanup;
-extern int ebpf_nprocs;
-extern int running_on_kernel;
-extern int isrh;
-extern char *ebpf_plugin_dir;
-extern int process_pid_fd;
-
-extern pthread_mutex_t collect_data_mutex;
-
-// Common functions
-void ebpf_global_labels(netdata_syscall_stat_t *is,
- netdata_publish_syscall_t *pio,
- char **dim,
- char **name,
- int *algorithm,
- int end);
-
-void ebpf_write_chart_cmd(char *type,
- char *id,
- char *suffix,
- char *title,
- char *units,
- char *family,
- char *charttype,
- char *context,
- int order,
- int update_every,
- char *module);
-
-void ebpf_write_global_dimension(char *name, char *id, char *algorithm);
-
-void ebpf_create_global_dimension(void *ptr, int end);
-
-void ebpf_create_chart(char *type,
- char *id,
- char *title,
- char *units,
- char *family,
- char *context,
- char *charttype,
- int order,
- void (*ncd)(void *, int),
- void *move,
- int end,
- int update_every,
- char *module);
-
-void write_chart_dimension(char *dim, long long value);
-
-void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end);
-
-void write_err_chart(char *name, char *family, netdata_publish_syscall_t *move, int end);
-
-void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite,
- char *dread, long long vread);
-
-/**
- * Create Chart labels
- *
- * @param name the label name.
- * @param value the label value.
- * @param origin the labeel source.
- */
-static inline void ebpf_create_chart_labels(char *name, char *value, int source)
-{
- fprintf(stdout, "CLABEL '%s' '%s' %d\n", name, value, source);
-}
-
-/**
- * Commit label
- *
- * Write commit label to stdout
- */
-static inline void ebpf_commit_label()
-{
- fprintf(stdout, "CLABEL_COMMIT\n");
-}
-
-/**
- * Write begin command on standard output
- *
- * @param family the chart family name
- * @param name the chart name
- * @param metric the chart suffix (used with apps and cgroups)
- */
-static inline void ebpf_write_begin_chart(char *family, char *name, char *metric)
-{
- printf("BEGIN %s.%s%s\n", family, name, metric);
-}
-
-/**
- * Write END command on stdout.
- */
-static inline void ebpf_write_end_chart()
-{
- printf("END\n");
-}
-
-int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp);
-int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp);
-uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps);
-
-void ebpf_pid_file(char *filename, size_t length);
-
-#define EBPF_PROGRAMS_SECTION "ebpf programs"
-
-#define EBPF_COMMON_DIMENSION_PERCENTAGE "%"
-#define EBPF_PROGRAMS_SECTION "ebpf programs"
-
-#define EBPF_COMMON_DIMENSION_PERCENTAGE "%"
-#define EBPF_COMMON_DIMENSION_CALL "calls/s"
-#define EBPF_COMMON_DIMENSION_CONNECTIONS "connections/s"
-#define EBPF_COMMON_DIMENSION_BITS "kilobits/s"
-#define EBPF_COMMON_DIMENSION_BYTES "bytes/s"
-#define EBPF_COMMON_DIMENSION_DIFFERENCE "difference"
-#define EBPF_COMMON_DIMENSION_PACKETS "packets"
-#define EBPF_COMMON_DIMENSION_FILES "files"
-#define EBPF_COMMON_DIMENSION_MILLISECONDS "milliseconds"
-#define EBPF_COMMON_DIMENSION_KILLS "kills"
-
-// Common variables
-extern int debug_enabled;
-extern struct ebpf_pid_stat *ebpf_root_of_pids;
-extern ebpf_cgroup_target_t *ebpf_cgroup_pids;
-extern char *ebpf_algorithms[];
-extern struct config collector_config;
-extern netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup;
-extern int shm_fd_ebpf_cgroup;
-extern sem_t *shm_sem_ebpf_cgroup;
-extern pthread_mutex_t mutex_cgroup_shm;
-extern size_t ebpf_all_pids_count;
-extern ebpf_plugin_stats_t plugin_statistics;
-#ifdef LIBBPF_MAJOR_VERSION
-extern struct btf *default_btf;
-#else
-extern void *default_btf;
-#endif
-
-// Socket functions and variables
-// Common functions
-void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr);
-void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root);
-void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1);
-collected_number get_value_from_structure(char *basis, size_t offset);
-void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em);
-void ebpf_write_chart_obsolete(char *type, char *id, char *suffix, char *title, char *units, char *family,
- char *charttype, char *context, int order, int update_every);
-void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end);
-void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
-ARAL *ebpf_allocate_pid_aral(char *name, size_t size);
-void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links);
-
-void ebpf_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values, int map_fd,
- int maps_per_core, uint32_t begin, uint32_t end);
-void **ebpf_judy_insert_unsafe(PPvoid_t arr, Word_t key);
-netdata_ebpf_judy_pid_stats_t *ebpf_get_pid_from_judy_unsafe(PPvoid_t judy_array, uint32_t pid);
-
-void parse_network_viewer_section(struct config *cfg);
-void ebpf_clean_ip_structure(ebpf_network_viewer_ip_list_t **clean);
-void ebpf_clean_port_structure(ebpf_network_viewer_port_list_t **clean);
-void ebpf_read_local_addresses_unsafe();
-
-extern ebpf_filesystem_partitions_t localfs[];
-extern ebpf_sync_syscalls_t local_syscalls[];
-extern bool ebpf_plugin_exit;
-void ebpf_stop_threads(int sig);
-extern netdata_ebpf_judy_pid_t ebpf_judy_pid;
-
-#define EBPF_MAX_SYNCHRONIZATION_TIME 300
-
-#endif /* NETDATA_COLLECTOR_EBPF_H */
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h
deleted file mode 100644
index ba8346934..000000000
--- a/collectors/ebpf.plugin/ebpf_cgroup.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_CGROUP_H
-#define NETDATA_EBPF_CGROUP_H 1
-
-#define NETDATA_EBPF_CGROUP_MAX_TRIES 3
-#define NETDATA_EBPF_CGROUP_NEXT_TRY_SEC 30
-
-#include "ebpf.h"
-#include "ebpf_apps.h"
-
-#define NETDATA_SERVICE_FAMILY "services"
-
-struct pid_on_target2 {
- int32_t pid;
- int updated;
-
- netdata_publish_swap_t swap;
- netdata_fd_stat_t fd;
- netdata_publish_vfs_t vfs;
- ebpf_process_stat_t ps;
- netdata_dcstat_pid_t dc;
- netdata_publish_shm_t shm;
- netdata_socket_t socket;
- netdata_cachestat_pid_t cachestat;
-
- struct pid_on_target2 *next;
-};
-
-enum ebpf_cgroup_flags {
- NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART = 1,
- NETDATA_EBPF_CGROUP_HAS_SWAP_CHART = 1<<2,
- NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART = 1<<3,
- NETDATA_EBPF_CGROUP_HAS_FD_CHART = 1<<4,
- NETDATA_EBPF_CGROUP_HAS_VFS_CHART = 1<<5,
- NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART = 1<<6,
- NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART = 1<<7,
- NETDATA_EBPF_CGROUP_HAS_DC_CHART = 1<<8,
- NETDATA_EBPF_CGROUP_HAS_SHM_CHART = 1<<9
-};
-
-typedef struct ebpf_cgroup_target {
- char name[256]; // title
- uint32_t hash;
- uint32_t flags;
- uint32_t systemd;
- uint32_t updated;
-
- netdata_publish_swap_t publish_systemd_swap;
- netdata_fd_stat_t publish_systemd_fd;
- netdata_publish_vfs_t publish_systemd_vfs;
- ebpf_process_stat_t publish_systemd_ps;
- netdata_publish_dcstat_t publish_dc;
- int oomkill;
- netdata_publish_shm_t publish_shm;
- ebpf_socket_publish_apps_t publish_socket;
- netdata_publish_cachestat_t publish_cachestat;
-
- struct pid_on_target2 *pids;
- struct ebpf_cgroup_target *next;
-} ebpf_cgroup_target_t;
-
-void ebpf_map_cgroup_shared_memory();
-void ebpf_parse_cgroup_shm_data();
-void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, char *context, char *module, int update_every);
-void *ebpf_cgroup_integration(void *ptr);
-void ebpf_unmap_cgroup_shared_memory();
-extern int send_cgroup_chart;
-
-#endif /* NETDATA_EBPF_CGROUP_H */
diff --git a/collectors/ebpf.plugin/ebpf_functions.c b/collectors/ebpf.plugin/ebpf_functions.c
deleted file mode 100644
index 6a481ad64..000000000
--- a/collectors/ebpf.plugin/ebpf_functions.c
+++ /dev/null
@@ -1,1093 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#include "ebpf.h"
-#include "ebpf_functions.h"
-
-/*****************************************************************
- * EBPF FUNCTION COMMON
- *****************************************************************/
-
-/**
- * Function Start thread
- *
- * Start a specific thread after user request.
- *
- * @param em The structure with thread information
- * @param period
- * @return
- */
-static int ebpf_function_start_thread(ebpf_module_t *em, int period)
-{
- struct netdata_static_thread *st = em->thread;
- // another request for thread that already ran, cleanup and restart
- if (st->thread)
- freez(st->thread);
-
- if (period <= 0)
- period = EBPF_DEFAULT_LIFETIME;
-
- st->thread = mallocz(sizeof(netdata_thread_t));
- em->enabled = NETDATA_THREAD_EBPF_FUNCTION_RUNNING;
- em->lifetime = period;
-
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("Starting thread %s with lifetime = %d", em->info.thread_name, period);
-#endif
-
- return netdata_thread_create(st->thread, st->name, NETDATA_THREAD_OPTION_DEFAULT, st->start_routine, em);
-}
-
-/*****************************************************************
- * EBPF SELECT MODULE
- *****************************************************************/
-
-/**
- * Select Module
- *
- * @param thread_name name of the thread we are looking for.
- *
- * @return it returns a pointer for the module that has thread_name on success or NULL otherwise.
-ebpf_module_t *ebpf_functions_select_module(const char *thread_name) {
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- if (strcmp(ebpf_modules[i].info.thread_name, thread_name) == 0) {
- return &ebpf_modules[i];
- }
- }
-
- return NULL;
-}
- */
-
-/*****************************************************************
- * EBPF HELP FUNCTIONS
- *****************************************************************/
-
-/**
- * Thread Help
- *
- * Shows help with all options accepted by thread function.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
-static void ebpf_function_thread_manipulation_help(const char *transaction) {
- BUFFER *wb = buffer_create(0, NULL);
- buffer_sprintf(wb, "%s",
- "ebpf.plugin / thread\n"
- "\n"
- "Function `thread` allows user to control eBPF threads.\n"
- "\n"
- "The following filters are supported:\n"
- "\n"
- " thread:NAME\n"
- " Shows information for the thread NAME. Names are listed inside `ebpf.d.conf`.\n"
- "\n"
- " enable:NAME:PERIOD\n"
- " Enable a specific thread named `NAME` to run a specific PERIOD in seconds. When PERIOD is not\n"
- " specified plugin will use the default 300 seconds\n"
- "\n"
- " disable:NAME\n"
- " Disable a sp.\n"
- "\n"
- "Filters can be combined. Each filter can be given only one time.\n"
- );
-
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600, wb);
-
- buffer_free(wb);
-}
-*/
-
-/*****************************************************************
- * EBPF ERROR FUNCTIONS
- *****************************************************************/
-
-/**
- * Function error
- *
- * Show error when a wrong function is given
- *
- * @param transaction the transaction id that Netdata sent for this function execution
- * @param code the error code to show with the message.
- * @param msg the error message
- */
-static void ebpf_function_error(const char *transaction, int code, const char *msg) {
- pluginsd_function_json_error_to_stdout(transaction, code, msg);
-}
-
-/*****************************************************************
- * EBPF THREAD FUNCTION
- *****************************************************************/
-
-/**
- * Function: thread
- *
- * Enable a specific thread.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
- * @param function function name and arguments given to thread.
- * @param line_buffer buffer used to parse args
- * @param line_max Number of arguments given
- * @param timeout The function timeout
- * @param em The structure with thread information
-static void ebpf_function_thread_manipulation(const char *transaction,
- char *function __maybe_unused,
- char *line_buffer __maybe_unused,
- int line_max __maybe_unused,
- int timeout __maybe_unused,
- ebpf_module_t *em)
-{
- char *words[PLUGINSD_MAX_WORDS] = { NULL };
- char message[512];
- uint32_t show_specific_thread = 0;
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS);
- for(int i = 1; i < PLUGINSD_MAX_WORDS ;i++) {
- const char *keyword = get_word(words, num_words, i);
- if (!keyword)
- break;
-
- ebpf_module_t *lem;
- if(strncmp(keyword, EBPF_THREADS_ENABLE_CATEGORY, sizeof(EBPF_THREADS_ENABLE_CATEGORY) -1) == 0) {
- char thread_name[128];
- int period = -1;
- const char *name = &keyword[sizeof(EBPF_THREADS_ENABLE_CATEGORY) - 1];
- char *separator = strchr(name, ':');
- if (separator) {
- strncpyz(thread_name, name, separator - name);
- period = str2i(++separator);
- } else {
- strncpyz(thread_name, name, strlen(name));
- }
-
- lem = ebpf_functions_select_module(thread_name);
- if (!lem) {
- snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name);
- ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message);
- return;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (lem->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- // Load configuration again
- ebpf_update_module(lem, default_btf, running_on_kernel, isrh);
-
- if (ebpf_function_start_thread(lem, period)) {
- ebpf_function_error(transaction,
- HTTP_RESP_INTERNAL_SERVER_ERROR,
- "Cannot start thread.");
- return;
- }
- } else {
- lem->running_time = 0;
- if (period > 0) // user is modifying period to run
- lem->lifetime = period;
-#ifdef NETDATA_INTERNAL_CHECKS
- netdata_log_info("Thread %s had lifetime updated for %d", thread_name, period);
-#endif
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- } else if(strncmp(keyword, EBPF_THREADS_DISABLE_CATEGORY, sizeof(EBPF_THREADS_DISABLE_CATEGORY) -1) == 0) {
- const char *name = &keyword[sizeof(EBPF_THREADS_DISABLE_CATEGORY) - 1];
- lem = ebpf_functions_select_module(name);
- if (!lem) {
- snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name);
- ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message);
- return;
- }
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (lem->enabled < NETDATA_THREAD_EBPF_STOPPING && lem->thread->thread) {
- lem->lifetime = 0;
- lem->running_time = lem->update_every;
- netdata_thread_cancel(*lem->thread->thread);
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- } else if(strncmp(keyword, EBPF_THREADS_SELECT_THREAD, sizeof(EBPF_THREADS_SELECT_THREAD) -1) == 0) {
- const char *name = &keyword[sizeof(EBPF_THREADS_SELECT_THREAD) - 1];
- lem = ebpf_functions_select_module(name);
- if (!lem) {
- snprintfz(message, sizeof(message) - 1, "%s%s", EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND, name);
- ebpf_function_error(transaction, HTTP_RESP_NOT_FOUND, message);
- return;
- }
-
- show_specific_thread |= 1<<lem->thread_id;
- } else if(strncmp(keyword, "help", 4) == 0) {
- ebpf_function_thread_manipulation_help(transaction);
- return;
- }
- }
-
- time_t expires = now_realtime_sec() + em->update_every;
-
- BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", em->update_every);
- buffer_json_member_add_string(wb, "help", EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION);
-
- // Collect data
- buffer_json_member_add_array(wb, "data");
- int i;
- for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
- if (show_specific_thread && !(show_specific_thread & 1<<i))
- continue;
-
- ebpf_module_t *wem = &ebpf_modules[i];
- buffer_json_add_array_item_array(wb);
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE FIELDS!
-
- // thread name
- buffer_json_add_array_item_string(wb, wem->info.thread_name);
-
- // description
- buffer_json_add_array_item_string(wb, wem->info.thread_description);
- // Either it is not running or received a disabled signal and it is stopping.
- if (wem->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING ||
- (!wem->lifetime && (int)wem->running_time == wem->update_every)) {
- // status
- buffer_json_add_array_item_string(wb, EBPF_THREAD_STATUS_STOPPED);
-
- // Time remaining
- buffer_json_add_array_item_uint64(wb, 0);
-
- // action
- buffer_json_add_array_item_string(wb, "NULL");
- } else {
- // status
- buffer_json_add_array_item_string(wb, EBPF_THREAD_STATUS_RUNNING);
-
- // Time remaining
- buffer_json_add_array_item_uint64(wb, (wem->lifetime) ? (wem->lifetime - wem->running_time) : 0);
-
- // action
- buffer_json_add_array_item_string(wb, "Enabled/Disabled");
- }
-
- buffer_json_array_close(wb);
- }
-
- buffer_json_array_close(wb); // data
-
- buffer_json_member_add_object(wb, "columns");
- {
- int fields_id = 0;
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE VALUES!
- buffer_rrdf_table_add_field(wb, fields_id++, "Thread", "Thread Name", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY | RRDF_FIELD_OPTS_UNIQUE_KEY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Description", "Thread Desc", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Status", "Thread Status", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Time", "Time Remaining", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL,
- NAN, RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_NONE, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Action", "Thread Action", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
- }
- buffer_json_object_close(wb); // columns
-
- buffer_json_member_add_string(wb, "default_sort_column", "Thread");
-
- buffer_json_member_add_object(wb, "charts");
- {
- // Threads
- buffer_json_member_add_object(wb, "eBPFThreads");
- {
- buffer_json_member_add_string(wb, "name", "Threads");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Threads");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // Life Time
- buffer_json_member_add_object(wb, "eBPFLifeTime");
- {
- buffer_json_member_add_string(wb, "name", "LifeTime");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Threads");
- buffer_json_add_array_item_string(wb, "Time");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // charts
-
- // Do we use only on fields that can be groupped?
- buffer_json_member_add_object(wb, "group_by");
- {
- // group by Status
- buffer_json_member_add_object(wb, "Status");
- {
- buffer_json_member_add_string(wb, "name", "Thread status");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Status");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", expires);
- buffer_json_finalize(wb);
-
- // Lock necessary to avoid race condition
- pluginsd_function_result_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires, wb);
-
- buffer_free(wb);
-}
- */
-
-/*****************************************************************
- * EBPF SOCKET FUNCTION
- *****************************************************************/
-
-/**
- * Thread Help
- *
- * Shows help with all options accepted by thread function.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
-*/
-static void ebpf_function_socket_help(const char *transaction) {
- pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "text/plain", now_realtime_sec() + 3600);
- fprintf(stdout, "%s",
- "ebpf.plugin / socket\n"
- "\n"
- "Function `socket` display information for all open sockets during ebpf.plugin runtime.\n"
- "During thread runtime the plugin is always collecting data, but when an option is modified, the plugin\n"
- "resets completely the previous table and can show a clean data for the first request before to bring the\n"
- "modified request.\n"
- "\n"
- "The following filters are supported:\n"
- "\n"
- " family:FAMILY\n"
- " Shows information for the FAMILY specified. Option accepts IPV4, IPV6 and all, that is the default.\n"
- "\n"
- " period:PERIOD\n"
- " Enable socket to run a specific PERIOD in seconds. When PERIOD is not\n"
- " specified plugin will use the default 300 seconds\n"
- "\n"
- " resolve:BOOL\n"
- " Resolve service name, default value is YES.\n"
- "\n"
- " range:CIDR\n"
- " Show sockets that have only a specific destination. Default all addresses.\n"
- "\n"
- " port:range\n"
- " Show sockets that have only a specific destination.\n"
- "\n"
- " reset\n"
- " Send a reset to collector. When a collector receives this command, it uses everything defined in configuration file.\n"
- "\n"
- " interfaces\n"
- " When the collector receives this command, it read all available interfaces on host.\n"
- "\n"
- "Filters can be combined. Each filter can be given only one time. Default all ports\n"
- );
- pluginsd_function_result_end_to_stdout();
- fflush(stdout);
-}
-
-/**
- * Fill Fake socket
- *
- * Fill socket with an invalid request.
- *
- * @param fake_values is the structure where we are storing the value.
- */
-static inline void ebpf_socket_fill_fake_socket(netdata_socket_plus_t *fake_values)
-{
- snprintfz(fake_values->socket_string.src_ip, INET6_ADDRSTRLEN, "%s", "127.0.0.1");
- snprintfz(fake_values->socket_string.dst_ip, INET6_ADDRSTRLEN, "%s", "127.0.0.1");
- fake_values->pid = getpid();
- //fake_values->socket_string.src_port = 0;
- fake_values->socket_string.dst_port[0] = 0;
- snprintfz(fake_values->socket_string.dst_ip, NI_MAXSERV, "%s", "none");
- fake_values->data.family = AF_INET;
- fake_values->data.protocol = AF_UNSPEC;
-}
-
-/**
- * Fill function buffer
- *
- * Fill buffer with data to be shown on cloud.
- *
- * @param wb buffer where we store data.
- * @param values data read from hash table
- * @param name the process name
- */
-static void ebpf_fill_function_buffer(BUFFER *wb, netdata_socket_plus_t *values, char *name)
-{
- buffer_json_add_array_item_array(wb);
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE FIELDS!
-
- // PID
- buffer_json_add_array_item_uint64(wb, (uint64_t)values->pid);
-
- // NAME
- buffer_json_add_array_item_string(wb, (name) ? name : "not identified");
-
- // Origin
- buffer_json_add_array_item_string(wb, (values->data.external_origin) ? "incoming" : "outgoing");
-
- // Source IP
- buffer_json_add_array_item_string(wb, values->socket_string.src_ip);
-
- // SRC Port
- //buffer_json_add_array_item_uint64(wb, (uint64_t) values->socket_string.src_port);
-
- // Destination IP
- buffer_json_add_array_item_string(wb, values->socket_string.dst_ip);
-
- // DST Port
- buffer_json_add_array_item_string(wb, values->socket_string.dst_port);
-
- uint64_t connections;
- if (values->data.protocol == IPPROTO_TCP) {
- // Protocol
- buffer_json_add_array_item_string(wb, "TCP");
-
- // Bytes received
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.tcp.tcp_bytes_received);
-
- // Bytes sent
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.tcp.tcp_bytes_sent);
-
- // Connections
- connections = values->data.tcp.ipv4_connect + values->data.tcp.ipv6_connect;
- } else if (values->data.protocol == IPPROTO_UDP) {
- // Protocol
- buffer_json_add_array_item_string(wb, "UDP");
-
- // Bytes received
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.udp.udp_bytes_received);
-
- // Bytes sent
- buffer_json_add_array_item_uint64(wb, (uint64_t) values->data.udp.udp_bytes_sent);
-
- // Connections
- connections = values->data.udp.call_udp_sent + values->data.udp.call_udp_received;
- } else {
- // Protocol
- buffer_json_add_array_item_string(wb, "UNSPEC");
-
- // Bytes received
- buffer_json_add_array_item_uint64(wb, 0);
-
- // Bytes sent
- buffer_json_add_array_item_uint64(wb, 0);
-
- connections = 1;
- }
-
- // Connections
- if (values->flags & NETDATA_SOCKET_FLAGS_ALREADY_OPEN) {
- connections++;
- } else if (!connections) {
- // If no connections, this means that we lost when connection was opened
- values->flags |= NETDATA_SOCKET_FLAGS_ALREADY_OPEN;
- connections++;
- }
- buffer_json_add_array_item_uint64(wb, connections);
-
- buffer_json_array_close(wb);
-}
-
-/**
- * Clean Judy array unsafe
- *
- * Clean all Judy Array allocated to show table when a function is called.
- * Before to call this function it is necessary to lock `ebpf_judy_pid.index.rw_spinlock`.
- **/
-static void ebpf_socket_clean_judy_array_unsafe()
-{
- if (!ebpf_judy_pid.index.JudyLArray)
- return;
-
- Pvoid_t *pid_value, *socket_value;
- Word_t local_pid = 0, local_socket = 0;
- bool first_pid = true, first_socket = true;
- while ((pid_value = JudyLFirstThenNext(ebpf_judy_pid.index.JudyLArray, &local_pid, &first_pid))) {
- netdata_ebpf_judy_pid_stats_t *pid_ptr = (netdata_ebpf_judy_pid_stats_t *)*pid_value;
- rw_spinlock_write_lock(&pid_ptr->socket_stats.rw_spinlock);
- if (pid_ptr->socket_stats.JudyLArray) {
- while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_socket, &first_socket))) {
- netdata_socket_plus_t *socket_clean = *socket_value;
- aral_freez(aral_socket_table, socket_clean);
- }
- JudyLFreeArray(&pid_ptr->socket_stats.JudyLArray, PJE0);
- pid_ptr->socket_stats.JudyLArray = NULL;
- }
- rw_spinlock_write_unlock(&pid_ptr->socket_stats.rw_spinlock);
- }
-}
-
-/**
- * Fill function buffer unsafe
- *
- * Fill the function buffer with socket information. Before to call this function it is necessary to lock
- * ebpf_judy_pid.index.rw_spinlock
- *
- * @param buf buffer used to store data to be shown by function.
- *
- * @return it returns 0 on success and -1 otherwise.
- */
-static void ebpf_socket_fill_function_buffer_unsafe(BUFFER *buf)
-{
- int counter = 0;
-
- Pvoid_t *pid_value, *socket_value;
- Word_t local_pid = 0;
- bool first_pid = true;
- while ((pid_value = JudyLFirstThenNext(ebpf_judy_pid.index.JudyLArray, &local_pid, &first_pid))) {
- netdata_ebpf_judy_pid_stats_t *pid_ptr = (netdata_ebpf_judy_pid_stats_t *)*pid_value;
- bool first_socket = true;
- Word_t local_timestamp = 0;
- rw_spinlock_read_lock(&pid_ptr->socket_stats.rw_spinlock);
- if (pid_ptr->socket_stats.JudyLArray) {
- while ((socket_value = JudyLFirstThenNext(pid_ptr->socket_stats.JudyLArray, &local_timestamp, &first_socket))) {
- netdata_socket_plus_t *values = (netdata_socket_plus_t *)*socket_value;
- ebpf_fill_function_buffer(buf, values, pid_ptr->cmdline);
- }
- counter++;
- }
- rw_spinlock_read_unlock(&pid_ptr->socket_stats.rw_spinlock);
- }
-
- if (!counter) {
- netdata_socket_plus_t fake_values = { };
- ebpf_socket_fill_fake_socket(&fake_values);
- ebpf_fill_function_buffer(buf, &fake_values, NULL);
- }
-}
-
-/**
- * Socket read hash
- *
- * This is the thread callback.
- * This thread is necessary, because we cannot freeze the whole plugin to read the data on very busy socket.
- *
- * @param buf the buffer to store data;
- * @param em the module main structure.
- *
- * @return It always returns NULL.
- */
-void ebpf_socket_read_open_connections(BUFFER *buf, struct ebpf_module *em)
-{
- // thread was not initialized or Array was reset
- rw_spinlock_read_lock(&ebpf_judy_pid.index.rw_spinlock);
- if (!em->maps || (em->maps[NETDATA_SOCKET_OPEN_SOCKET].map_fd == ND_EBPF_MAP_FD_NOT_INITIALIZED) ||
- !ebpf_judy_pid.index.JudyLArray){
- netdata_socket_plus_t fake_values = { };
-
- ebpf_socket_fill_fake_socket(&fake_values);
-
- ebpf_fill_function_buffer(buf, &fake_values, NULL);
- rw_spinlock_read_unlock(&ebpf_judy_pid.index.rw_spinlock);
- return;
- }
-
- rw_spinlock_read_lock(&network_viewer_opt.rw_spinlock);
- ebpf_socket_fill_function_buffer_unsafe(buf);
- rw_spinlock_read_unlock(&network_viewer_opt.rw_spinlock);
- rw_spinlock_read_unlock(&ebpf_judy_pid.index.rw_spinlock);
-}
-
-/**
- * Function: Socket
- *
- * Show information for sockets stored in hash tables.
- *
- * @param transaction the transaction id that Netdata sent for this function execution
- * @param function function name and arguments given to thread.
- * @param timeout The function timeout
- * @param cancelled Variable used to store function status.
- */
-static void ebpf_function_socket_manipulation(const char *transaction,
- char *function __maybe_unused,
- int timeout __maybe_unused,
- bool *cancelled __maybe_unused)
-{
- UNUSED(timeout);
- ebpf_module_t *em = &ebpf_modules[EBPF_MODULE_SOCKET_IDX];
-
- char *words[PLUGINSD_MAX_WORDS] = {NULL};
- size_t num_words = quoted_strings_splitter_pluginsd(function, words, PLUGINSD_MAX_WORDS);
- const char *name;
- int period = -1;
- rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- network_viewer_opt.enabled = CONFIG_BOOLEAN_YES;
- uint32_t previous;
-
- for (int i = 1; i < PLUGINSD_MAX_WORDS; i++) {
- const char *keyword = get_word(words, num_words, i);
- if (!keyword)
- break;
-
- if (strncmp(keyword, EBPF_FUNCTION_SOCKET_FAMILY, sizeof(EBPF_FUNCTION_SOCKET_FAMILY) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_FAMILY) - 1];
- previous = network_viewer_opt.family;
- uint32_t family = AF_UNSPEC;
- if (!strcmp(name, "IPV4"))
- family = AF_INET;
- else if (!strcmp(name, "IPV6"))
- family = AF_INET6;
-
- if (family != previous) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- network_viewer_opt.family = family;
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
- ebpf_socket_clean_judy_array_unsafe();
- }
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_PERIOD, sizeof(EBPF_FUNCTION_SOCKET_PERIOD) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_PERIOD) - 1];
- pthread_mutex_lock(&ebpf_exit_cleanup);
- period = str2i(name);
- if (period > 0) {
- em->lifetime = period;
- } else
- em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
-
-#ifdef NETDATA_DEV_MODE
- collector_info("Lifetime modified for %u", em->lifetime);
-#endif
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RESOLVE, sizeof(EBPF_FUNCTION_SOCKET_RESOLVE) - 1) == 0) {
- previous = network_viewer_opt.service_resolution_enabled;
- uint32_t resolution;
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_RESOLVE) - 1];
- resolution = (!strcasecmp(name, "YES")) ? CONFIG_BOOLEAN_YES : CONFIG_BOOLEAN_NO;
-
- if (previous != resolution) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- network_viewer_opt.service_resolution_enabled = resolution;
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- ebpf_socket_clean_judy_array_unsafe();
- }
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RANGE, sizeof(EBPF_FUNCTION_SOCKET_RANGE) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_RANGE) - 1];
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_clean_ip_structure(&network_viewer_opt.included_ips);
- ebpf_clean_ip_structure(&network_viewer_opt.excluded_ips);
- ebpf_parse_ips_unsafe((char *)name);
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- ebpf_socket_clean_judy_array_unsafe();
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_PORT, sizeof(EBPF_FUNCTION_SOCKET_PORT) - 1) == 0) {
- name = &keyword[sizeof(EBPF_FUNCTION_SOCKET_PORT) - 1];
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_clean_port_structure(&network_viewer_opt.included_port);
- ebpf_clean_port_structure(&network_viewer_opt.excluded_port);
- ebpf_parse_ports((char *)name);
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
-
- ebpf_socket_clean_judy_array_unsafe();
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_RESET, sizeof(EBPF_FUNCTION_SOCKET_RESET) - 1) == 0) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_clean_port_structure(&network_viewer_opt.included_port);
- ebpf_clean_port_structure(&network_viewer_opt.excluded_port);
-
- ebpf_clean_ip_structure(&network_viewer_opt.included_ips);
- ebpf_clean_ip_structure(&network_viewer_opt.excluded_ips);
- ebpf_clean_ip_structure(&network_viewer_opt.ipv4_local_ip);
- ebpf_clean_ip_structure(&network_viewer_opt.ipv6_local_ip);
-
- parse_network_viewer_section(&socket_config);
- ebpf_read_local_addresses_unsafe();
- network_viewer_opt.enabled = CONFIG_BOOLEAN_YES;
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
- } else if (strncmp(keyword, EBPF_FUNCTION_SOCKET_INTERFACES, sizeof(EBPF_FUNCTION_SOCKET_INTERFACES) - 1) == 0) {
- rw_spinlock_write_lock(&network_viewer_opt.rw_spinlock);
- ebpf_read_local_addresses_unsafe();
- rw_spinlock_write_unlock(&network_viewer_opt.rw_spinlock);
- } else if (strncmp(keyword, "help", 4) == 0) {
- ebpf_function_socket_help(transaction);
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
- return;
- }
- }
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
-
- pthread_mutex_lock(&ebpf_exit_cleanup);
- if (em->enabled > NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
- // Cleanup when we already had a thread running
- rw_spinlock_write_lock(&ebpf_judy_pid.index.rw_spinlock);
- ebpf_socket_clean_judy_array_unsafe();
- rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
-
- if (ebpf_function_start_thread(em, period)) {
- ebpf_function_error(transaction,
- HTTP_RESP_INTERNAL_SERVER_ERROR,
- "Cannot start thread.");
- pthread_mutex_unlock(&ebpf_exit_cleanup);
- return;
- }
- } else {
- if (period < 0 && em->lifetime < EBPF_NON_FUNCTION_LIFE_TIME) {
- em->lifetime = EBPF_NON_FUNCTION_LIFE_TIME;
- }
- }
- pthread_mutex_unlock(&ebpf_exit_cleanup);
-
- time_t expires = now_realtime_sec() + em->update_every;
-
- BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, false);
- buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
- buffer_json_member_add_string(wb, "type", "table");
- buffer_json_member_add_time_t(wb, "update_every", em->update_every);
- buffer_json_member_add_string(wb, "help", EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION);
-
- // Collect data
- buffer_json_member_add_array(wb, "data");
- ebpf_socket_read_open_connections(wb, em);
- buffer_json_array_close(wb); // data
-
- buffer_json_member_add_object(wb, "columns");
- {
- int fields_id = 0;
-
- // IMPORTANT!
- // THE ORDER SHOULD BE THE SAME WITH THE VALUES!
- buffer_rrdf_table_add_field(wb, fields_id++, "PID", "Process ID", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Process Name", "Process Name", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Origin", "The connection origin.", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Request from", "Request from IP", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- /*
- buffer_rrdf_table_add_field(wb, fields_id++, "SRC PORT", "Source Port", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
- */
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Destination IP", "Destination IP", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Destination Port", "Destination Port", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Protocol", "Communication protocol", RRDF_FIELD_TYPE_STRING,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NONE, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY, NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Incoming Bandwidth", "Bytes received.", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id++, "Outgoing Bandwidth", "Bytes sent.", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
-
- buffer_rrdf_table_add_field(wb, fields_id, "Connections", "Number of calls to tcp_vX_connections and udp_sendmsg, where X is the protocol version.", RRDF_FIELD_TYPE_INTEGER,
- RRDF_FIELD_VISUAL_VALUE, RRDF_FIELD_TRANSFORM_NUMBER, 0, NULL, NAN,
- RRDF_FIELD_SORT_ASCENDING, NULL, RRDF_FIELD_SUMMARY_COUNT,
- RRDF_FIELD_FILTER_MULTISELECT,
- RRDF_FIELD_OPTS_VISIBLE | RRDF_FIELD_OPTS_STICKY,
- NULL);
- }
- buffer_json_object_close(wb); // columns
-
- buffer_json_member_add_object(wb, "charts");
- {
- // OutBound Connections
- buffer_json_member_add_object(wb, "IPInboundConn");
- {
- buffer_json_member_add_string(wb, "name", "TCP Inbound Connection");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "connected_tcp");
- buffer_json_add_array_item_string(wb, "connected_udp");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // OutBound Connections
- buffer_json_member_add_object(wb, "IPTCPOutboundConn");
- {
- buffer_json_member_add_string(wb, "name", "TCP Outbound Connection");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "connected_V4");
- buffer_json_add_array_item_string(wb, "connected_V6");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // TCP Functions
- buffer_json_member_add_object(wb, "TCPFunctions");
- {
- buffer_json_member_add_string(wb, "name", "TCPFunctions");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- buffer_json_add_array_item_string(wb, "close");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // TCP Bandwidth
- buffer_json_member_add_object(wb, "TCPBandwidth");
- {
- buffer_json_member_add_string(wb, "name", "TCPBandwidth");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // UDP Functions
- buffer_json_member_add_object(wb, "UDPFunctions");
- {
- buffer_json_member_add_string(wb, "name", "UDPFunctions");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // UDP Bandwidth
- buffer_json_member_add_object(wb, "UDPBandwidth");
- {
- buffer_json_member_add_string(wb, "name", "UDPBandwidth");
- buffer_json_member_add_string(wb, "type", "line");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "received");
- buffer_json_add_array_item_string(wb, "sent");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- }
- buffer_json_object_close(wb); // charts
-
- buffer_json_member_add_string(wb, "default_sort_column", "PID");
-
- // Do we use only on fields that can be groupped?
- buffer_json_member_add_object(wb, "group_by");
- {
- // group by PID
- buffer_json_member_add_object(wb, "PID");
- {
- buffer_json_member_add_string(wb, "name", "Process ID");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "PID");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Process Name
- buffer_json_member_add_object(wb, "Process Name");
- {
- buffer_json_member_add_string(wb, "name", "Process Name");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Process Name");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Process Name
- buffer_json_member_add_object(wb, "Origin");
- {
- buffer_json_member_add_string(wb, "name", "Origin");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Origin");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Request From IP
- buffer_json_member_add_object(wb, "Request from");
- {
- buffer_json_member_add_string(wb, "name", "Request from IP");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Request from");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Destination IP
- buffer_json_member_add_object(wb, "Destination IP");
- {
- buffer_json_member_add_string(wb, "name", "Destination IP");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Destination IP");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by DST Port
- buffer_json_member_add_object(wb, "Destination Port");
- {
- buffer_json_member_add_string(wb, "name", "Destination Port");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Destination Port");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
-
- // group by Protocol
- buffer_json_member_add_object(wb, "Protocol");
- {
- buffer_json_member_add_string(wb, "name", "Protocol");
- buffer_json_member_add_array(wb, "columns");
- {
- buffer_json_add_array_item_string(wb, "Protocol");
- }
- buffer_json_array_close(wb);
- }
- buffer_json_object_close(wb);
- }
- buffer_json_object_close(wb); // group_by
-
- buffer_json_member_add_time_t(wb, "expires", expires);
- buffer_json_finalize(wb);
-
- // Lock necessary to avoid race condition
- pluginsd_function_result_begin_to_stdout(transaction, HTTP_RESP_OK, "application/json", expires);
-
- fwrite(buffer_tostring(wb), buffer_strlen(wb), 1, stdout);
-
- pluginsd_function_result_end_to_stdout();
- fflush(stdout);
-
- buffer_free(wb);
-}
-
-/*****************************************************************
- * EBPF FUNCTION THREAD
- *****************************************************************/
-
-/**
- * FUNCTION thread.
- *
- * @param ptr a `ebpf_module_t *`.
- *
- * @return always NULL.
- */
-void *ebpf_function_thread(void *ptr)
-{
- (void)ptr;
-
- struct functions_evloop_globals *wg = functions_evloop_init(1,
- "EBPF",
- &lock,
- &ebpf_plugin_exit);
-
- functions_evloop_add_function(wg,
- "ebpf_socket",
- ebpf_function_socket_manipulation,
- PLUGINS_FUNCTIONS_TIMEOUT_DEFAULT);
-
- heartbeat_t hb;
- heartbeat_init(&hb);
- while(!ebpf_plugin_exit) {
- (void)heartbeat_next(&hb, USEC_PER_SEC);
-
- if (ebpf_plugin_exit) {
- break;
- }
- }
-
- return NULL;
-}
diff --git a/collectors/ebpf.plugin/ebpf_functions.h b/collectors/ebpf.plugin/ebpf_functions.h
deleted file mode 100644
index 795703b42..000000000
--- a/collectors/ebpf.plugin/ebpf_functions.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-3.0-or-later
-
-#ifndef NETDATA_EBPF_FUNCTIONS_H
-#define NETDATA_EBPF_FUNCTIONS_H 1
-
-#ifdef NETDATA_DEV_MODE
-// Common
-static inline void EBPF_PLUGIN_FUNCTIONS(const char *NAME, const char *DESC) {
- fprintf(stdout, "%s \"%s\" 10 \"%s\"\n", PLUGINSD_KEYWORD_FUNCTION, NAME, DESC);
-}
-#endif
-
-// configuration file & description
-#define NETDATA_DIRECTORY_FUNCTIONS_CONFIG_FILE "functions.conf"
-#define NETDATA_EBPF_FUNCTIONS_MODULE_DESC "Show information about current function status."
-
-// function list
-#define EBPF_FUNCTION_THREAD "ebpf_thread"
-#define EBPF_FUNCTION_SOCKET "ebpf_socket"
-
-// thread constants
-#define EBPF_PLUGIN_THREAD_FUNCTION_DESCRIPTION "Detailed information about eBPF threads."
-#define EBPF_PLUGIN_THREAD_FUNCTION_ERROR_THREAD_NOT_FOUND "ebpf.plugin does not have thread named "
-
-#define EBPF_THREADS_SELECT_THREAD "thread:"
-#define EBPF_THREADS_ENABLE_CATEGORY "enable:"
-#define EBPF_THREADS_DISABLE_CATEGORY "disable:"
-
-#define EBPF_THREAD_STATUS_RUNNING "running"
-#define EBPF_THREAD_STATUS_STOPPED "stopped"
-
-// socket constants
-#define EBPF_PLUGIN_SOCKET_FUNCTION_DESCRIPTION "Detailed information about open sockets."
-#define EBPF_FUNCTION_SOCKET_FAMILY "family:"
-#define EBPF_FUNCTION_SOCKET_PERIOD "period:"
-#define EBPF_FUNCTION_SOCKET_RESOLVE "resolve:"
-#define EBPF_FUNCTION_SOCKET_RANGE "range:"
-#define EBPF_FUNCTION_SOCKET_PORT "port:"
-#define EBPF_FUNCTION_SOCKET_RESET "reset"
-#define EBPF_FUNCTION_SOCKET_INTERFACES "interfaces"
-
-void *ebpf_function_thread(void *ptr);
-
-#endif
diff --git a/collectors/ebpf.plugin/metadata.yaml b/collectors/ebpf.plugin/metadata.yaml
deleted file mode 100644
index 97b5df389..000000000
--- a/collectors/ebpf.plugin/metadata.yaml
+++ /dev/null
@@ -1,3320 +0,0 @@
-plugin_name: ebpf.plugin
-modules:
- - meta:
- plugin_name: ebpf.plugin
- module_name: filedescriptor
- monitored_instance:
- name: eBPF Filedescriptor
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - file
- - eBPF
- - fd
- - open
- - close
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor calls for functions responsible to open or close a file descriptor and possible errors."
- method_description: "Attach tracing (kprobe and trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "Depending of kernel version and frequency that files are open and close, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/fd.conf"
- description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
- description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
- description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
- description: Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: "These Metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.fd_open
- description: Number of open files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.fd_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.fd_closed
- description: Files closed
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: close
- - name: cgroup.fd_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: close
- - name: services.file_open
- description: Number of open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_closed
- description: Files closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: filesystem.file_descriptor
- description: Open and close calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: close
- - name: filesystem.file_error
- description: Open fails
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: close
- - name: apps
- description: "These Metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_file_open
- description: Number of open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_file_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_file_closed
- description: Files closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_file_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - meta:
- plugin_name: ebpf.plugin
- module_name: processes
- monitored_instance:
- name: eBPF Processes
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - thread
- - fork
- - process
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor calls for function creating tasks (threads and processes) inside Linux kernel."
- method_description: "Attach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
- This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/process.conf"
- description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
- description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
- description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). This plugin will always try to attach a tracepoint, so option here will impact only function used to monitor task (thread and process) creation."
- default_value: trampoline
- required: false
- - name: maps per core
- description: Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: system.process_thread
- description: Start process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: system.process_status
- description: Process not closed
- unit: "difference"
- chart_type: line
- dimensions:
- - name: process
- - name: zombie
- - name: system.exit
- description: Exit process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: system.task_error
- description: Fails to create process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: task
- - name: apps
- description: "These Metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.process_create
- description: Process started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: call
- - name: app.task_exit
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: call
- - name: app.task_close
- description: Tasks closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: call
- - name: app.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: app
- - name: cgroup
- description: "These Metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.process_create
- description: Process started
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: cgroup.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: thread
- - name: cgroup.task_exit
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: exit
- - name: cgroup.task_close
- description: Tasks closed
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: cgroup.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: services.process_create
- description: Process started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_close
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_exit
- description: Tasks closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: disk
- monitored_instance:
- name: eBPF Disk
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - hard Disk
- - eBPF
- - latency
- - partition
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Measure latency for I/O events on disk."
- method_description: "Attach tracepoints to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
- This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/disk.conf"
- description: "Overwrite default configuration reducing number of I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: disk
- description: "These metrics measure latency for I/O events on every hard disk present on host."
- labels: []
- metrics:
- - name: disk.latency_io
- description: Disk latency
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency
- - meta:
- plugin_name: ebpf.plugin
- module_name: hardirq
- monitored_instance:
- name: eBPF Hardirq
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - HardIRQ
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor latency for each HardIRQ available."
- method_description: "Attach tracepoints to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
- This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/hardirq.conf"
- description: "Overwrite default configuration reducing number of I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show latest timestamp for each hardIRQ available on host."
- labels: []
- metrics:
- - name: system.hardirq_latency
- description: Hard IRQ latency
- unit: "milliseconds"
- chart_type: stacked
- dimensions:
- - name: hardirq names
- - meta:
- plugin_name: ebpf.plugin
- module_name: cachestat
- monitored_instance:
- name: eBPF Cachestat
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Page cache
- - Hit ratio
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor Linux page cache events giving for users a general vision about how his kernel is manipulating files."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/cachestat.conf"
- description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
- description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
- description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
- description: Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: mem.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: mem.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: dirty
- - name: mem.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hit
- - name: mem.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: miss
- - name: apps
- description: "These Metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_cachestat_hit_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: app.ebpf_cachestat_dirty_pages
- description: Number of dirty pages
- unit: "page/s"
- chart_type: stacked
- dimensions:
- - name: pages
- - name: app.ebpf_cachestat_access
- description: Number of accessed files
- unit: "hits/s"
- chart_type: stacked
- dimensions:
- - name: hits
- - name: app.ebpf_cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: stacked
- dimensions:
- - name: misses
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: dirty
- - name: cgroup.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hit
- - name: cgroup.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: miss
- - name: services.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: sync
- monitored_instance:
- name: eBPF Sync
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - syscall
- - eBPF
- - hard disk
- - memory
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor syscall responsible to move data from memory to storage device."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
- This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).
- configuration:
- file:
- name: "ebpf.d/sync.conf"
- description: "Overwrite default configuration and allows user to select charts visible on dashboard."
- options:
- description: |
- This configuration file have two different sections. The `[global]` overwrites all default options, while `[syscalls]` allow user to select the syscall to monitor.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
- description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
- description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
- description: Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- - name: sync
- description: Enable or disable monitoring for syscall `sync`
- default_value: yes
- required: false
- - name: msync
- description: Enable or disable monitoring for syscall `msync`
- default_value: yes
- required: false
- - name: fsync
- description: Enable or disable monitoring for syscall `fsync`
- default_value: yes
- required: false
- - name: fdatasync
- description: Enable or disable monitoring for syscall `fdatasync`
- default_value: yes
- required: false
- - name: syncfs
- description: Enable or disable monitoring for syscall `syncfs`
- default_value: yes
- required: false
- - name: sync_file_range
- description: Enable or disable monitoring for syscall `sync_file_range`
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: sync_freq
- link: https://github.com/netdata/netdata/blob/master/health/health.d/synchronization.conf
- metric: mem.sync
- info:
- number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the
- underlying filesystems.
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: mem.file_sync
- description: Monitor calls to fsync(2) and fdatasync(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: fsync
- - name: fdatasync
- - name: mem.memory_map
- description: Monitor calls to msync(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: msync
- - name: mem.sync
- description: Monitor calls to sync(2) and syncfs(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sync
- - name: syncfs
- - name: mem.file_segment
- description: Monitor calls to sync_file_range(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sync_file_range
- - meta:
- plugin_name: ebpf.plugin
- module_name: mdflush
- monitored_instance:
- name: eBPF MDflush
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - MD
- - RAID
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor when flush events happen between disks."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/mdflush.conf"
- description: "Overwrite default configuration reducing I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "Number of times md_flush_request was called since last time."
- labels: []
- metrics:
- - name: mdstat.mdstat_flush
- description: MD flushes
- unit: "flushes"
- chart_type: stacked
- dimensions:
- - name: disk
- - meta:
- plugin_name: ebpf.plugin
- module_name: swap
- monitored_instance:
- name: eBPF SWAP
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - SWAP
- - memory
- - eBPF
- - Hard Disk
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitors when swap has I/O events and applications executing events."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netdata sets the necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
-             When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/swap.conf"
- description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-               description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin checks OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-               description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-               description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
-           description: "These metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.swap_read
- description: Calls to function swap_readpage.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.swap_write
- description: Calls to function swap_writepage.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: services.swap_read
- description: Calls to swap_readpage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.swap_write
- description: Calls to function swap_writepage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: apps
-           description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_call_swap_readpage
- description: Calls to function swap_readpage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: app.ebpf_call_swap_writepage
- description: Calls to function swap_writepage.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: mem.swapcalls
- description: Calls to access swap memory
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: read
- - meta:
- plugin_name: ebpf.plugin
- module_name: oomkill
- monitored_instance:
- name: eBPF OOMkill
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - application
- - memory
- most_popular: false
- overview:
- data_collection:
-         metrics_description: "Monitor applications that are terminated by the kernel Out of Memory (OOM) killer."
- method_description: "Attach tracepoint to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-         description: "The plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
-             When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
-             This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
- configuration:
- file:
- name: "ebpf.d/oomkill.conf"
- description: "Overwrite default configuration reducing number of I/O events."
- options:
- description: |
- Overwrite default configuration reducing number of I/O events
- folding:
- title: "Config options"
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: "These metrics show cgroup/service that reached OOM."
- labels: []
- metrics:
- - name: cgroup.oomkills
- description: OOM kills. This chart is provided by eBPF plugin.
- unit: "kills"
- chart_type: line
- dimensions:
- - name: cgroup name
- - name: services.oomkills
- description: OOM kills. This chart is provided by eBPF plugin.
- unit: "kills"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: apps
- description: "These metrics show cgroup/service that reached OOM."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.oomkill
- description: OOM kills
- unit: "kills"
- chart_type: stacked
- dimensions:
- - name: kills
- - meta:
- plugin_name: ebpf.plugin
- module_name: socket
- monitored_instance:
- name: eBPF Socket
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - TCP
- - UDP
- - bandwidth
- - server
- - connection
- - socket
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor bandwidth consumption per application for protocols TCP and UDP."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-         description: "The plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
-             When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/network.conf"
- description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
- options:
- description: |
-           All options are defined inside section `[global]`. Options inside `network connections` are ignored for now.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: bandwidth table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 16384
- required: false
- - name: ipv4 connection table size
- description: Number of elements stored inside hash tables used to monitor calls per IPV4 connections.
- default_value: 16384
- required: false
- - name: ipv6 connection table size
- description: Number of elements stored inside hash tables used to monitor calls per IPV6 connections.
- default_value: 16384
- required: false
- - name: udp connection table size
- description: Number of temporary elements stored inside hash tables used to monitor UDP connections.
- default_value: 4096
- required: false
- - name: ebpf type format
-               description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin checks OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-               description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-               description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: ip.inbound_conn
- description: Inbound connections.
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connection_tcp
- - name: ip.tcp_outbound_conn
- description: TCP outbound connections.
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: received
- - name: ip.tcp_functions
- description: Calls to internal functions
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: closed
- - name: ip.total_tcp_bandwidth
- description: TCP bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.tcp_error
- description: TCP errors
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.tcp_retransmit
- description: Packages retransmitted
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: retransmited
- - name: ip.udp_functions
- description: UDP calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.total_udp_bandwidth
- description: UDP bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.udp_error
- description: UDP errors
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: apps
- description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_call_tcp_v4_connection
- description: Calls to tcp_v4_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: connections
-             - name: app.ebpf_call_tcp_v6_connection
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: connections
- - name: app.ebpf_sock_bytes_sent
- description: Bytes sent
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: bandwidth
- - name: app.ebpf_sock_bytes_received
- description: bytes received
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: bandwidth
- - name: app.ebpf_call_tcp_sendmsg
- description: Calls for tcp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_tcp_cleanup_rbuf
- description: Calls for tcp_cleanup_rbuf
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_tcp_retransmit
- description: Calls for tcp_retransmit
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_udp_sendmsg
- description: Calls for udp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_udp_recvmsg
- description: Calls for udp_recvmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.net_conn_ipv4
- description: Calls to tcp_v4_connection
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connected_v4
- - name: cgroup.net_conn_ipv6
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connected_v6
- - name: cgroup.net_bytes_recv
- description: Bytes received
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: cgroup.net_bytes_sent
- description: Bytes sent
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_tcp_recv
- description: Calls to tcp_cleanup_rbuf.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: cgroup.net_tcp_send
- description: Calls to tcp_sendmsg.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_retransmit
- description: Calls to tcp_retransmit.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: retransmitted
- - name: cgroup.net_udp_send
- description: Calls to udp_sendmsg
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_udp_recv
- description: Calls to udp_recvmsg
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: services.net_conn_ipv6
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_bytes_recv
- description: Bytes received
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_bytes_sent
- description: Bytes sent
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_recv
- description: Calls to tcp_cleanup_rbuf.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_send
- description: Calls to tcp_sendmsg.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_retransmit
- description: Calls to tcp_retransmit
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_udp_send
- description: Calls to udp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_udp_recv
- description: Calls to udp_recvmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: dcstat
- monitored_instance:
- name: eBPF DCstat
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Directory Cache
- - File system
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor directory cache events per application given an overall vision about files on memory or storage device."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-         description: "The plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
-             When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/dcstat.conf"
- description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config option"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
-               description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin checks OS configuration before loading)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
-               description: "Select the attach method used by the plugin when `co-re` is defined in the previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same as the legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
-               description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of having centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: apps
-           description: "These metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: app.ebpf_dc_reference
- description: Count file access
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: files
- - name: app.ebpf_dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: files
- - name: app.ebpf_dc_not_found
- description: Files not found
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: files
- - name: filesystem
- description: "These metrics show total number of calls to functions inside kernel."
- labels: []
- metrics:
- - name: filesystem.dc_reference
- description: Variables used to calculate hit ratio.
- unit: "files"
- chart_type: line
- dimensions:
- - name: reference
- - name: slow
- - name: miss
- - name: filesystem.dc_hit_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup.dc_reference
- description: Count file access
- unit: "files"
- chart_type: line
- dimensions:
- - name: reference
- - name: cgroup.dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: line
- dimensions:
- - name: slow
- - name: cgroup.dc_not_found
- description: Files not found
- unit: "files"
- chart_type: line
- dimensions:
- - name: miss
- - name: services.dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_reference
- description: Count file access
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_not_found
- description: Files not found
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: filesystem
- monitored_instance:
- name: eBPF Filesystem
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Filesystem
- - ext4
- - btrfs
- - nfs
- - xfs
- - zfs
- - eBPF
- - latency
- - I/O
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor latency for main actions on filesystem like I/O events."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
-         description: "The plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
-             Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different according to preferences of Linux distributions.
-             When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter is preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
-             with different names.
-
-             Now follow steps:
-             1. Copy the configuration file to /usr/src/linux/.config.
-             2. Select the necessary options: make oldconfig
-             3. Compile your kernel image: make bzImage
-             4. Compile your modules: make modules
-             5. Copy your new kernel image to the boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/filesystem.conf"
- description: "Overwrite default configuration and allows user to select charts visible on dashboard."
- options:
- description: |
-           This configuration file has two different sections. The `[global]` section overwrites default options, while `[filesystem]` allows the user to select the filesystems to monitor.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- - name: btrfsdist
- description: Enable or disable latency monitoring for functions associated with btrfs filesystem.
- default_value: yes
- required: false
- - name: ext4dist
- description: Enable or disable latency monitoring for functions associated with ext4 filesystem.
- default_value: yes
- required: false
- - name: nfsdist
- description: Enable or disable latency monitoring for functions associated with nfs filesystem.
- default_value: yes
- required: false
- - name: xfsdist
- description: Enable or disable latency monitoring for functions associated with xfs filesystem.
- default_value: yes
- required: false
- - name: zfsdist
- description: Enable or disable latency monitoring for functions associated with zfs filesystem.
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: filesystem
-           description: "Latency charts associated with filesystem actions."
- labels: []
- metrics:
- - name: filesystem.read_latency
- description: ext4 latency for each read request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: filesystem.open_latency
- description: ext4 latency for each open request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: filesystem.sync_latency
- description: ext4 latency for each sync request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: iilesystem
- description: ""
- labels: []
- metrics:
- - name: filesystem.write_latency
- description: ext4 latency for each write request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: global
- description: ""
- labels: []
- metrics:
- - name: filesystem.attributte_latency
- description: nfs latency for each attribute request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - meta:
- plugin_name: ebpf.plugin
- module_name: shm
- monitored_instance:
- name: eBPF SHM
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - syscall
- - shared memory
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor syscall responsible to manipulate shared memory."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
- This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).`
- configuration:
- file:
- name: "ebpf.d/shm.conf"
- description: "Overwrite default configuration and allows user to select charts visible on dashboard."
- options:
- description: |
- This configuration file have two different sections. The `[global]` overwrites all default options, while `[syscalls]` allow user to select the syscall to monitor.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
- description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
- description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
- description: Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- - name: shmget
- description: Enable or disable monitoring for syscall `shmget`
- default_value: yes
- required: false
- - name: shmat
- description: Enable or disable monitoring for syscall `shmat`
- default_value: yes
- required: false
- - name: shmdt
- description: Enable or disable monitoring for syscall `shmdt`
- default_value: yes
- required: false
- - name: shmctl
- description: Enable or disable monitoring for syscall `shmctl`
- default_value: yes
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: "These Metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.shmget
- description: Calls to syscall shmget(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: get
- - name: cgroup.shmat
- description: Calls to syscall shmat(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: at
- - name: cgroup.shmdt
- description: Calls to syscall shmdt(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: dt
- - name: cgroup.shmctl
- description: Calls to syscall shmctl(2).
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: ctl
- - name: services.shmget
- description: Calls to syscall shmget(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmat
- description: Calls to syscall shmat(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmdt
- description: Calls to syscall shmdt(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmctl
- description: Calls to syscall shmctl(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: apps
- description: "These Metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_shmget_call
- description: Calls to syscall shmget(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_shmat_call
- description: Calls to syscall shmat(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_shmdt_call
- description: Calls to syscall shmdt(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_shmctl_call
- description: Calls to syscall shmctl(2).
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: global
- description: "These Metrics show number of calls for specified syscall."
- labels: []
- metrics:
- - name: system.shared_memory_calls
- description: Calls to shared memory system calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: get
- - name: at
- - name: dt
- - name: ctl
- - meta:
- plugin_name: ebpf.plugin
- module_name: softirq
- monitored_instance:
- name: eBPF SoftIRQ
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - SoftIRQ
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor latency for each SoftIRQ available."
- method_description: "Attach kprobe to internal kernel functions."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
- This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).`
- configuration:
- file:
- name: "ebpf.d/softirq.conf"
- description: "Overwrite default configuration reducing number of I/O events."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "These metrics show latest timestamp for each softIRQ available on host."
- labels: []
- metrics:
- - name: system.softirq_latency
- description: Soft IRQ latency
- unit: "milliseconds"
- chart_type: stacked
- dimensions:
- - name: soft IRQs
- - meta:
- plugin_name: ebpf.plugin
- module_name: mount
- monitored_instance:
- name: eBPF Mount
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - mount
- - umount
- - device
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor calls for mount and umount syscall."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- - title: Debug Filesystem
- description: |
- This thread needs to attach a tracepoint to monitor when a process schedule an exit event. To allow this specific feaure, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).`
- configuration:
- file:
- name: "ebpf.d/mount.conf"
- description: "Overwrite default configuration."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: ebpf type format
- description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
- description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
- default_value: trampoline
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "Calls for syscalls mount an umount."
- labels: []
- metrics:
- - name: mount_points.call
- description: Calls to mount and umount syscalls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: mount
- - name: umount
- - name: mount_points.error
- description: Errors to mount and umount file systems
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: mount
- - name: umount
- - meta:
- plugin_name: ebpf.plugin
- module_name: vfs
- monitored_instance:
- name: eBPF VFS
- link: "https://kernel.org/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list:
- - plugin_name: apps.plugin
- module_name: apps
- - plugin_name: cgroups.plugin
- module_name: cgroups
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - virtual
- - filesystem
- - eBPF
- - I/O
- - files
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor I/O events on Linux Virtual Filesystem."
- method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
- default_behavior:
- auto_detection:
- description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
- limits:
- description: ""
- performance_impact:
- description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
- setup:
- prerequisites:
- list:
- - title: Compile kernel
- description: |
- Check if your kernel was compiled with necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside /boot/config file. Some cited names can be different accoring preferences of Linux distributions.
- When you do not have options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, this last is preferred. The kernel compilation has a well definedd pattern, but distributions can deliver their configuration files
- with different names.
-
- Now follow steps:
- 1. Copy the configuration file to /usr/src/linux/.config.
- 2. Select the necessary options: make oldconfig
- 3. Compile your kernel image: make bzImage
- 4. Compile your modules: make modules
- 5. Copy your new kernel image for boot loader directory
- 6. Install the new modules: make modules_install
- 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
- 8. Update your boot loader
- configuration:
- file:
- name: "ebpf.d/vfs.conf"
- description: "Overwrite default configuration helping to reduce memory usage."
- options:
- description: |
- All options are defined inside section `[global]`.
- folding:
- title: "Config options"
- enabled: true
- list:
- - name: update every
- description: Data collection frequency.
- default_value: 5
- required: false
- - name: ebpf load mode
- description: Define whether plugin will monitor the call (`entry`) for the functions or it will also monitor the return (`return`).
- default_value: entry
- required: false
- - name: apps
- description: Enable or disable integration with apps.plugin
- default_value: no
- required: false
- - name: cgroups
- description: Enable or disable integration with cgroup.plugin
- default_value: no
- required: false
- - name: pid table size
- description: Number of elements stored inside hash tables used to monitor calls per PID.
- default_value: 32768
- required: false
- - name: ebpf type format
- description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
- default_value: auto
- required: false
- - name: ebpf co-re tracing
- description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
- default_value: trampoline
- required: false
- - name: maps per core
- description: Define how plugin will load their hash maps. When enabled (`yes`) plugin will load one hash table per core, instead to have centralized information.
- default_value: yes
- required: false
- - name: lifetime
- description: Set default lifetime for thread when enabled by cloud.
- default_value: 300
- required: false
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: "These Metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: cgroup.vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: delete
- - name: cgroup.vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_read_bytes
- description: Bytes read from disk
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: cgroup.vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: cgroup.vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: cgroup.vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: services.vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read_bytes
- description: Bytes read from disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: global
- description: "These Metrics show grouped information per cgroup/service."
- labels: []
- metrics:
- - name: filesystem.vfs_deleted_objects
- description: Remove files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: delete
- - name: filesystem.vfs_io
- description: Calls to IO
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_io_bytes
- description: Bytes written and read
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_io_error
- description: Fails to write or read
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: filesystem.vfs_fsync_error
- description: Fails to synchronize
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: filesystem.vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: filesystem.vfs_open_error
- description: Fails to open a file
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: filesystem.vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: filesystem.vfs_create_error
- description: Fails to create a file.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: apps
- description: "These Metrics show grouped information per apps group."
- labels:
- - name: app_group
- description: The name of the group defined in the configuration.
- metrics:
- - name: app.ebpf_call_vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: writes
- - name: app.ebpf_call_vfs_read_bytes
- description: Bytes read on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: reads
- - name: app.ebpf_call_vfs_fsync
- description: Calls to vfs_fsync.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_open
- description: Calls to vfs_open.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_create
- description: Calls to vfs_create.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - name: app.ebpf_call_vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: calls
- - meta:
- plugin_name: ebpf.plugin
- module_name: process
- monitored_instance:
- name: eBPF Process
- link: "https://github.com/netdata/netdata/"
- categories:
- - data-collection.ebpf
- icon_filename: "ebpf.jpg"
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ""
- keywords:
- - Memory
- - plugin
- - eBPF
- most_popular: false
- overview:
- data_collection:
- metrics_description: "Monitor internal memory usage."
- method_description: "Uses netdata internal statistic to monitor memory management by plugin."
- supported_platforms:
- include:
- - Linux
- exclude: []
- multi_instance: true
- additional_permissions:
- description: ""
- default_behavior:
- auto_detection:
- description: ""
- limits:
- description: ""
- performance_impact:
- description: ""
- setup:
- prerequisites:
- list:
- - title: Netdata flags.
- description: "To have these charts you need to compile netdata with flag `NETDATA_DEV_MODE`."
- configuration:
- file:
- name: ""
- description: ""
- options:
- description: ""
- folding:
- title: ""
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ""
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: "How plugin is allocating memory."
- labels: []
- metrics:
- - name: netdata.ebpf_aral_stat_size
- description: Bytes allocated for ARAL.
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: memory
- - name: netdata.ebpf_aral_stat_alloc
- description: Calls to allocate memory.
- unit: "calls"
- chart_type: stacked
- dimensions:
- - name: aral
- - name: netdata.ebpf_threads
- description: Threads info
- unit: "threads"
- chart_type: line
- dimensions:
- - name: total
- - name: running
- - name: netdata.ebpf_load_methods
- description: Load info
- unit: "methods"
- chart_type: line
- dimensions:
- - name: legacy
- - name: co-re
- - name: netdata.ebpf_kernel_memory
- description: Memory allocated for hash tables.
- unit: "bytes"
- chart_type: line
- dimensions:
- - name: memory_locked
- - name: netdata.ebpf_hash_tables_count
- description: Number of hash tables loaded
- unit: "hash tables"
- chart_type: line
- dimensions:
- - name: hash_table
- - name: netdata.ebpf_aral_stat_size
- description: Bytes allocated for ARAL
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: memory
- - name: netdata.ebpf_aral_stat_alloc
- description: Calls to allocate memory
- unit: "calls"
- chart_type: stacked
- dimensions:
- - name: aral
- - name: netdata.ebpf_aral_stat_size
- description: Bytes allocated for ARAL.
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: memory
- - name: netdata.ebpf_aral_stat_alloc
- description: Calls to allocate memory
- unit: "calls"
- chart_type: stacked
- dimensions:
- - name: aral
- - name: netdata.ebpf_hash_tables_insert_pid_elements
- description: Number of times an element was inserted in a hash table.
- unit: "rows"
- chart_type: line
- dimensions:
- - name: thread
- - name: netdata.ebpf_hash_tables_remove_pid_elements
- description: Number of times an element was removed in a hash table.
- unit: "rows"
- chart_type: line
- dimensions:
- - name: thread
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/src/collectors/ebpf.plugin/ebpf.d.conf
index 5cb844b20..833c8fd99 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d.conf
@@ -58,20 +58,20 @@
# When plugin detects that system has support to BTF, it enables integration with apps.plugin.
#
[ebpf programs]
- cachestat = yes
+ cachestat = no
dcstat = no
disk = no
- fd = yes
+ fd = no
filesystem = no
hardirq = no
mdflush = no
mount = yes
oomkill = yes
- process = yes
- shm = yes
+ process = no
+ shm = no
socket = no
softirq = yes
sync = no
- swap = yes
+ swap = no
vfs = no
network connections = no
diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/src/collectors/ebpf.plugin/ebpf.d/cachestat.conf
index 9c51b2c52..c378e82e8 100644
--- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/cachestat.conf
@@ -37,6 +37,6 @@
# pid table size = 32768
ebpf type format = auto
ebpf co-re tracing = trampoline
- collect pid = real parent
+ collect pid = all
# maps per core = yes
lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/src/collectors/ebpf.plugin/ebpf.d/dcstat.conf
index 614d814e6..2d54bce97 100644
--- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/dcstat.conf
@@ -35,6 +35,6 @@
# pid table size = 32768
ebpf type format = auto
ebpf co-re tracing = trampoline
- collect pid = real parent
+ collect pid = all
# maps per core = yes
lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/disk.conf b/src/collectors/ebpf.plugin/ebpf.d/disk.conf
index c5a0a2708..c5a0a2708 100644
--- a/collectors/ebpf.plugin/ebpf.d/disk.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/disk.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt b/src/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt
index 539bf357f..539bf357f 100644
--- a/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt
+++ b/src/collectors/ebpf.plugin/ebpf.d/ebpf_kernel_reject_list.txt
diff --git a/collectors/ebpf.plugin/ebpf.d/fd.conf b/src/collectors/ebpf.plugin/ebpf.d/fd.conf
index d48230323..d48230323 100644
--- a/collectors/ebpf.plugin/ebpf.d/fd.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/fd.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/filesystem.conf b/src/collectors/ebpf.plugin/ebpf.d/filesystem.conf
index 209abba77..209abba77 100644
--- a/collectors/ebpf.plugin/ebpf.d/filesystem.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/filesystem.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/functions.conf b/src/collectors/ebpf.plugin/ebpf.d/functions.conf
index a4f57f641..a4f57f641 100644
--- a/collectors/ebpf.plugin/ebpf.d/functions.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/functions.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/hardirq.conf b/src/collectors/ebpf.plugin/ebpf.d/hardirq.conf
index 6a47a94bf..6a47a94bf 100644
--- a/collectors/ebpf.plugin/ebpf.d/hardirq.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/hardirq.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/mdflush.conf b/src/collectors/ebpf.plugin/ebpf.d/mdflush.conf
index ea97ebe85..ea97ebe85 100644
--- a/collectors/ebpf.plugin/ebpf.d/mdflush.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/mdflush.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/mount.conf b/src/collectors/ebpf.plugin/ebpf.d/mount.conf
index ff9a2948c..ff9a2948c 100644
--- a/collectors/ebpf.plugin/ebpf.d/mount.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/mount.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/network.conf b/src/collectors/ebpf.plugin/ebpf.d/network.conf
index 99c32edc1..99c32edc1 100644
--- a/collectors/ebpf.plugin/ebpf.d/network.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/network.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/oomkill.conf b/src/collectors/ebpf.plugin/ebpf.d/oomkill.conf
index ea97ebe85..ea97ebe85 100644
--- a/collectors/ebpf.plugin/ebpf.d/oomkill.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/oomkill.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/process.conf b/src/collectors/ebpf.plugin/ebpf.d/process.conf
index 150c57920..6f6477003 100644
--- a/collectors/ebpf.plugin/ebpf.d/process.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/process.conf
@@ -26,6 +26,6 @@
# cgroups = no
# update every = 10
# pid table size = 32768
- collect pid = real parent
+ collect pid = all
# maps per core = yes
lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/shm.conf b/src/collectors/ebpf.plugin/ebpf.d/shm.conf
index 95fb54e0f..0314bdc95 100644
--- a/collectors/ebpf.plugin/ebpf.d/shm.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/shm.conf
@@ -31,6 +31,7 @@
# pid table size = 32768
ebpf type format = auto
ebpf co-re tracing = trampoline
+ collect pid = all
# maps per core = yes
lifetime = 300
diff --git a/collectors/ebpf.plugin/ebpf.d/softirq.conf b/src/collectors/ebpf.plugin/ebpf.d/softirq.conf
index 6a47a94bf..6a47a94bf 100644
--- a/collectors/ebpf.plugin/ebpf.d/softirq.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/softirq.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/sync.conf b/src/collectors/ebpf.plugin/ebpf.d/sync.conf
index a086ed4db..a086ed4db 100644
--- a/collectors/ebpf.plugin/ebpf.d/sync.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/sync.conf
diff --git a/collectors/ebpf.plugin/ebpf.d/vfs.conf b/src/collectors/ebpf.plugin/ebpf.d/vfs.conf
index f511581b8..f511581b8 100644
--- a/collectors/ebpf.plugin/ebpf.d/vfs.conf
+++ b/src/collectors/ebpf.plugin/ebpf.d/vfs.conf
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/src/collectors/ebpf.plugin/ebpf_apps.c
index 10c452267..a17cdb33d 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/src/collectors/ebpf.plugin/ebpf_apps.c
@@ -7,24 +7,6 @@
// ----------------------------------------------------------------------------
// ARAL vectors used to speed up processing
ARAL *ebpf_aral_apps_pid_stat = NULL;
-ARAL *ebpf_aral_process_stat = NULL;
-ARAL *ebpf_aral_socket_pid = NULL;
-ARAL *ebpf_aral_cachestat_pid = NULL;
-ARAL *ebpf_aral_dcstat_pid = NULL;
-ARAL *ebpf_aral_vfs_pid = NULL;
-ARAL *ebpf_aral_fd_pid = NULL;
-ARAL *ebpf_aral_shm_pid = NULL;
-
-// ----------------------------------------------------------------------------
-// Global vectors used with apps
-ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
-netdata_publish_cachestat_t **cachestat_pid = NULL;
-netdata_publish_dcstat_t **dcstat_pid = NULL;
-netdata_publish_swap_t **swap_pid = NULL;
-netdata_publish_vfs_t **vfs_pid = NULL;
-netdata_fd_stat_t **fd_pid = NULL;
-netdata_publish_shm_t **shm_pid = NULL;
-ebpf_process_stat_t **global_process_stats = NULL;
/**
* eBPF ARAL Init
@@ -41,8 +23,6 @@ void ebpf_aral_init(void)
ebpf_aral_apps_pid_stat = ebpf_allocate_pid_aral("ebpf_pid_stat", sizeof(struct ebpf_pid_stat));
- ebpf_aral_process_stat = ebpf_allocate_pid_aral(NETDATA_EBPF_PROC_ARAL_NAME, sizeof(ebpf_process_stat_t));
-
#ifdef NETDATA_DEV_MODE
netdata_log_info("Plugin is using ARAL with values %d", NETDATA_EBPF_ALLOC_MAX_PID);
#endif
@@ -72,266 +52,6 @@ void ebpf_pid_stat_release(struct ebpf_pid_stat *stat)
aral_freez(ebpf_aral_apps_pid_stat, stat);
}
-/*****************************************************************
- *
- * PROCESS ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF process stat get
- *
- * Get a ebpf_pid_stat entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-ebpf_process_stat_t *ebpf_process_stat_get(void)
-{
- ebpf_process_stat_t *target = aral_mallocz(ebpf_aral_process_stat);
- memset(target, 0, sizeof(ebpf_process_stat_t));
- return target;
-}
-
-/**
- * eBPF process release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_process_stat_release(ebpf_process_stat_t *stat)
-{
- aral_freez(ebpf_aral_process_stat, stat);
-}
-
-/*****************************************************************
- *
- * SOCKET ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF socket Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-void ebpf_socket_aral_init()
-{
- ebpf_aral_socket_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_NAME, sizeof(ebpf_socket_publish_apps_t));
-}
-
-/**
- * eBPF socket get
- *
- * Get a ebpf_socket_publish_apps_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void)
-{
- ebpf_socket_publish_apps_t *target = aral_mallocz(ebpf_aral_socket_pid);
- memset(target, 0, sizeof(ebpf_socket_publish_apps_t));
- return target;
-}
-
-/*****************************************************************
- *
- * CACHESTAT ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF Cachestat Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-void ebpf_cachestat_aral_init()
-{
- ebpf_aral_cachestat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_CACHESTAT_ARAL_NAME, sizeof(netdata_publish_cachestat_t));
-}
-
-/**
- * eBPF publish cachestat get
- *
- * Get a netdata_publish_cachestat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void)
-{
- netdata_publish_cachestat_t *target = aral_mallocz(ebpf_aral_cachestat_pid);
- memset(target, 0, sizeof(netdata_publish_cachestat_t));
- return target;
-}
-
-/**
- * eBPF cachestat release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_cachestat_release(netdata_publish_cachestat_t *stat)
-{
- aral_freez(ebpf_aral_cachestat_pid, stat);
-}
-
-/*****************************************************************
- *
- * DCSTAT ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF directory cache Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-void ebpf_dcstat_aral_init()
-{
- ebpf_aral_dcstat_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_DCSTAT_ARAL_NAME, sizeof(netdata_publish_dcstat_t));
-}
-
-/**
- * eBPF publish dcstat get
- *
- * Get a netdata_publish_dcstat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void)
-{
- netdata_publish_dcstat_t *target = aral_mallocz(ebpf_aral_dcstat_pid);
- memset(target, 0, sizeof(netdata_publish_dcstat_t));
- return target;
-}
-
-/**
- * eBPF dcstat release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_dcstat_release(netdata_publish_dcstat_t *stat)
-{
- aral_freez(ebpf_aral_dcstat_pid, stat);
-}
-
-/*****************************************************************
- *
- * VFS ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF VFS Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-void ebpf_vfs_aral_init()
-{
- ebpf_aral_vfs_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_VFS_ARAL_NAME, sizeof(netdata_publish_vfs_t));
-}
-
-/**
- * eBPF publish VFS get
- *
- * Get a netdata_publish_vfs_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_vfs_t *ebpf_vfs_get(void)
-{
- netdata_publish_vfs_t *target = aral_mallocz(ebpf_aral_vfs_pid);
- memset(target, 0, sizeof(netdata_publish_vfs_t));
- return target;
-}
-
-/**
- * eBPF VFS release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_vfs_release(netdata_publish_vfs_t *stat)
-{
- aral_freez(ebpf_aral_vfs_pid, stat);
-}
-
-/*****************************************************************
- *
- * FD ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF file descriptor Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-void ebpf_fd_aral_init()
-{
- ebpf_aral_fd_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_FD_ARAL_NAME, sizeof(netdata_fd_stat_t));
-}
-
-/**
- * eBPF publish file descriptor get
- *
- * Get a netdata_fd_stat_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_fd_stat_t *ebpf_fd_stat_get(void)
-{
- netdata_fd_stat_t *target = aral_mallocz(ebpf_aral_fd_pid);
- memset(target, 0, sizeof(netdata_fd_stat_t));
- return target;
-}
-
-/**
- * eBPF file descriptor release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_fd_release(netdata_fd_stat_t *stat)
-{
- aral_freez(ebpf_aral_fd_pid, stat);
-}
-
-/*****************************************************************
- *
- * SHM ARAL FUNCTIONS
- *
- *****************************************************************/
-
-/**
- * eBPF shared memory Aral init
- *
- * Initiallize array allocator that will be used when integration with apps is enabled.
- */
-void ebpf_shm_aral_init()
-{
- ebpf_aral_shm_pid = ebpf_allocate_pid_aral(NETDATA_EBPF_SHM_ARAL_NAME, sizeof(netdata_publish_shm_t));
-}
-
-/**
- * eBPF shared memory get
- *
- * Get a netdata_publish_shm_t entry to be used with a specific PID.
- *
- * @return it returns the address on success.
- */
-netdata_publish_shm_t *ebpf_shm_stat_get(void)
-{
- netdata_publish_shm_t *target = aral_mallocz(ebpf_aral_shm_pid);
- memset(target, 0, sizeof(netdata_publish_shm_t));
- return target;
-}
-
-/**
- * eBPF shared memory release
- *
- * @param stat Release a target after usage.
- */
-void ebpf_shm_release(netdata_publish_shm_t *stat)
-{
- aral_freez(ebpf_aral_shm_pid, stat);
-}
-
// ----------------------------------------------------------------------------
// internal flags
// handled in code (automatically set)
@@ -372,24 +92,6 @@ int ebpf_read_hash_table(void *ep, int fd, uint32_t pid)
*****************************************************************/
/**
- * Am I running as Root
- *
- * Verify the user that is running the collector.
- *
- * @return It returns 1 for root and 0 otherwise.
- */
-int am_i_running_as_root()
-{
- uid_t uid = getuid(), euid = geteuid();
-
- if (uid == 0 || euid == 0) {
- return 1;
- }
-
- return 0;
-}
-
-/**
* Reset the target values
*
* @param root the pointer to the chain that will be reset.
@@ -753,14 +455,19 @@ static inline int managed_log(struct ebpf_pid_stat *p, uint32_t log, int status)
*
* Get or allocate the PID entry for the specified pid.
*
- * @param pid the pid to search the data.
+ * @param pid the pid to search the data.
+ * @param tgid the task group id
*
* @return It returns the pid entry structure
*/
-static inline struct ebpf_pid_stat *get_pid_entry(pid_t pid)
+ebpf_pid_stat_t *ebpf_get_pid_entry(pid_t pid, pid_t tgid)
{
- if (unlikely(ebpf_all_pids[pid]))
+ ebpf_pid_stat_t *ptr = ebpf_all_pids[pid];
+ if (unlikely(ptr)) {
+ if (!ptr->ppid && tgid)
+ ptr->ppid = tgid;
return ebpf_all_pids[pid];
+ }
struct ebpf_pid_stat *p = ebpf_pid_stat_get();
@@ -771,6 +478,7 @@ static inline struct ebpf_pid_stat *get_pid_entry(pid_t pid)
ebpf_root_of_pids = p;
p->pid = pid;
+ p->ppid = tgid;
ebpf_all_pids[pid] = p;
ebpf_all_pids_count++;
@@ -951,14 +659,14 @@ static inline int read_proc_pid_stat(struct ebpf_pid_stat *p, void *ptr)
*
* @return It returns 1 on success and 0 otherwise
*/
-static inline int collect_data_for_pid(pid_t pid, void *ptr)
+static inline int ebpf_collect_data_for_pid(pid_t pid, void *ptr)
{
if (unlikely(pid < 0 || pid > pid_max)) {
netdata_log_error("Invalid pid %d read (expected %d to %d). Ignoring process.", pid, 0, pid_max);
return 0;
}
- struct ebpf_pid_stat *p = get_pid_entry(pid);
+ ebpf_pid_stat_t *p = ebpf_get_pid_entry(pid, 0);
if (unlikely(!p || p->read))
return 0;
p->read = 1;
@@ -1164,7 +872,7 @@ static inline void post_aggregate_targets(struct ebpf_target *root)
*
* @param pid the PID that will be removed.
*/
-static inline void del_pid_entry(pid_t pid)
+static inline void ebpf_del_pid_entry(pid_t pid)
{
struct ebpf_pid_stat *p = ebpf_all_pids[pid];
@@ -1201,6 +909,7 @@ static inline void del_pid_entry(pid_t pid)
}
JudyLFreeArray(&pid_ptr->socket_stats.JudyLArray, PJE0);
}
+ aral_freez(ebpf_judy_pid.pid_table, pid_ptr);
JudyLDel(&ebpf_judy_pid.index.JudyLArray, p->pid, PJE0);
}
rw_spinlock_write_unlock(&ebpf_judy_pid.index.rw_spinlock);
@@ -1240,79 +949,23 @@ int get_pid_comm(pid_t pid, size_t n, char *dest)
}
/**
- * Cleanup variable from other threads
- *
- * @param pid current pid.
- */
-void cleanup_variables_from_other_threads(uint32_t pid)
-{
- // Clean cachestat structure
- if (cachestat_pid) {
- ebpf_cachestat_release(cachestat_pid[pid]);
- cachestat_pid[pid] = NULL;
- }
-
- // Clean directory cache structure
- if (dcstat_pid) {
- ebpf_dcstat_release(dcstat_pid[pid]);
- dcstat_pid[pid] = NULL;
- }
-
- // Clean swap structure
- if (swap_pid) {
- freez(swap_pid[pid]);
- swap_pid[pid] = NULL;
- }
-
- // Clean vfs structure
- if (vfs_pid) {
- ebpf_vfs_release(vfs_pid[pid]);
- vfs_pid[pid] = NULL;
- }
-
- // Clean fd structure
- if (fd_pid) {
- ebpf_fd_release(fd_pid[pid]);
- fd_pid[pid] = NULL;
- }
-
- // Clean shm structure
- if (shm_pid) {
- ebpf_shm_release(shm_pid[pid]);
- shm_pid[pid] = NULL;
- }
-}
-
-/**
* Remove PIDs when they are not running more.
*/
-void cleanup_exited_pids()
+void ebpf_cleanup_exited_pids(int max)
{
struct ebpf_pid_stat *p = NULL;
for (p = ebpf_root_of_pids; p;) {
- if (!p->updated && (!p->keep || p->keeploops > 0)) {
+ if (p->not_updated > max) {
if (unlikely(debug_enabled && (p->keep || p->keeploops)))
debug_log(" > CLEANUP cannot keep exited process %d (%s) anymore - removing it.", p->pid, p->comm);
pid_t r = p->pid;
p = p->next;
- // Clean process structure
- if (global_process_stats) {
- ebpf_process_stat_release(global_process_stats[r]);
- global_process_stats[r] = NULL;
- }
-
- cleanup_variables_from_other_threads(r);
-
- del_pid_entry(r);
- } else {
- if (unlikely(p->keep))
- p->keeploops++;
- p->keep = 0;
- p = p->next;
+ ebpf_del_pid_entry(r);
}
+ p = p->next;
}
}
@@ -1344,7 +997,7 @@ static inline void read_proc_filesystem()
if (unlikely(endptr == de->d_name || *endptr != '\0'))
continue;
- collect_data_for_pid(pid, NULL);
+ ebpf_collect_data_for_pid(pid, NULL);
}
closedir(dir);
}
@@ -1400,6 +1053,31 @@ void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core)
}
/**
+ * Sum values for pid
+ *
+ * @param structure to store result.
+ * @param root the structure with all available PIDs
+ */
+void ebpf_process_sum_values_for_pids(ebpf_process_stat_t *process, struct ebpf_pid_on_target *root)
+{
+ memset(process, 0, sizeof(ebpf_process_stat_t));
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ ebpf_process_stat_t *in = &local_pid->process;
+ process->task_err += in->task_err;
+ process->release_call += in->release_call;
+ process->exit_call += in->exit_call;
+ process->create_thread += in->create_thread;
+ process->create_process += in->create_process;
+ }
+
+ root = root->next;
+ }
+}
+
+/**
* Collect data for all process
*
* Read data from hash table and store it in appropriate vectors.
@@ -1431,42 +1109,31 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core)
read_proc_filesystem();
- uint32_t key;
pids = ebpf_root_of_pids; // global list of all processes running
- // while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
if (tbl_pid_stats_fd != -1) {
size_t length = sizeof(ebpf_process_stat_t);
if (maps_per_core)
length *= ebpf_nprocs;
- while (pids) {
- key = pids->pid;
-
- ebpf_process_stat_t *w = global_process_stats[key];
- if (!w) {
- w = ebpf_process_stat_get();
- global_process_stats[key] = w;
- }
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(tbl_pid_stats_fd, &key, &next_key) == 0) {
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, 0);
+ if (!local_pid)
+ goto end_process_loop;
+ ebpf_process_stat_t *w = &local_pid->process;
if (bpf_map_lookup_elem(tbl_pid_stats_fd, &key, process_stat_vector)) {
- // Clean Process structures
- ebpf_process_stat_release(w);
- global_process_stats[key] = NULL;
-
- cleanup_variables_from_other_threads(key);
-
- pids = pids->next;
- continue;
+ goto end_process_loop;
}
ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
memcpy(w, process_stat_vector, sizeof(ebpf_process_stat_t));
+end_process_loop:
memset(process_stat_vector, 0, length);
-
- pids = pids->next;
+ key = next_key;
}
}
@@ -1482,4 +1149,12 @@ void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core)
aggregate_pid_on_target(pids->target, pids, NULL);
post_aggregate_targets(apps_groups_root_target);
+
+ struct ebpf_target *w;
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->processes)))
+ continue;
+
+ ebpf_process_sum_values_for_pids(&w->process, w->root_pid);
+ }
}
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/src/collectors/ebpf.plugin/ebpf_apps.h
index 258091507..a2cbaf3b7 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/src/collectors/ebpf.plugin/ebpf_apps.h
@@ -13,11 +13,14 @@
#define NETDATA_APP_FAMILY "app"
#define NETDATA_APPS_FILE_GROUP "file_access"
#define NETDATA_APPS_FILE_FDS "fds"
-#define NETDATA_APPS_FILE_CGROUP_GROUP "file_access (eBPF)"
-#define NETDATA_APPS_PROCESS_GROUP "process (eBPF)"
+#define NETDATA_APPS_PROCESS_GROUP "process"
#define NETDATA_APPS_NET_GROUP "net"
#define NETDATA_APPS_IPC_SHM_GROUP "ipc shm"
+#ifndef TASK_COMM_LEN
+#define TASK_COMM_LEN 16
+#endif
+
#include "ebpf_process.h"
#include "ebpf_dcstat.h"
#include "ebpf_disk.h"
@@ -39,6 +42,31 @@
#define EBPF_MAX_COMPARE_NAME 100
#define EBPF_MAX_NAME 100
+#define EBPF_CLEANUP_FACTOR 10
+
+// ----------------------------------------------------------------------------
+// Structures used to read information from kernel ring
+typedef struct ebpf_process_stat {
+ uint64_t ct;
+ uint32_t uid;
+ uint32_t gid;
+ char name[TASK_COMM_LEN];
+
+ uint32_t tgid;
+ uint32_t pid;
+
+ //Counter
+ uint32_t exit_call;
+ uint32_t release_call;
+ uint32_t create_process;
+ uint32_t create_thread;
+
+ //Counter
+ uint32_t task_err;
+
+ uint8_t removeme;
+} ebpf_process_stat_t;
+
// ----------------------------------------------------------------------------
// pid_stat
//
@@ -61,6 +89,8 @@ struct ebpf_target {
netdata_publish_vfs_t vfs;
netdata_fd_stat_t fd;
netdata_publish_shm_t shm;
+ ebpf_process_stat_t process;
+ ebpf_socket_publish_apps_t socket;
kernel_uint_t starttime;
kernel_uint_t collected_starttime;
@@ -84,7 +114,7 @@ extern struct ebpf_target *apps_groups_root_target;
extern struct ebpf_target *users_root_target;
extern struct ebpf_target *groups_root_target;
-struct ebpf_pid_stat {
+typedef struct ebpf_pid_stat {
int32_t pid;
char comm[EBPF_MAX_COMPARE_NAME + 1];
char *cmdline;
@@ -105,6 +135,16 @@ struct ebpf_pid_stat {
int sortlist; // higher numbers = top on the process tree
// each process gets a unique number
+ netdata_publish_cachestat_t cachestat;
+ netdata_publish_dcstat_t dc;
+ netdata_fd_stat_t fd;
+ ebpf_process_stat_t process;
+ netdata_publish_shm_t shm;
+ netdata_publish_swap_t swap;
+ ebpf_socket_publish_apps_t socket;
+ netdata_publish_vfs_t vfs;
+
+ int not_updated;
struct ebpf_target *target; // app_groups.conf targets
struct ebpf_target *user_target; // uid based targets
@@ -113,6 +153,8 @@ struct ebpf_pid_stat {
usec_t stat_collected_usec;
usec_t last_stat_collected_usec;
+ netdata_publish_cachestat_t cache;
+
char *stat_filename;
char *status_filename;
char *io_filename;
@@ -121,7 +163,7 @@ struct ebpf_pid_stat {
struct ebpf_pid_stat *parent;
struct ebpf_pid_stat *prev;
struct ebpf_pid_stat *next;
-};
+} ebpf_pid_stat_t;
// ----------------------------------------------------------------------------
// target
@@ -136,24 +178,6 @@ struct ebpf_pid_on_target {
struct ebpf_pid_on_target *next;
};
-// ----------------------------------------------------------------------------
-// Structures used to read information from kernel ring
-typedef struct ebpf_process_stat {
- uint64_t pid_tgid; // This cannot be removed, because it is used inside kernel ring.
- uint32_t pid;
-
- //Counter
- uint32_t exit_call;
- uint32_t release_call;
- uint32_t create_process;
- uint32_t create_thread;
-
- //Counter
- uint32_t task_err;
-
- uint8_t removeme;
-} ebpf_process_stat_t;
-
/**
* Internal function used to write debug messages.
*
@@ -186,8 +210,6 @@ void clean_apps_groups_target(struct ebpf_target *apps_groups_root_target);
size_t zero_all_targets(struct ebpf_target *root);
-int am_i_running_as_root();
-
void cleanup_exited_pids();
int ebpf_read_hash_table(void *ep, int fd, uint32_t pid);
@@ -197,16 +219,8 @@ int get_pid_comm(pid_t pid, size_t n, char *dest);
void collect_data_for_all_processes(int tbl_pid_stats_fd, int maps_per_core);
void ebpf_process_apps_accumulator(ebpf_process_stat_t *out, int maps_per_core);
-extern ebpf_process_stat_t **global_process_stats;
-extern netdata_publish_cachestat_t **cachestat_pid;
-extern netdata_publish_dcstat_t **dcstat_pid;
-extern netdata_publish_swap_t **swap_pid;
-extern netdata_publish_vfs_t **vfs_pid;
-extern netdata_fd_stat_t **fd_pid;
-extern netdata_publish_shm_t **shm_pid;
-
// The default value is at least 32 times smaller than maximum number of PIDs allowed on system,
-// this is only possible because we are using ARAL (https://github.com/netdata/netdata/tree/master/libnetdata/aral).
+// this is only possible because we are using ARAL (https://github.com/netdata/netdata/tree/master/src/libnetdata/aral).
#ifndef NETDATA_EBPF_ALLOC_MAX_PID
# define NETDATA_EBPF_ALLOC_MAX_PID 1024
#endif
@@ -214,51 +228,25 @@ extern netdata_publish_shm_t **shm_pid;
// ARAL Sectiion
extern void ebpf_aral_init(void);
-
-extern ebpf_process_stat_t *ebpf_process_stat_get(void);
-extern void ebpf_process_stat_release(ebpf_process_stat_t *stat);
+extern ebpf_pid_stat_t *ebpf_get_pid_entry(pid_t pid, pid_t tgid);
extern ebpf_process_stat_t *process_stat_vector;
-extern ARAL *ebpf_aral_socket_pid;
-void ebpf_socket_aral_init();
-ebpf_socket_publish_apps_t *ebpf_socket_stat_get(void);
-
-extern ARAL *ebpf_aral_cachestat_pid;
-void ebpf_cachestat_aral_init();
-netdata_publish_cachestat_t *ebpf_publish_cachestat_get(void);
-void ebpf_cachestat_release(netdata_publish_cachestat_t *stat);
-
-extern ARAL *ebpf_aral_dcstat_pid;
-void ebpf_dcstat_aral_init();
-netdata_publish_dcstat_t *ebpf_publish_dcstat_get(void);
-void ebpf_dcstat_release(netdata_publish_dcstat_t *stat);
-
extern ARAL *ebpf_aral_vfs_pid;
void ebpf_vfs_aral_init();
netdata_publish_vfs_t *ebpf_vfs_get(void);
void ebpf_vfs_release(netdata_publish_vfs_t *stat);
-extern ARAL *ebpf_aral_fd_pid;
-void ebpf_fd_aral_init();
-netdata_fd_stat_t *ebpf_fd_stat_get(void);
-void ebpf_fd_release(netdata_fd_stat_t *stat);
-
extern ARAL *ebpf_aral_shm_pid;
void ebpf_shm_aral_init();
netdata_publish_shm_t *ebpf_shm_stat_get(void);
void ebpf_shm_release(netdata_publish_shm_t *stat);
+void ebpf_cleanup_exited_pids(int max);
// ARAL Section end
// Threads integrated with apps
-extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
// Threads integrated with apps
#include "libnetdata/threads/threads.h"
-// ARAL variables
-extern ARAL *ebpf_aral_apps_pid_stat;
-extern ARAL *ebpf_aral_process_stat;
-#define NETDATA_EBPF_PROC_ARAL_NAME "ebpf_proc_stat"
-
#endif /* NETDATA_EBPF_APPS_H */
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/src/collectors/ebpf.plugin/ebpf_cachestat.c
index d9f8f7b06..45e09766f 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/src/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -58,9 +58,16 @@ netdata_ebpf_targets_t cachestat_targets[] = { {.name = "add_to_page_cache_lru",
static char *account_page[NETDATA_CACHESTAT_ACCOUNT_DIRTY_END] ={ "account_page_dirtied",
"__set_page_dirty", "__folio_mark_dirty" };
-#ifdef NETDATA_DEV_MODE
-int cachestat_disable_priority;
-#endif
+struct netdata_static_thread ebpf_read_cachestat = {
+ .name = "EBPF_READ_CACHESTAT",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
#ifdef LIBBPF_MAJOR_VERSION
/**
@@ -78,7 +85,6 @@ static void ebpf_cachestat_disable_probe(struct cachestat_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_kprobe, false);
}
/*
@@ -119,7 +125,6 @@ static void ebpf_cachestat_disable_trampoline(struct cachestat_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_set_page_dirty_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_account_page_dirtied_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_mark_buffer_dirty_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
}
/*
@@ -175,9 +180,6 @@ static inline void netdata_set_trampoline_target(struct cachestat_bpf *obj)
bpf_program__set_attach_target(obj->progs.netdata_mark_buffer_dirty_fentry, 0,
cachestat_targets[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
}
/**
@@ -194,7 +196,7 @@ static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
obj->links.netdata_add_to_page_cache_lru_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_add_to_page_cache_lru_kprobe,
false,
cachestat_targets[NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU].name);
- int ret = libbpf_get_error(obj->links.netdata_add_to_page_cache_lru_kprobe);
+ long ret = libbpf_get_error(obj->links.netdata_add_to_page_cache_lru_kprobe);
if (ret)
return -1;
@@ -234,13 +236,6 @@ static int ebpf_cachestat_attach_probe(struct cachestat_bpf *obj)
if (ret)
return -1;
- obj->links.netdata_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_kprobe,
- false,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_release_task_kprobe);
- if (ret)
- return -1;
-
return 0;
}
@@ -277,19 +272,6 @@ static void ebpf_cachestat_set_hash_tables(struct cachestat_bpf *obj)
}
/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_cachestat_disable_release_task(struct cachestat_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_release_task_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
-}
-
-/**
* Load and attach
*
* Load and attach the eBPF code in kernel.
@@ -316,9 +298,6 @@ static inline int ebpf_cachestat_load_and_attach(struct cachestat_bpf *obj, ebpf
ebpf_cachestat_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_cachestat_disable_release_task(obj);
-
int ret = cachestat_bpf__load(obj);
if (ret) {
return ret;
@@ -349,11 +328,11 @@ static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_services(ebpf_module_t *em)
+static void ebpf_obsolete_cachestat_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "",
"Hit ratio",
EBPF_COMMON_DIMENSION_PERCENTAGE,
NETDATA_CACHESTAT_SUBMENU,
@@ -363,8 +342,8 @@ static void ebpf_obsolete_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_CACHESTAT_DIRTY_CHART,
- "",
"Number of dirty pages",
EBPF_CACHESTAT_DIMENSION_PAGE,
NETDATA_CACHESTAT_SUBMENU,
@@ -374,8 +353,8 @@ static void ebpf_obsolete_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_CACHESTAT_HIT_CHART,
- "",
"Number of accessed files",
EBPF_CACHESTAT_DIMENSION_HITS,
NETDATA_CACHESTAT_SUBMENU,
@@ -385,8 +364,8 @@ static void ebpf_obsolete_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_CACHESTAT_MISSES_CHART,
- "",
"Files out of page cache",
EBPF_CACHESTAT_DIMENSION_MISSES,
NETDATA_CACHESTAT_SUBMENU,
@@ -406,12 +385,13 @@ static void ebpf_obsolete_services(ebpf_module_t *em)
static inline void ebpf_obsolete_cachestat_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_cachestat_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_cachestat_charts(ect->name, em->update_every);
}
@@ -483,6 +463,7 @@ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
{
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_CACHESTAT_IDX))))
continue;
@@ -532,6 +513,7 @@ void ebpf_obsolete_cachestat_apps_charts(struct ebpf_module *em)
update_every);
w->charts_created &= ~(1<<EBPF_MODULE_CACHESTAT_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -545,6 +527,9 @@ static void ebpf_cachestat_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (ebpf_read_cachestat.thread)
+ netdata_thread_cancel(*ebpf_read_cachestat.thread);
+
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {
@@ -558,12 +543,6 @@ static void ebpf_cachestat_exit(void *ptr)
ebpf_obsolete_cachestat_global(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_cachestat_pid)
- ebpf_statistic_obsolete_aral_chart(em, cachestat_disable_priority);
-#endif
-
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -688,13 +667,17 @@ static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out, int maps_pe
{
int i, end = (maps_per_core) ? ebpf_nprocs : 1;
netdata_cachestat_pid_t *total = &out[0];
+ uint64_t ct = total->ct;
for (i = 1; i < end; i++) {
netdata_cachestat_pid_t *w = &out[i];
total->account_page_dirtied += w->account_page_dirtied;
total->add_to_page_cache_lru += w->add_to_page_cache_lru;
total->mark_buffer_dirty += w->mark_buffer_dirty;
total->mark_page_accessed += w->mark_page_accessed;
+ if (w->ct > ct)
+ ct = w->ct;
}
+ total->ct = ct;
}
/**
@@ -703,39 +686,18 @@ static void cachestat_apps_accumulator(netdata_cachestat_pid_t *out, int maps_pe
* Save the current values inside the structure
*
* @param out vector used to plot charts
- * @param publish vector with values read from hash tables.
+ * @param in vector with values read from hash tables.
*/
-static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, netdata_cachestat_pid_t *publish)
+static inline void cachestat_save_pid_values(netdata_publish_cachestat_t *out, netdata_cachestat_pid_t *in)
{
+ out->ct = in->ct;
if (!out->current.mark_page_accessed) {
- memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
+ memcpy(&out->current, &in[0], sizeof(netdata_cachestat_pid_t));
return;
}
memcpy(&out->prev, &out->current, sizeof(netdata_cachestat_pid_t));
- memcpy(&out->current, &publish[0], sizeof(netdata_cachestat_pid_t));
-}
-
-/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid that we are collecting data
- * @param out values read from hash tables;
- */
-static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *publish)
-{
- netdata_publish_cachestat_t *curr = cachestat_pid[current_pid];
- if (!curr) {
- curr = ebpf_publish_cachestat_get();
- cachestat_pid[current_pid] = curr;
-
- cachestat_save_pid_values(curr, publish);
- return;
- }
-
- cachestat_save_pid_values(curr, publish);
+ memcpy(&out->current, &in[0], sizeof(netdata_cachestat_pid_t));
}
/**
@@ -745,32 +707,39 @@ static void cachestat_fill_pid(uint32_t current_pid, netdata_cachestat_pid_t *pu
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_read_cachestat_apps_table(int maps_per_core)
+static void ebpf_read_cachestat_apps_table(int maps_per_core, int max_period)
{
netdata_cachestat_pid_t *cv = cachestat_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
size_t length = sizeof(netdata_cachestat_pid_t);
if (maps_per_core)
length *= ebpf_nprocs;
- while (pids) {
- key = pids->pid;
-
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
+ goto end_cachestat_loop;
}
cachestat_apps_accumulator(cv, maps_per_core);
- cachestat_fill_pid(key, cv);
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, cv->tgid);
+ if (!local_pid)
+ goto end_cachestat_loop;
+
+ netdata_publish_cachestat_t *publish = &local_pid->cachestat;
+ if (!publish->ct || publish->ct != cv->ct){
+ cachestat_save_pid_values(publish, cv);
+ local_pid->not_updated = 0;
+ } else if (++local_pid->not_updated >= max_period) {
+ bpf_map_delete_elem(fd, &key);
+ local_pid->not_updated = 0;
+ }
+end_cachestat_loop:
// We are cleaning to avoid passing data read from one process to other.
memset(cv, 0, length);
-
- pids = pids->next;
+ key = next_key;
}
}
@@ -781,14 +750,8 @@ static void ebpf_read_cachestat_apps_table(int maps_per_core)
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_cachestat_cgroup(int maps_per_core)
+static void ebpf_update_cachestat_cgroup()
{
- netdata_cachestat_pid_t *cv = cachestat_vector;
- int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_cachestat_pid_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
ebpf_cgroup_target_t *ect;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
@@ -796,19 +759,11 @@ static void ebpf_update_cachestat_cgroup(int maps_per_core)
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_cachestat_pid_t *out = &pids->cachestat;
- if (likely(cachestat_pid) && cachestat_pid[pid]) {
- netdata_publish_cachestat_t *in = cachestat_pid[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_cachestat_t *in = &local_pid->cachestat;
memcpy(out, &in->current, sizeof(netdata_cachestat_pid_t));
- } else {
- memset(cv, 0, length);
- if (bpf_map_lookup_elem(fd, &pid, cv)) {
- continue;
- }
-
- cachestat_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_cachestat_pid_t));
}
}
}
@@ -816,6 +771,104 @@ static void ebpf_update_cachestat_cgroup(int maps_per_core)
}
/**
+ * Cachestat sum PIDs
+ *
+ * Sum values for all PIDs associated to a group
+ *
+ * @param publish output structure.
+ * @param root structure with listed IPs
+ */
+void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct ebpf_pid_on_target *root)
+{
+ memcpy(&publish->prev, &publish->current,sizeof(publish->current));
+ memset(&publish->current, 0, sizeof(publish->current));
+
+ netdata_cachestat_pid_t *dst = &publish->current;
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_cachestat_t *w = &local_pid->cachestat;
+ netdata_cachestat_pid_t *src = &w->current;
+ dst->account_page_dirtied += src->account_page_dirtied;
+ dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
+ dst->mark_buffer_dirty += src->mark_buffer_dirty;
+ dst->mark_page_accessed += src->mark_page_accessed;
+ }
+
+ root = root->next;
+ }
+}
+
+/**
+ * Resume apps data
+ */
+void ebpf_resume_apps_data()
+{
+ struct ebpf_target *w;
+
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1 << EBPF_MODULE_CACHESTAT_IDX))))
+ continue;
+
+ ebpf_cachestat_sum_pids(&w->cachestat, w->root_pid);
+ }
+}
+
+/**
+ * Cachestat thread
+ *
+ * Thread used to generate cachestat charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always return NULL
+ */
+void *ebpf_read_cachestat_thread(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ int maps_per_core = em->maps_per_core;
+ int update_every = em->update_every;
+ int max_period = update_every * EBPF_CLEANUP_FACTOR;
+
+ int counter = update_every - 1;
+
+ uint32_t lifetime = em->lifetime;
+ uint32_t running_time = 0;
+ usec_t period = update_every * USEC_PER_SEC;
+ while (!ebpf_plugin_exit && running_time < lifetime) {
+ (void)heartbeat_next(&hb, period);
+ if (ebpf_plugin_exit || ++counter != update_every)
+ continue;
+
+ netdata_thread_disable_cancelability();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ ebpf_read_cachestat_apps_table(maps_per_core, max_period);
+ ebpf_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ counter = 0;
+
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (running_time && !em->running_time)
+ running_time = update_every;
+ else
+ running_time += update_every;
+
+ em->running_time = running_time;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+ netdata_thread_enable_cancelability();
+ }
+
+ return NULL;
+}
+
+/**
* Create apps charts
*
* Call ebpf_create_chart to create the charts on apps submenu.
@@ -944,7 +997,7 @@ static void cachestat_send_global(netdata_publish_cachestat_t *publish)
ebpf_one_dimension_write_charts(
NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART, ptr[NETDATA_CACHESTAT_IDX_DIRTY].dimension,
- cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY]);
+ (long long)cachestat_hash_values[NETDATA_KEY_CALLS_MARK_BUFFER_DIRTY]);
ebpf_one_dimension_write_charts(
NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART, ptr[NETDATA_CACHESTAT_IDX_HIT].dimension, publish->hit);
@@ -955,35 +1008,6 @@ static void cachestat_send_global(netdata_publish_cachestat_t *publish)
}
/**
- * Cachestat sum PIDs
- *
- * Sum values for all PIDs associated to a group
- *
- * @param publish output structure.
- * @param root structure with listed IPs
- */
-void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct ebpf_pid_on_target *root)
-{
- memcpy(&publish->prev, &publish->current,sizeof(publish->current));
- memset(&publish->current, 0, sizeof(publish->current));
-
- netdata_cachestat_pid_t *dst = &publish->current;
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_cachestat_t *w = cachestat_pid[pid];
- if (w) {
- netdata_cachestat_pid_t *src = &w->current;
- dst->account_page_dirtied += src->account_page_dirtied;
- dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
- dst->mark_buffer_dirty += src->mark_buffer_dirty;
- dst->mark_page_accessed += src->mark_page_accessed;
- }
-
- root = root->next;
- }
-}
-
-/**
* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
@@ -993,17 +1017,17 @@ void ebpf_cache_send_apps_data(struct ebpf_target *root)
struct ebpf_target *w;
collected_number value;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_CACHESTAT_IDX))))
continue;
- ebpf_cachestat_sum_pids(&w->cachestat, w->root_pid);
netdata_cachestat_pid_t *current = &w->cachestat.current;
netdata_cachestat_pid_t *prev = &w->cachestat.prev;
uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
- w->cachestat.dirty = mbd;
+ w->cachestat.dirty = (long long)mbd;
uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;
@@ -1029,6 +1053,7 @@ void ebpf_cache_send_apps_data(struct ebpf_target *root)
write_chart_dimension("misses", value);
ebpf_write_end_chart();
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -1073,7 +1098,7 @@ void ebpf_cachestat_calc_chart_values()
uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
- ect->publish_cachestat.dirty = mbd;
+ ect->publish_cachestat.dirty = (long long)mbd;
uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;
@@ -1090,35 +1115,82 @@ void ebpf_cachestat_calc_chart_values()
**/
static void ebpf_create_systemd_cachestat_charts(int update_every)
{
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_RATIO_CHART,
- "Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21100,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
+ static ebpf_systemd_args_t data_hit_ratio = {
+ .title = "Hit ratio",
+ .units = EBPF_COMMON_DIMENSION_PERCENTAGE,
+ .family = NETDATA_CACHESTAT_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21100,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ .update_every = 0,
+ .suffix = NETDATA_CACHESTAT_HIT_RATIO_CHART,
+ .dimension = "percentage"
+ };
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_DIRTY_CHART,
- "Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21101,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
+ static ebpf_systemd_args_t data_dirty = {
+ .title = "Number of dirty pages",
+ .units = EBPF_CACHESTAT_DIMENSION_PAGE,
+ .family = NETDATA_CACHESTAT_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21101,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ .update_every = 0,
+ .suffix = NETDATA_CACHESTAT_DIRTY_CHART,
+ .dimension = "pages"
+ };
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_CHART, "Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21102,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
+ static ebpf_systemd_args_t data_hit = {
+ .title = "Number of accessed pages",
+ .units = EBPF_CACHESTAT_DIMENSION_HITS,
+ .family = NETDATA_CACHESTAT_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21102,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ .update_every = 0,
+ .suffix = NETDATA_CACHESTAT_HIT_CHART,
+ .dimension = "hits"
+ };
- ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_MISSES_CHART, "Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE, 21103,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
- update_every);
+ static ebpf_systemd_args_t data_miss = {
+ .title = "Files out of page cache",
+ .units = EBPF_CACHESTAT_DIMENSION_MISSES,
+ .family = NETDATA_CACHESTAT_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21103,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ .update_every = 0,
+ .suffix = NETDATA_CACHESTAT_MISSES_CHART,
+ .dimension = "misses"
+ };
+
+ if (!data_miss.update_every)
+ data_hit_ratio.update_every = data_dirty.update_every =
+ data_hit.update_every = data_miss.update_every = update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_CACHESTAT_CHART))
+ continue;
+
+ data_hit_ratio.id = data_dirty.id = data_hit.id = data_miss.id = w->name;
+ ebpf_create_charts_on_systemd(&data_hit_ratio);
+
+ ebpf_create_charts_on_systemd(&data_dirty);
+
+ ebpf_create_charts_on_systemd(&data_hit);
+
+ ebpf_create_charts_on_systemd(&data_miss);
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_CACHESTAT_CHART;
+ }
}
/**
@@ -1130,37 +1202,27 @@ static void ebpf_send_systemd_cachestat_charts()
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART, "");
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.ratio);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_CACHESTAT_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.dirty);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_CACHESTAT_HIT_RATIO_CHART);
+ write_chart_dimension("percentage", (long long)ect->publish_cachestat.ratio);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.hit);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_CACHESTAT_DIRTY_CHART);
+ write_chart_dimension("pages", (long long)ect->publish_cachestat.dirty);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_MISSES_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_cachestat.miss);
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_CACHESTAT_HIT_CHART);
+ write_chart_dimension("hits", (long long)ect->publish_cachestat.hit);
+ ebpf_write_end_chart();
+
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_CACHESTAT_MISSES_CHART);
+ write_chart_dimension("misses", (long long)ect->publish_cachestat.miss);
+ ebpf_write_end_chart();
}
- ebpf_write_end_chart();
}
/**
@@ -1199,7 +1261,7 @@ static void ebpf_create_specific_cachestat_charts(char *type, int update_every)
{
ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART,
"Hit ratio",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200,
ebpf_create_global_dimension,
@@ -1207,7 +1269,7 @@ static void ebpf_create_specific_cachestat_charts(char *type, int update_every)
ebpf_create_chart(type, NETDATA_CACHESTAT_DIRTY_CHART,
"Number of dirty pages",
- EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201,
ebpf_create_global_dimension,
@@ -1216,7 +1278,7 @@ static void ebpf_create_specific_cachestat_charts(char *type, int update_every)
ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_CHART,
"Number of accessed files",
- EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202,
ebpf_create_global_dimension,
@@ -1225,7 +1287,7 @@ static void ebpf_create_specific_cachestat_charts(char *type, int update_every)
ebpf_create_chart(type, NETDATA_CACHESTAT_MISSES_CHART,
"Files out of page cache",
- EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5203,
ebpf_create_global_dimension,
@@ -1279,15 +1341,11 @@ static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every
*/
void ebpf_cachestat_send_cgroup_data(int update_every)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
ebpf_cachestat_calc_chart_values();
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_cachestat_charts(update_every);
}
@@ -1344,12 +1402,9 @@ static void cachestat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_cachestat_read_global_tables(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- ebpf_read_cachestat_apps_table(maps_per_core);
if (cgroups)
- ebpf_update_cachestat_cgroup(maps_per_core);
+ ebpf_update_cachestat_cgroup();
pthread_mutex_lock(&lock);
@@ -1358,16 +1413,10 @@ static void cachestat_collector(ebpf_module_t *em)
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_cache_send_apps_data(apps_groups_root_target);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_cachestat_pid)
- ebpf_send_data_aral_chart(ebpf_aral_cachestat_pid, em);
-#endif
-
if (cgroups)
ebpf_cachestat_send_cgroup_data(update_every);
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -1442,17 +1491,10 @@ static void ebpf_create_memory_charts(ebpf_module_t *em)
*
* We are not testing the return, because callocz does this and shutdown the software
* case it was not possible to allocate.
- *
- * @param apps is apps enabled?
*/
-static void ebpf_cachestat_allocate_global_vectors(int apps)
+static void ebpf_cachestat_allocate_global_vectors()
{
- if (apps) {
- cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *));
- ebpf_cachestat_aral_init();
- cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));
- }
-
+ cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));
cachestat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
memset(cachestat_hash_values, 0, NETDATA_CACHESTAT_END * sizeof(netdata_idx_t));
@@ -1560,7 +1602,7 @@ void *ebpf_cachestat_thread(void *ptr)
goto endcachestat;
}
- ebpf_cachestat_allocate_global_vectors(em->apps_charts);
+ ebpf_cachestat_allocate_global_vectors();
int algorithms[NETDATA_CACHESTAT_END] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
@@ -1574,13 +1616,16 @@ void *ebpf_cachestat_thread(void *ptr)
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
ebpf_create_memory_charts(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_cachestat_pid)
- cachestat_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_CACHESTAT_ARAL_NAME, em);
-#endif
pthread_mutex_unlock(&lock);
+ ebpf_read_cachestat.thread = mallocz(sizeof(netdata_thread_t));
+ netdata_thread_create(ebpf_read_cachestat.thread,
+ ebpf_read_cachestat.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_read_cachestat_thread,
+ em);
+
cachestat_collector(em);
endcachestat:
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/src/collectors/ebpf.plugin/ebpf_cachestat.h
index ba2b12833..4578fbe98 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ b/src/collectors/ebpf.plugin/ebpf_cachestat.h
@@ -14,7 +14,6 @@
#define NETDATA_CACHESTAT_MISSES_CHART "cachestat_misses"
#define NETDATA_CACHESTAT_SUBMENU "page_cache"
-#define NETDATA_CACHESTAT_CGROUP_SUBMENU "page cache (eBPF)"
#define EBPF_CACHESTAT_DIMENSION_PAGE "pages/s"
#define EBPF_CACHESTAT_DIMENSION_HITS "hits/s"
@@ -29,13 +28,10 @@
#define NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT "cgroup.cachestat_hits"
#define NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT "cgroup.cachestat_misses"
-#define NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT "services.cachestat_ratio"
-#define NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT "services.cachestat_dirties"
-#define NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT "services.cachestat_hits"
-#define NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT "services.cachestat_misses"
-
-// ARAL Name
-#define NETDATA_EBPF_CACHESTAT_ARAL_NAME "ebpf_cachestat"
+#define NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT "systemd.services.cachestat_ratio"
+#define NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT "systemd.services.cachestat_dirties"
+#define NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT "systemd.services.cachestat_hits"
+#define NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT "systemd.services.cachestat_misses"
// variables
enum cachestat_counters {
@@ -69,6 +65,12 @@ enum cachestat_tables {
};
typedef struct netdata_publish_cachestat_pid {
+ uint64_t ct;
+ uint32_t tgid;
+ uint32_t uid;
+ uint32_t gid;
+ char name[TASK_COMM_LEN];
+
uint64_t add_to_page_cache_lru;
uint64_t mark_page_accessed;
uint64_t account_page_dirtied;
@@ -76,6 +78,8 @@ typedef struct netdata_publish_cachestat_pid {
} netdata_cachestat_pid_t;
typedef struct netdata_publish_cachestat {
+ uint64_t ct;
+
long long ratio;
long long dirty;
long long hit;
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/src/collectors/ebpf.plugin/ebpf_cgroup.c
index 1aadfbaf8..881a00ddf 100644
--- a/collectors/ebpf.plugin/ebpf_cgroup.c
+++ b/src/collectors/ebpf.plugin/ebpf_cgroup.c
@@ -327,17 +327,22 @@ void ebpf_parse_cgroup_shm_data()
* @param module chart module name, this is the eBPF thread.
* @param update_every value to overwrite the update frequency set by the server.
*/
-void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, char *context, char *module, int update_every)
+void ebpf_create_charts_on_systemd(ebpf_systemd_args_t *chart)
{
- ebpf_cgroup_target_t *w;
- ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, "", title, units, family, charttype, context,
- order, update_every, module);
-
- for (w = ebpf_cgroup_pids; w; w = w->next) {
- if (unlikely(w->systemd) && unlikely(w->updated))
- fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
- }
+ ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY,
+ chart->id,
+ chart->suffix,
+ chart->title,
+ chart->units,
+ chart->family,
+ chart->charttype,
+ chart->context,
+ chart->order,
+ chart->update_every,
+ chart->module);
+ ebpf_create_chart_labels("service_name", chart->id, 0);
+ ebpf_commit_label();
+ fprintf(stdout, "DIMENSION %s '' %s 1 1\n", chart->dimension, chart->algorithm);
}
// --------------------------------------------------------------------------------------------------------------------
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/src/collectors/ebpf.plugin/ebpf_dcstat.c
index 4ff6c82ab..7fa9ca1f0 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/src/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -59,9 +59,16 @@ netdata_ebpf_targets_t dc_targets[] = { {.name = "lookup_fast", .mode = EBPF_LOA
{.name = "d_lookup", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-#ifdef NETDATA_DEV_MODE
-int dcstat_disable_priority;
-#endif
+struct netdata_static_thread ebpf_read_dcstat = {
+ .name = "EBPF_READ_DCSTAT",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
#ifdef LIBBPF_MAJOR_VERSION
/**
@@ -75,7 +82,6 @@ static inline void ebpf_dc_disable_probes(struct dc_bpf *obj)
{
bpf_program__set_autoload(obj->progs.netdata_lookup_fast_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_d_lookup_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_kprobe, false);
}
/*
@@ -89,7 +95,6 @@ static inline void ebpf_dc_disable_trampoline(struct dc_bpf *obj)
{
bpf_program__set_autoload(obj->progs.netdata_lookup_fast_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_d_lookup_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_fentry, false);
}
/**
@@ -106,9 +111,6 @@ static void ebpf_dc_set_trampoline_target(struct dc_bpf *obj)
bpf_program__set_attach_target(obj->progs.netdata_d_lookup_fexit, 0,
dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_dcstat_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
}
/**
@@ -125,7 +127,7 @@ static int ebpf_dc_attach_probes(struct dc_bpf *obj)
obj->links.netdata_d_lookup_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_d_lookup_kretprobe,
true,
dc_targets[NETDATA_DC_TARGET_D_LOOKUP].name);
- int ret = libbpf_get_error(obj->links.netdata_d_lookup_kretprobe);
+ long ret = libbpf_get_error(obj->links.netdata_d_lookup_kretprobe);
if (ret)
return -1;
@@ -140,13 +142,6 @@ static int ebpf_dc_attach_probes(struct dc_bpf *obj)
if (ret)
return -1;
- obj->links.netdata_dcstat_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_dcstat_release_task_kprobe,
- false,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_dcstat_release_task_kprobe);
- if (ret)
- return -1;
-
return 0;
}
@@ -206,19 +201,6 @@ netdata_ebpf_program_loaded_t ebpf_dc_update_load(ebpf_module_t *em)
}
/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_dc_disable_release_task(struct dc_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_dcstat_release_task_fentry, false);
-}
-
-/**
* Load and attach
*
* Load and attach the eBPF code in kernel.
@@ -241,9 +223,6 @@ static inline int ebpf_dc_load_and_attach(struct dc_bpf *obj, ebpf_module_t *em)
ebpf_dc_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_dc_disable_release_task(obj);
-
int ret = dc_bpf__load(obj);
if (ret) {
return ret;
@@ -298,11 +277,11 @@ static void ebpf_obsolete_specific_dc_charts(char *type, int update_every);
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_dc_services(ebpf_module_t *em)
+static void ebpf_obsolete_dc_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_DC_HIT_CHART,
- "",
"Percentage of files inside directory cache",
EBPF_COMMON_DIMENSION_PERCENTAGE,
NETDATA_DIRECTORY_CACHE_SUBMENU,
@@ -312,8 +291,8 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_DC_REFERENCE_CHART,
- "",
"Count file access",
EBPF_COMMON_DIMENSION_FILES,
NETDATA_DIRECTORY_CACHE_SUBMENU,
@@ -323,8 +302,8 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_DC_REQUEST_NOT_CACHE_CHART,
- "",
"Files not present inside directory cache",
EBPF_COMMON_DIMENSION_FILES,
NETDATA_DIRECTORY_CACHE_SUBMENU,
@@ -334,14 +313,14 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_DC_REQUEST_NOT_FOUND_CHART,
- "",
"Files not found",
EBPF_COMMON_DIMENSION_FILES,
NETDATA_DIRECTORY_CACHE_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT,
- 21202,
+ 21203,
em->update_every);
}
@@ -355,12 +334,13 @@ static void ebpf_obsolete_dc_services(ebpf_module_t *em)
static inline void ebpf_obsolete_dc_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_dc_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_dc_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_dc_charts(ect->name, em->update_every);
}
@@ -378,6 +358,7 @@ void ebpf_obsolete_dc_apps_charts(struct ebpf_module *em)
{
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_DCSTAT_IDX))))
continue;
@@ -428,6 +409,7 @@ void ebpf_obsolete_dc_apps_charts(struct ebpf_module *em)
w->charts_created &= ~(1<<EBPF_MODULE_DCSTAT_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -473,6 +455,9 @@ static void ebpf_dcstat_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (ebpf_read_dcstat.thread)
+ netdata_thread_cancel(*ebpf_read_dcstat.thread);
+
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {
@@ -486,11 +471,6 @@ static void ebpf_dcstat_exit(void *ptr)
ebpf_obsolete_dc_global(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_dcstat_pid)
- ebpf_statistic_obsolete_aral_chart(em, dcstat_disable_priority);
-#endif
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -523,6 +503,173 @@ static void ebpf_dcstat_exit(void *ptr)
*****************************************************************/
/**
+ * Apps Accumulator
+ *
+ * Sum all values read from kernel and store in the first address.
+ *
+ * @param out the vector with read values.
+ * @param maps_per_core do I need to read all cores?
+ */
+static void ebpf_dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per_core)
+{
+ int i, end = (maps_per_core) ? ebpf_nprocs : 1;
+ netdata_dcstat_pid_t *total = &out[0];
+ uint64_t ct = total->ct;
+ for (i = 1; i < end; i++) {
+ netdata_dcstat_pid_t *w = &out[i];
+ total->cache_access += w->cache_access;
+ total->file_system += w->file_system;
+ total->not_found += w->not_found;
+
+ if (w->ct > ct)
+ ct = w->ct;
+ }
+ total->ct = ct;
+}
+
+/**
+ * Read Directory Cache APPS table
+ *
+ * Read the apps table and store data inside the structure.
+ *
+ * @param maps_per_core do I need to read all cores?
+ */
+static void ebpf_read_dc_apps_table(int maps_per_core, int max_period)
+{
+ netdata_dcstat_pid_t *cv = dcstat_vector;
+ int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
+ size_t length = sizeof(netdata_dcstat_pid_t);
+ if (maps_per_core)
+ length *= ebpf_nprocs;
+
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
+ if (bpf_map_lookup_elem(fd, &key, cv)) {
+ goto end_dc_loop;
+ }
+
+ ebpf_dcstat_apps_accumulator(cv, maps_per_core);
+
+ ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(key, cv->tgid);
+ if (pid_stat) {
+ netdata_publish_dcstat_t *publish = &pid_stat->dc;
+ if (!publish->ct || publish->ct != cv->ct) {
+ memcpy(&publish->curr, &cv[0], sizeof(netdata_dcstat_pid_t));
+ pid_stat->not_updated = 0;
+ } else if (++pid_stat->not_updated >= max_period) {
+ bpf_map_delete_elem(fd, &key);
+ pid_stat->not_updated = 0;
+ }
+ }
+
+end_dc_loop:
+ // We are cleaning to avoid passing data read from one process to other.
+ memset(cv, 0, length);
+ key = next_key;
+ }
+}
+
+/**
+ * Cachestat sum PIDs
+ *
+ * Sum values for all PIDs associated to a group
+ *
+ * @param publish output structure.
+ * @param root structure with listed IPs
+ */
+void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct ebpf_pid_on_target *root)
+{
+ memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
+ netdata_dcstat_pid_t *dst = &publish->curr;
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(pid, 0);
+ if (pid_stat) {
+ netdata_publish_dcstat_t *w = &pid_stat->dc;
+ netdata_dcstat_pid_t *src = &w->curr;
+ dst->cache_access += src->cache_access;
+ dst->file_system += src->file_system;
+ dst->not_found += src->not_found;
+ }
+
+ root = root->next;
+ }
+}
+
+/**
+ * Resume apps data
+ */
+void ebpf_dc_resume_apps_data()
+{
+ struct ebpf_target *w;
+
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_DCSTAT_IDX))))
+ continue;
+
+ ebpf_dcstat_sum_pids(&w->dcstat, w->root_pid);
+
+ uint64_t cache = w->dcstat.curr.cache_access;
+ uint64_t not_found = w->dcstat.curr.not_found;
+
+ dcstat_update_publish(&w->dcstat, cache, not_found);
+ }
+}
+
+/**
+ * DCstat thread
+ *
+ * Thread used to generate dcstat charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always return NULL
+ */
+void *ebpf_read_dcstat_thread(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ int maps_per_core = em->maps_per_core;
+ int update_every = em->update_every;
+
+ int counter = update_every - 1;
+
+ uint32_t lifetime = em->lifetime;
+ uint32_t running_time = 0;
+ usec_t period = update_every * USEC_PER_SEC;
+ int max_period = update_every * EBPF_CLEANUP_FACTOR;
+ while (!ebpf_plugin_exit && running_time < lifetime) {
+ (void)heartbeat_next(&hb, period);
+ if (ebpf_plugin_exit || ++counter != update_every)
+ continue;
+
+ netdata_thread_disable_cancelability();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ ebpf_read_dc_apps_table(maps_per_core, max_period);
+ ebpf_dc_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ counter = 0;
+
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (running_time && !em->running_time)
+ running_time = update_every;
+ else
+ running_time += update_every;
+
+ em->running_time = running_time;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+ netdata_thread_enable_cancelability();
+ }
+
+ return NULL;
+}
+
+/**
* Create apps charts
*
* Call ebpf_create_chart to create the charts on apps submenu.
@@ -611,106 +758,14 @@ void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr)
*****************************************************************/
/**
- * Apps Accumulator
- *
- * Sum all values read from kernel and store in the first address.
- *
- * @param out the vector with read values.
- * @param maps_per_core do I need to read all cores?
- */
-static void dcstat_apps_accumulator(netdata_dcstat_pid_t *out, int maps_per_core)
-{
- int i, end = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_dcstat_pid_t *total = &out[0];
- for (i = 1; i < end; i++) {
- netdata_dcstat_pid_t *w = &out[i];
- total->cache_access += w->cache_access;
- total->file_system += w->file_system;
- total->not_found += w->not_found;
- }
-}
-
-/**
- * Save PID values
- *
- * Save the current values inside the structure
- *
- * @param out vector used to plot charts
- * @param publish vector with values read from hash tables.
- */
-static inline void dcstat_save_pid_values(netdata_publish_dcstat_t *out, netdata_dcstat_pid_t *publish)
-{
- memcpy(&out->curr, &publish[0], sizeof(netdata_dcstat_pid_t));
-}
-
-/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid that we are collecting data
- * @param out values read from hash tables;
- */
-static void dcstat_fill_pid(uint32_t current_pid, netdata_dcstat_pid_t *publish)
-{
- netdata_publish_dcstat_t *curr = dcstat_pid[current_pid];
- if (!curr) {
- curr = ebpf_publish_dcstat_get();
- dcstat_pid[current_pid] = curr;
- }
-
- dcstat_save_pid_values(curr, publish);
-}
-
-/**
- * Read Directory Cache APPS table
- *
- * Read the apps table and store data inside the structure.
- *
- * @param maps_per_core do I need to read all cores?
- */
-static void read_dc_apps_table(int maps_per_core)
-{
- netdata_dcstat_pid_t *cv = dcstat_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_dcstat_pid_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- while (pids) {
- key = pids->pid;
-
- if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
- }
-
- dcstat_apps_accumulator(cv, maps_per_core);
-
- dcstat_fill_pid(key, cv);
-
- // We are cleaning to avoid passing data read from one process to other.
- memset(cv, 0, length);
-
- pids = pids->next;
- }
-}
-
-/**
* Update cgroup
*
* Update cgroup data based in collected PID.
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_dc_cgroup(int maps_per_core)
+static void ebpf_update_dc_cgroup()
{
- netdata_dcstat_pid_t *cv = dcstat_vector;
- int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
- size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs;
-
ebpf_cgroup_target_t *ect;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
@@ -718,19 +773,11 @@ static void ebpf_update_dc_cgroup(int maps_per_core)
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_dcstat_pid_t *out = &pids->dc;
- if (likely(dcstat_pid) && dcstat_pid[pid]) {
- netdata_publish_dcstat_t *in = dcstat_pid[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_dcstat_t *in = &local_pid->dc;
memcpy(out, &in->curr, sizeof(netdata_dcstat_pid_t));
- } else {
- memset(cv, 0, length);
- if (bpf_map_lookup_elem(fd, &pid, cv)) {
- continue;
- }
-
- dcstat_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_dcstat_pid_t));
}
}
}
@@ -763,32 +810,6 @@ static void ebpf_dc_read_global_tables(netdata_idx_t *stats, int maps_per_core)
}
/**
- * Cachestat sum PIDs
- *
- * Sum values for all PIDs associated to a group
- *
- * @param publish output structure.
- * @param root structure with listed IPs
- */
-void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct ebpf_pid_on_target *root)
-{
- memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
- netdata_dcstat_pid_t *dst = &publish->curr;
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_dcstat_t *w = dcstat_pid[pid];
- if (w) {
- netdata_dcstat_pid_t *src = &w->curr;
- dst->cache_access += src->cache_access;
- dst->file_system += src->file_system;
- dst->not_found += src->not_found;
- }
-
- root = root->next;
- }
-}
-
-/**
* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
@@ -798,17 +819,11 @@ void ebpf_dcache_send_apps_data(struct ebpf_target *root)
struct ebpf_target *w;
collected_number value;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_DCSTAT_IDX))))
continue;
- ebpf_dcstat_sum_pids(&w->dcstat, w->root_pid);
-
- uint64_t cache = w->dcstat.curr.cache_access;
- uint64_t not_found = w->dcstat.curr.not_found;
-
- dcstat_update_publish(&w->dcstat, cache, not_found);
-
value = (collected_number) w->dcstat.ratio;
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_dc_hit");
write_chart_dimension("ratio", value);
@@ -845,6 +860,7 @@ void ebpf_dcache_send_apps_data(struct ebpf_target *root)
ebpf_write_end_chart();
w->dcstat.prev.not_found = w->dcstat.curr.not_found;
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -1024,45 +1040,82 @@ void ebpf_dc_calc_chart_values()
**/
static void ebpf_create_systemd_dc_charts(int update_every)
{
- ebpf_create_charts_on_systemd(NETDATA_DC_HIT_CHART,
- "Percentage of files inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21200,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
+ static ebpf_systemd_args_t data_dc_hit_ratio = {
+ .title = "Percentage of files inside directory cache",
+ .units = EBPF_COMMON_DIMENSION_PERCENTAGE,
+ .family = NETDATA_DIRECTORY_CACHE_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21200,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ .update_every = 0,
+ .suffix = NETDATA_DC_HIT_CHART,
+ .dimension = "percentage"
+ };
- ebpf_create_charts_on_systemd(NETDATA_DC_REFERENCE_CHART,
- "Count file access",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21201,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
+ static ebpf_systemd_args_t data_dc_references = {
+ .title = "Count file access",
+ .units = EBPF_COMMON_DIMENSION_FILES,
+ .family = NETDATA_DIRECTORY_CACHE_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21201,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ .update_every = 0,
+ .suffix = NETDATA_DC_REFERENCE_CHART,
+ .dimension = "files"
+ };
- ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_CACHE_CHART,
- "Files not present inside directory cache",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21202,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
+ static ebpf_systemd_args_t data_dc_not_cache = {
+ .title = "Files not present inside directory cache",
+ .units = EBPF_COMMON_DIMENSION_FILES,
+ .family = NETDATA_DIRECTORY_CACHE_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21202,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ .update_every = 0,
+ .suffix = NETDATA_DC_REQUEST_NOT_CACHE_CHART,
+ .dimension = "files"
+ };
- ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_FOUND_CHART,
- "Files not found",
- EBPF_COMMON_DIMENSION_FILES,
- NETDATA_DIRECTORY_CACHE_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21202,
- ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
- update_every);
+ static ebpf_systemd_args_t data_dc_not_found = {
+ .title = "Files not found",
+ .units = EBPF_COMMON_DIMENSION_FILES,
+ .family = NETDATA_DIRECTORY_CACHE_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_LINE,
+ .order = 21203,
+ .algorithm = EBPF_CHART_ALGORITHM_ABSOLUTE,
+ .context = NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ .update_every = 0,
+ .suffix = NETDATA_DC_REQUEST_NOT_FOUND_CHART,
+ .dimension = "files"
+ };
+
+ if (!data_dc_not_cache.update_every)
+ data_dc_hit_ratio.update_every = data_dc_not_cache.update_every =
+ data_dc_not_found.update_every = data_dc_references.update_every = update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_DC_CHART))
+ continue;
+
+ data_dc_hit_ratio.id = data_dc_not_cache.id = data_dc_not_found.id = data_dc_references.id = w->name;
+ ebpf_create_charts_on_systemd(&data_dc_hit_ratio);
+
+ ebpf_create_charts_on_systemd(&data_dc_not_found);
+
+ ebpf_create_charts_on_systemd(&data_dc_not_cache);
+
+ ebpf_create_charts_on_systemd(&data_dc_references);
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_DC_CHART;
+ }
}
/**
@@ -1072,48 +1125,37 @@ static void ebpf_create_systemd_dc_charts(int update_every)
*/
static void ebpf_send_systemd_dc_charts()
{
- collected_number value;
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_dc.ratio);
+ collected_number value;
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_DC_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REFERENCE_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_dc.cache_access);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_DC_HIT_CHART);
+ write_chart_dimension("percentage", (long long) ect->publish_dc.ratio);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- value = (collected_number) (!ect->publish_dc.cache_access) ? 0 :
- (long long )ect->publish_dc.curr.file_system - (long long)ect->publish_dc.prev.file_system;
- ect->publish_dc.prev.file_system = ect->publish_dc.curr.file_system;
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_DC_REFERENCE_CHART);
+ write_chart_dimension("files", (long long) ect->publish_dc.cache_access);
+ ebpf_write_end_chart();
- write_chart_dimension(ect->name, (long long) value);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_DC_REQUEST_NOT_CACHE_CHART);
+ value = (collected_number) (!ect->publish_dc.cache_access) ? 0 :
+ (long long )ect->publish_dc.curr.file_system - (long long)ect->publish_dc.prev.file_system;
+ ect->publish_dc.prev.file_system = ect->publish_dc.curr.file_system;
+ write_chart_dimension("files", (long long) value);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- value = (collected_number) (!ect->publish_dc.cache_access) ? 0 :
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_DC_REQUEST_NOT_FOUND_CHART);
+ value = (collected_number) (!ect->publish_dc.cache_access) ? 0 :
(long long)ect->publish_dc.curr.not_found - (long long)ect->publish_dc.prev.not_found;
- ect->publish_dc.prev.not_found = ect->publish_dc.curr.not_found;
+ ect->publish_dc.prev.not_found = ect->publish_dc.curr.not_found;
- write_chart_dimension(ect->name, (long long) value);
- }
+ write_chart_dimension("files", (long long) value);
+ ebpf_write_end_chart();
}
- ebpf_write_end_chart();
}
/**
@@ -1159,15 +1201,11 @@ static void ebpf_send_specific_dc_data(char *type, netdata_publish_dcstat_t *pdc
*/
void ebpf_dc_send_cgroup_data(int update_every)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
ebpf_dc_calc_chart_values();
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_dc_charts(update_every);
}
@@ -1223,12 +1261,9 @@ static void dcstat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_dc_read_global_tables(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- read_dc_apps_table(maps_per_core);
if (cgroups)
- ebpf_update_dc_cgroup(maps_per_core);
+ ebpf_update_dc_cgroup();
pthread_mutex_lock(&lock);
@@ -1237,16 +1272,10 @@ static void dcstat_collector(ebpf_module_t *em)
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_dcache_send_apps_data(apps_groups_root_target);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_dcstat_pid)
- ebpf_send_data_aral_chart(ebpf_aral_dcstat_pid, em);
-#endif
-
if (cgroups)
ebpf_dc_send_cgroup_data(update_every);
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -1301,17 +1330,10 @@ static void ebpf_create_dc_global_charts(int update_every)
*
* We are not testing the return, because callocz does this and shutdown the software
* case it was not possible to allocate.
- *
- * @param apps is apps enabled?
*/
-static void ebpf_dcstat_allocate_global_vectors(int apps)
+static void ebpf_dcstat_allocate_global_vectors()
{
- if (apps) {
- ebpf_dcstat_aral_init();
- dcstat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_dcstat_t *));
- dcstat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_dcstat_pid_t));
- }
-
+ dcstat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_dcstat_pid_t));
dcstat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
memset(dcstat_counter_aggregated_data, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_syscall_stat_t));
@@ -1388,7 +1410,7 @@ void *ebpf_dcstat_thread(void *ptr)
goto enddcstat;
}
- ebpf_dcstat_allocate_global_vectors(em->apps_charts);
+ ebpf_dcstat_allocate_global_vectors();
int algorithms[NETDATA_DCSTAT_IDX_END] = {
NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
@@ -1403,13 +1425,16 @@ void *ebpf_dcstat_thread(void *ptr)
ebpf_create_dc_global_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_dcstat_pid)
- dcstat_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_DCSTAT_ARAL_NAME, em);
-#endif
pthread_mutex_unlock(&lock);
+ ebpf_read_dcstat.thread = mallocz(sizeof(netdata_thread_t));
+ netdata_thread_create(ebpf_read_dcstat.thread,
+ ebpf_read_dcstat.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_read_dcstat_thread,
+ em);
+
dcstat_collector(em);
enddcstat:
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/src/collectors/ebpf.plugin/ebpf_dcstat.h
index 4d6aff12e..68f6d6131 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.h
+++ b/src/collectors/ebpf.plugin/ebpf_dcstat.h
@@ -24,10 +24,10 @@
#define NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT "cgroup.dc_not_cache"
#define NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT "cgroup.dc_not_found"
-#define NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT "services.dc_ratio"
-#define NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT "services.dc_reference"
-#define NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT "services.dc_not_cache"
-#define NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT "services.dc_not_found"
+#define NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT "systemd.services.dc_ratio"
+#define NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT "systemd.services.dc_reference"
+#define NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT "systemd.services.dc_not_cache"
+#define NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT "systemd.services.dc_not_found"
// ARAL name
#define NETDATA_EBPF_DCSTAT_ARAL_NAME "ebpf_dcstat"
@@ -64,12 +64,20 @@ enum directory_cache_targets {
};
typedef struct netdata_publish_dcstat_pid {
+ uint64_t ct;
+ uint32_t tgid;
+ uint32_t uid;
+ uint32_t gid;
+ char name[TASK_COMM_LEN];
+
uint64_t cache_access;
uint64_t file_system;
uint64_t not_found;
} netdata_dcstat_pid_t;
typedef struct netdata_publish_dcstat {
+ uint64_t ct;
+
long long ratio;
long long cache_access;
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/src/collectors/ebpf.plugin/ebpf_disk.c
index 466c2e3bb..466c2e3bb 100644
--- a/collectors/ebpf.plugin/ebpf_disk.c
+++ b/src/collectors/ebpf.plugin/ebpf_disk.c
diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/src/collectors/ebpf.plugin/ebpf_disk.h
index 487ed376d..487ed376d 100644
--- a/collectors/ebpf.plugin/ebpf_disk.h
+++ b/src/collectors/ebpf.plugin/ebpf_disk.h
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/src/collectors/ebpf.plugin/ebpf_fd.c
index 3c8f30d3e..e1dc5b356 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/src/collectors/ebpf.plugin/ebpf_fd.c
@@ -57,9 +57,16 @@ netdata_ebpf_targets_t fd_targets[] = { {.name = "open", .mode = EBPF_LOAD_TRAMP
{.name = "close", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-#ifdef NETDATA_DEV_MODE
-int fd_disable_priority;
-#endif
+struct netdata_static_thread ebpf_read_fd = {
+ .name = "EBPF_READ_FD",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
#ifdef LIBBPF_MAJOR_VERSION
/**
@@ -73,7 +80,6 @@ static inline void ebpf_fd_disable_probes(struct fd_bpf *obj)
{
bpf_program__set_autoload(obj->progs.netdata_sys_open_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_sys_open_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_kprobe, false);
if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
bpf_program__set_autoload(obj->progs.netdata___close_fd_kretprobe, false);
bpf_program__set_autoload(obj->progs.netdata___close_fd_kprobe, false);
@@ -118,7 +124,6 @@ static inline void ebpf_disable_trampoline(struct fd_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_close_fd_fexit, false);
bpf_program__set_autoload(obj->progs.netdata___close_fd_fentry, false);
bpf_program__set_autoload(obj->progs.netdata___close_fd_fexit, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false);
}
/*
@@ -150,7 +155,6 @@ static void ebpf_set_trampoline_target(struct fd_bpf *obj)
{
bpf_program__set_attach_target(obj->progs.netdata_sys_open_fentry, 0, fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
bpf_program__set_attach_target(obj->progs.netdata_sys_open_fexit, 0, fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
- bpf_program__set_attach_target(obj->progs.netdata_release_task_fd_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP);
if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
bpf_program__set_attach_target(
@@ -177,7 +181,7 @@ static int ebpf_fd_attach_probe(struct fd_bpf *obj)
{
obj->links.netdata_sys_open_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_sys_open_kprobe, false,
fd_targets[NETDATA_FD_SYSCALL_OPEN].name);
- int ret = libbpf_get_error(obj->links.netdata_sys_open_kprobe);
+ long ret = libbpf_get_error(obj->links.netdata_sys_open_kprobe);
if (ret)
return -1;
@@ -187,13 +191,6 @@ static int ebpf_fd_attach_probe(struct fd_bpf *obj)
if (ret)
return -1;
- obj->links.netdata_release_task_fd_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_release_task_fd_kprobe,
- false,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_release_task_fd_kprobe);
- if (ret)
- return -1;
-
if (!strcmp(fd_targets[NETDATA_FD_SYSCALL_CLOSE].name, close_targets[NETDATA_FD_CLOSE_FD])) {
obj->links.netdata_close_fd_kretprobe = bpf_program__attach_kprobe(obj->progs.netdata_close_fd_kretprobe, true,
fd_targets[NETDATA_FD_SYSCALL_CLOSE].name);
@@ -302,19 +299,6 @@ static void ebpf_fd_adjust_map(struct fd_bpf *obj, ebpf_module_t *em)
}
/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_fd_disable_release_task(struct fd_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_kprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false);
-}
-
-/**
* Load and attach
*
* Load and attach the eBPF code in kernel.
@@ -339,8 +323,6 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
ebpf_disable_specific_trampoline(obj);
ebpf_set_trampoline_target(obj);
- // TODO: Remove this in next PR, because this specific trampoline has an error.
- bpf_program__set_autoload(obj->progs.netdata_release_task_fd_fentry, false);
} else {
ebpf_disable_trampoline(obj);
ebpf_disable_specific_probes(obj);
@@ -348,9 +330,6 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
ebpf_fd_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_fd_disable_release_task(obj);
-
int ret = fd_bpf__load(obj);
if (ret) {
return ret;
@@ -382,14 +361,14 @@ static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em);
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_fd_services(ebpf_module_t *em)
+static void ebpf_obsolete_fd_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_FILE_OPEN,
- "",
"Number of open files",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_APPS_FILE_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NETDATA_CGROUP_FD_OPEN_CONTEXT,
20270,
@@ -397,11 +376,11 @@ static void ebpf_obsolete_fd_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
- "",
"Fails to open files",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_APPS_FILE_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT,
20271,
@@ -409,11 +388,11 @@ static void ebpf_obsolete_fd_services(ebpf_module_t *em)
}
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_FILE_CLOSED,
- "",
"Files closed",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_APPS_FILE_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NETDATA_CGROUP_FD_CLOSE_CONTEXT,
20272,
@@ -421,11 +400,11 @@ static void ebpf_obsolete_fd_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
- "",
"Fails to close files",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_APPS_FILE_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT,
20273,
@@ -443,12 +422,13 @@ static void ebpf_obsolete_fd_services(ebpf_module_t *em)
static inline void ebpf_obsolete_fd_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_fd_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_fd_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_fd_charts(ect->name, em);
}
@@ -466,6 +446,7 @@ void ebpf_obsolete_fd_apps_charts(struct ebpf_module *em)
{
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_FD_IDX))))
continue;
@@ -519,6 +500,7 @@ void ebpf_obsolete_fd_apps_charts(struct ebpf_module *em)
}
w->charts_created &= ~(1<<EBPF_MODULE_FD_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -566,6 +548,9 @@ static void ebpf_fd_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (ebpf_read_fd.thread)
+ netdata_thread_cancel(*ebpf_read_fd.thread);
+
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {
@@ -579,12 +564,6 @@ static void ebpf_fd_exit(void *ptr)
ebpf_obsolete_fd_global(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_fd_pid)
- ebpf_statistic_obsolete_aral_chart(em, fd_disable_priority);
-#endif
-
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -684,73 +663,152 @@ static void fd_apps_accumulator(netdata_fd_stat_t *out, int maps_per_core)
}
/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid that we are collecting data
- * @param out values read from hash tables;
- */
-static void fd_fill_pid(uint32_t current_pid, netdata_fd_stat_t *publish)
-{
- netdata_fd_stat_t *curr = fd_pid[current_pid];
- if (!curr) {
- curr = ebpf_fd_stat_get();
- fd_pid[current_pid] = curr;
- }
-
- memcpy(curr, &publish[0], sizeof(netdata_fd_stat_t));
-}
-
-/**
* Read APPS table
*
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
*/
-static void read_fd_apps_table(int maps_per_core)
+static void ebpf_read_fd_apps_table(int maps_per_core, int max_period)
{
netdata_fd_stat_t *fv = fd_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
size_t length = sizeof(netdata_fd_stat_t);
if (maps_per_core)
length *= ebpf_nprocs;
- while (pids) {
- key = pids->pid;
-
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (bpf_map_lookup_elem(fd, &key, fv)) {
- pids = pids->next;
- continue;
+ goto end_fd_loop;
}
fd_apps_accumulator(fv, maps_per_core);
- fd_fill_pid(key, fv);
+ ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(key, fv->tgid);
+ if (pid_stat) {
+ netdata_fd_stat_t *publish_fd = &pid_stat->fd;
+ if (!publish_fd->ct || publish_fd->ct != fv->ct) {
+ memcpy(publish_fd, &fv[0], sizeof(netdata_fd_stat_t));
+ pid_stat->not_updated = 0;
+ } else if (++pid_stat->not_updated >= max_period) {
+ bpf_map_delete_elem(fd, &key);
+ pid_stat->not_updated = 0;
+ }
+ }
+end_fd_loop:
// We are cleaning to avoid passing data read from one process to other.
memset(fv, 0, length);
+ key = next_key;
+ }
+}
- pids = pids->next;
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param fd the output
+ * @param root list of pids
+ */
+static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct ebpf_pid_on_target *root)
+{
+ memset(fd, 0, sizeof(netdata_fd_stat_t));
+
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(pid, 0);
+ if (pid_stat) {
+ netdata_fd_stat_t *w = &pid_stat->fd;
+ fd->open_call += w->open_call;
+ fd->close_call += w->close_call;
+ fd->open_err += w->open_err;
+ fd->close_err += w->close_err;
+ }
+
+ root = root->next;
+ }
+}
+
+/**
+ * Resume apps data
+ */
+void ebpf_fd_resume_apps_data()
+{
+ struct ebpf_target *w;
+
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_FD_IDX))))
+ continue;
+
+ ebpf_fd_sum_pids(&w->fd, w->root_pid);
}
}
/**
+ * FD thread
+ *
+ * Thread used to read file descriptor data from kernel tables.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always return NULL
+ */
+void *ebpf_read_fd_thread(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ int maps_per_core = em->maps_per_core;
+ int update_every = em->update_every;
+
+ int counter = update_every - 1;
+
+ uint32_t lifetime = em->lifetime;
+ uint32_t running_time = 0;
+ usec_t period = update_every * USEC_PER_SEC;
+ int max_period = update_every * EBPF_CLEANUP_FACTOR;
+ while (!ebpf_plugin_exit && running_time < lifetime) {
+ (void)heartbeat_next(&hb, period);
+ if (ebpf_plugin_exit || ++counter != update_every)
+ continue;
+
+ netdata_thread_disable_cancelability();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ ebpf_read_fd_apps_table(maps_per_core, max_period);
+ ebpf_fd_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ counter = 0;
+
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (running_time && !em->running_time)
+ running_time = update_every;
+ else
+ running_time += update_every;
+
+ em->running_time = running_time;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+ netdata_thread_enable_cancelability();
+ }
+
+ return NULL;
+}
+
+/**
* Update cgroup
*
* Update cgroup data collected per PID.
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_fd_cgroup(int maps_per_core)
+static void ebpf_update_fd_cgroup()
{
ebpf_cgroup_target_t *ect ;
- netdata_fd_stat_t *fv = fd_vector;
- int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
- size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
@@ -758,17 +816,11 @@ static void ebpf_update_fd_cgroup(int maps_per_core)
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_fd_stat_t *out = &pids->fd;
- if (likely(fd_pid) && fd_pid[pid]) {
- netdata_fd_stat_t *in = fd_pid[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_fd_stat_t *in = &local_pid->fd;
memcpy(out, in, sizeof(netdata_fd_stat_t));
- } else {
- memset(fv, 0, length);
- if (!bpf_map_lookup_elem(fd, &pid, fv)) {
- fd_apps_accumulator(fv, maps_per_core);
-
- memcpy(out, fv, sizeof(netdata_fd_stat_t));
- }
}
}
}
@@ -776,41 +828,6 @@ static void ebpf_update_fd_cgroup(int maps_per_core)
}
/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param fd the output
- * @param root list of pids
- */
-static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct ebpf_pid_on_target *root)
-{
- uint32_t open_call = 0;
- uint32_t close_call = 0;
- uint32_t open_err = 0;
- uint32_t close_err = 0;
-
- while (root) {
- int32_t pid = root->pid;
- netdata_fd_stat_t *w = fd_pid[pid];
- if (w) {
- open_call += w->open_call;
- close_call += w->close_call;
- open_err += w->open_err;
- close_err += w->close_err;
- }
-
- root = root->next;
- }
-
- // These conditions were added, because we are using incremental algorithm
- fd->open_call = (open_call >= fd->open_call) ? open_call : fd->open_call;
- fd->close_call = (close_call >= fd->close_call) ? close_call : fd->close_call;
- fd->open_err = (open_err >= fd->open_err) ? open_err : fd->open_err;
- fd->close_err = (close_err >= fd->close_err) ? close_err : fd->close_err;
-}
-
-/**
* Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
@@ -819,12 +836,11 @@ static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct ebpf_pid_on_target *r
void ebpf_fd_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
{
struct ebpf_target *w;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_FD_IDX))))
continue;
- ebpf_fd_sum_pids(&w->fd, w->root_pid);
-
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_file_open");
write_chart_dimension("calls", w->fd.open_call);
ebpf_write_end_chart();
@@ -845,6 +861,7 @@ void ebpf_fd_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
ebpf_write_end_chart();
}
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -888,7 +905,7 @@ static void ebpf_fd_sum_cgroup_pids(netdata_fd_stat_t *fd, struct pid_on_target2
static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
{
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
NETDATA_CGROUP_FD_OPEN_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400,
ebpf_create_global_dimension,
@@ -897,7 +914,7 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401,
ebpf_create_global_dimension,
@@ -907,7 +924,7 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
NETDATA_CGROUP_FD_CLOSE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402,
ebpf_create_global_dimension,
@@ -916,7 +933,7 @@ static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403,
ebpf_create_global_dimension,
@@ -1001,32 +1018,84 @@ static void ebpf_send_specific_fd_data(char *type, netdata_fd_stat_t *values, eb
**/
static void ebpf_create_systemd_fd_charts(ebpf_module_t *em)
{
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20061,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
+ static ebpf_systemd_args_t data_open = {
+ .title = "Number of open files",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_FILE_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20061,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_FD_OPEN_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_FD,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_FILE_OPEN,
+ .dimension = "calls"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20062,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
- }
+ static ebpf_systemd_args_t data_open_error = {
+ .title = "Fails to open files",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_FILE_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20062,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_FD,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20063,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
+ static ebpf_systemd_args_t data_close = {
+ .title = "Files closed",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_FILE_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20063,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_FD_CLOSE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_FD,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_FILE_CLOSED,
+ .dimension = "calls"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20064,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_FD, em->update_every);
+ static ebpf_systemd_args_t data_close_error = {
+ .title = "Fails to close files",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_FILE_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20064,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+        .context = NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_FD,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
+ .dimension = "calls"
+ };
+
+ if (!data_open.update_every)
+ data_open.update_every = data_open_error.update_every =
+ data_close.update_every = data_close_error.update_every = em->update_every;
+
+ ebpf_cgroup_target_t *w;
+ netdata_run_mode_t mode = em->mode;
+ for (w = ebpf_cgroup_pids; w; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_FD_CHART))
+ continue;
+
+ data_open.id = data_open_error.id = data_close.id = data_close_error.id = w->name;
+ ebpf_create_charts_on_systemd(&data_open);
+ if (mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&data_open_error);
+ }
+
+ ebpf_create_charts_on_systemd(&data_close);
+ if (mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&data_close_error);
+ }
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_FD_CHART;
}
}
@@ -1040,40 +1109,30 @@ static void ebpf_create_systemd_fd_charts(ebpf_module_t *em)
static void ebpf_send_systemd_fd_charts(ebpf_module_t *em)
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN, "");
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.open_call);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_FD_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.open_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_FILE_OPEN);
+ write_chart_dimension("calls", ect->publish_systemd_fd.open_call);
ebpf_write_end_chart();
- }
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.close_call);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_fd.open_err);
+ ebpf_write_end_chart();
}
- }
- ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_fd.close_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ write_chart_dimension("calls", ect->publish_systemd_fd.close_call);
ebpf_write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_fd.close_err);
+ ebpf_write_end_chart();
+ }
}
}
@@ -1084,17 +1143,13 @@ static void ebpf_send_systemd_fd_charts(ebpf_module_t *em)
*/
static void ebpf_fd_send_cgroup_data(ebpf_module_t *em)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_fd_sum_cgroup_pids(&ect->publish_systemd_fd, ect->pids);
}
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_fd_charts(em);
}
@@ -1148,30 +1203,21 @@ static void fd_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_fd_read_global_tables(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- read_fd_apps_table(maps_per_core);
if (cgroups)
- ebpf_update_fd_cgroup(maps_per_core);
+ ebpf_update_fd_cgroup();
pthread_mutex_lock(&lock);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_fd_pid)
- ebpf_send_data_aral_chart(ebpf_aral_fd_pid, em);
-#endif
-
ebpf_fd_send_data(em);
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_fd_send_apps_data(em, apps_groups_root_target);
- if (cgroups)
+ if (cgroups && shm_ebpf_cgroup.header && ebpf_cgroup_pids)
ebpf_fd_send_cgroup_data(em);
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -1327,17 +1373,10 @@ static void ebpf_create_fd_global_charts(ebpf_module_t *em)
*
* We are not testing the return, because callocz does this and shutdown the software
* case it was not possible to allocate.
- *
- * @param apps is apps enabled?
*/
-static void ebpf_fd_allocate_global_vectors(int apps)
+static inline void ebpf_fd_allocate_global_vectors()
{
- if (apps) {
- ebpf_fd_aral_init();
- fd_pid = callocz((size_t)pid_max, sizeof(netdata_fd_stat_t *));
- fd_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_fd_stat_t));
- }
-
+ fd_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_fd_stat_t));
fd_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
}
@@ -1401,7 +1440,7 @@ void *ebpf_fd_thread(void *ptr)
goto endfd;
}
- ebpf_fd_allocate_global_vectors(em->apps_charts);
+ ebpf_fd_allocate_global_vectors();
int algorithms[NETDATA_FD_SYSCALL_END] = {
NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX
@@ -1414,13 +1453,16 @@ void *ebpf_fd_thread(void *ptr)
ebpf_create_fd_global_charts(em);
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_fd_pid)
- fd_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_FD_ARAL_NAME, em);
-#endif
pthread_mutex_unlock(&lock);
+ ebpf_read_fd.thread = mallocz(sizeof(netdata_thread_t));
+ netdata_thread_create(ebpf_read_fd.thread,
+ ebpf_read_fd.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_read_fd_thread,
+ em);
+
fd_collector(em);
endfd:
diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/src/collectors/ebpf.plugin/ebpf_fd.h
index 00986673e..65658d9b3 100644
--- a/collectors/ebpf.plugin/ebpf_fd.h
+++ b/src/collectors/ebpf.plugin/ebpf_fd.h
@@ -29,15 +29,21 @@
#define NETDATA_CGROUP_FD_CLOSE_CONTEXT "cgroup.fd_close"
#define NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT "cgroup.fd_close_error"
-#define NETDATA_SYSTEMD_FD_OPEN_CONTEXT "services.fd_open"
-#define NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT "services.fd_open_error"
-#define NETDATA_SYSTEMD_FD_CLOSE_CONTEXT "services.fd_close"
-#define NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT "services.fd_close_error"
+#define NETDATA_SYSTEMD_FD_OPEN_CONTEXT "systemd.services.fd_open"
+#define NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT "systemd.services.fd_open_error"
+#define NETDATA_SYSTEMD_FD_CLOSE_CONTEXT "systemd.services.fd_close"
+#define NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT "systemd.services.fd_close_error"
// ARAL name
#define NETDATA_EBPF_FD_ARAL_NAME "ebpf_fd"
typedef struct netdata_fd_stat {
+ uint64_t ct;
+ uint32_t tgid;
+ uint32_t uid;
+ uint32_t gid;
+ char name[TASK_COMM_LEN];
+
uint32_t open_call; // Open syscalls (open and openat)
uint32_t close_call; // Close syscall (close)
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/src/collectors/ebpf.plugin/ebpf_filesystem.c
index b78e65532..b5c898232 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.c
+++ b/src/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -321,7 +321,7 @@ static inline int ebpf_fs_load_and_attach(ebpf_local_maps_t *map, struct filesys
ret = ebpf_fs_attach_kprobe(obj, functions);
if (!ret)
- map->map_fd = bpf_map__fd(obj->maps.tbl_fs);;
+ map->map_fd = bpf_map__fd(obj->maps.tbl_fs);
return ret;
}
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/src/collectors/ebpf.plugin/ebpf_filesystem.h
index f58d7fbe4..cd54be57e 100644
--- a/collectors/ebpf.plugin/ebpf_filesystem.h
+++ b/src/collectors/ebpf.plugin/ebpf_filesystem.h
@@ -8,9 +8,6 @@
#define NETDATA_EBPF_FS_MODULE_DESC "Monitor filesystem latency for: btrfs, ext4, nfs, xfs and zfs."
#include "ebpf.h"
-#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/filesystem.skel.h"
-#endif
#define NETDATA_FS_MAX_DIST_NAME 64UL
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/src/collectors/ebpf.plugin/ebpf_hardirq.c
index 465ee6434..465ee6434 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.c
+++ b/src/collectors/ebpf.plugin/ebpf_hardirq.c
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.h b/src/collectors/ebpf.plugin/ebpf_hardirq.h
index 35b03b761..35b03b761 100644
--- a/collectors/ebpf.plugin/ebpf_hardirq.h
+++ b/src/collectors/ebpf.plugin/ebpf_hardirq.h
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/src/collectors/ebpf.plugin/ebpf_mdflush.c
index fe33ff6a4..fe33ff6a4 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.c
+++ b/src/collectors/ebpf.plugin/ebpf_mdflush.c
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.h b/src/collectors/ebpf.plugin/ebpf_mdflush.h
index 629550746..629550746 100644
--- a/collectors/ebpf.plugin/ebpf_mdflush.h
+++ b/src/collectors/ebpf.plugin/ebpf_mdflush.h
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/src/collectors/ebpf.plugin/ebpf_mount.c
index 05c76540a..05c76540a 100644
--- a/collectors/ebpf.plugin/ebpf_mount.c
+++ b/src/collectors/ebpf.plugin/ebpf_mount.c
diff --git a/collectors/ebpf.plugin/ebpf_mount.h b/src/collectors/ebpf.plugin/ebpf_mount.h
index 768914b02..768914b02 100644
--- a/collectors/ebpf.plugin/ebpf_mount.h
+++ b/src/collectors/ebpf.plugin/ebpf_mount.h
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/src/collectors/ebpf.plugin/ebpf_oomkill.c
index 2c34650c3..e7604a2db 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/src/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -53,11 +53,11 @@ static void ebpf_create_specific_oomkill_charts(char *type, int update_every);
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_oomkill_services(ebpf_module_t *em)
+static void ebpf_obsolete_oomkill_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_OOMKILL_CHART,
- "",
"OOM kills. This chart is provided by eBPF plugin.",
EBPF_COMMON_DIMENSION_KILLS,
NETDATA_EBPF_MEMORY_GROUP,
@@ -78,12 +78,13 @@ static inline void ebpf_obsolete_oomkill_cgroup_charts(ebpf_module_t *em)
{
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_oomkill_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_oomkill_services(em, ect->name);
+
continue;
+ }
ebpf_create_specific_oomkill_charts(ect->name, em->update_every);
}
@@ -101,6 +102,7 @@ static void ebpf_obsolete_oomkill_apps(ebpf_module_t *em)
{
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_OOMKILL_IDX))))
continue;
@@ -118,6 +120,7 @@ static void ebpf_obsolete_oomkill_apps(ebpf_module_t *em)
w->charts_created &= ~(1<<EBPF_MODULE_OOMKILL_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -231,11 +234,33 @@ static void ebpf_create_specific_oomkill_charts(char *type, int update_every)
**/
static void ebpf_create_systemd_oomkill_charts(int update_every)
{
- ebpf_create_charts_on_systemd(NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.",
- EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP,
- NETDATA_EBPF_CHART_TYPE_LINE, 20191,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
- NETDATA_EBPF_MODULE_NAME_OOMKILL, update_every);
+ static ebpf_systemd_args_t data_oom = {
+ .title = "OOM kills. This chart is provided by eBPF plugin.",
+ .units = EBPF_COMMON_DIMENSION_KILLS,
+ .family = NETDATA_EBPF_MEMORY_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20191,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_EBPF_MODULE_NAME_OOMKILL,
+        .module = NETDATA_EBPF_MODULE_NAME_OOMKILL,
+ .update_every = 0,
+ .suffix = NETDATA_OOMKILL_CHART,
+ .dimension = "oom"
+ };
+
+ if (!data_oom.update_every)
+ data_oom.update_every = update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w ; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_OOMKILL_CHART))
+ continue;
+
+ data_oom.id = w->name;
+ ebpf_create_charts_on_systemd(&data_oom);
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_OOMKILL_CHART;
+ }
}
/**
@@ -246,14 +271,15 @@ static void ebpf_create_systemd_oomkill_charts(int update_every)
static void ebpf_send_systemd_oomkill_charts()
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART, "");
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->oomkill);
- ect->oomkill = 0;
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_OOMKILL_CHART)) ) {
+ continue;
}
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_OOMKILL_CHART);
+ write_chart_dimension("oom", (long long) ect->oomkill);
+ ect->oomkill = 0;
+ ebpf_write_end_chart();
}
- ebpf_write_end_chart();
}
/*
@@ -294,14 +320,10 @@ static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every)
*/
void ebpf_oomkill_send_cgroup_data(int update_every)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_oomkill_charts(update_every);
}
@@ -443,26 +465,23 @@ static void oomkill_collector(ebpf_module_t *em)
counter = 0;
uint32_t count = oomkill_read_data(keys);
- if (!count) {
- running_time = ebpf_update_oomkill_period(running_time, em);
- }
stats[NETDATA_CONTROLLER_PID_TABLE_ADD] += (uint64_t) count;
stats[NETDATA_CONTROLLER_PID_TABLE_DEL] += (uint64_t) count;
- pthread_mutex_lock(&collect_data_mutex);
pthread_mutex_lock(&lock);
- if (cgroups && count) {
+ if (cgroups && shm_ebpf_cgroup.header && ebpf_cgroup_pids) {
ebpf_update_oomkill_cgroup(keys, count);
// write everything from the ebpf map.
ebpf_oomkill_send_cgroup_data(update_every);
}
if (em->apps_charts & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
+ pthread_mutex_lock(&collect_data_mutex);
oomkill_write_data(keys, count);
+ pthread_mutex_unlock(&collect_data_mutex);
}
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
running_time = ebpf_update_oomkill_period(running_time, em);
}
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.h b/src/collectors/ebpf.plugin/ebpf_oomkill.h
index 4a5fa62aa..4a5fa62aa 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.h
+++ b/src/collectors/ebpf.plugin/ebpf_oomkill.h
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/src/collectors/ebpf.plugin/ebpf_process.c
index e3e2b884e..306b59639 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/src/collectors/ebpf.plugin/ebpf_process.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-#include <sys/resource.h>
-
#include "ebpf.h"
#include "ebpf_process.h"
@@ -65,10 +63,6 @@ struct config process_config = { .first_section = NULL,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
-#ifdef NETDATA_DEV_MODE
-int process_disable_priority;
-#endif
-
/*****************************************************************
*
* PROCESS DATA AND SEND TO NETDATA
@@ -147,53 +141,6 @@ static void ebpf_process_send_data(ebpf_module_t *em)
}
/**
- * Sum values for pid
- *
- * @param root the structure with all available PIDs
- * @param offset the address that we are reading
- *
- * @return it returns the sum of all PIDs
- */
-long long ebpf_process_sum_values_for_pids(struct ebpf_pid_on_target *root, size_t offset)
-{
- long long ret = 0;
- while (root) {
- int32_t pid = root->pid;
- ebpf_process_stat_t *w = global_process_stats[pid];
- if (w) {
- uint32_t *value = (uint32_t *)((char *)w + offset);
- ret += *value;
- }
-
- root = root->next;
- }
-
- return ret;
-}
-
-/**
- * Remove process pid
- *
- * Remove from PID task table when task_release was called.
- */
-void ebpf_process_remove_pids()
-{
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
- int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
- while (pids) {
- uint32_t pid = pids->pid;
- ebpf_process_stat_t *w = global_process_stats[pid];
- if (w) {
- ebpf_process_stat_release(w);
- global_process_stats[pid] = NULL;
- bpf_map_delete_elem(pid_fd, &pid);
- }
-
- pids = pids->next;
- }
-}
-
-/**
* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
@@ -201,46 +148,33 @@ void ebpf_process_remove_pids()
void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em)
{
struct ebpf_target *w;
- // This algorithm is improved in https://github.com/netdata/netdata/pull/16030
- collected_number values[5];
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_PROCESS_IDX))))
continue;
- values[0] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_process));
- values[1] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t, create_thread));
- values[2] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
- exit_call));
- values[3] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
- release_call));
- values[4] = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_stat_t,
- task_err));
-
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_process_start");
- write_chart_dimension("calls", values[0]);
+ write_chart_dimension("calls", w->process.create_process);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_thread_start");
- write_chart_dimension("calls", values[1]);
+ write_chart_dimension("calls", w->process.create_thread);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_exit");
- write_chart_dimension("calls", values[2]);
+ write_chart_dimension("calls", w->process.exit_call);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_released");
- write_chart_dimension("calls", values[3]);
+ write_chart_dimension("calls", w->process.release_call);
ebpf_write_end_chart();
if (em->mode < MODE_ENTRY) {
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_task_error");
- write_chart_dimension("calls", values[4]);
+ write_chart_dimension("calls", w->process.task_err);
ebpf_write_end_chart();
}
}
-
- ebpf_process_remove_pids();
}
/*****************************************************************
@@ -287,34 +221,20 @@ static void ebpf_read_process_hash_global_tables(netdata_idx_t *stats, int maps_
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_process_cgroup(int maps_per_core)
+static void ebpf_update_process_cgroup()
{
ebpf_cgroup_target_t *ect ;
- int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
-
- size_t length = sizeof(ebpf_process_stat_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
struct pid_on_target2 *pids;
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
ebpf_process_stat_t *out = &pids->ps;
- if (global_process_stats[pid]) {
- ebpf_process_stat_t *in = global_process_stats[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ ebpf_process_stat_t *in = &local_pid->process;
memcpy(out, in, sizeof(ebpf_process_stat_t));
- } else {
- if (bpf_map_lookup_elem(pid_fd, &pid, process_stat_vector)) {
- memset(out, 0, sizeof(ebpf_process_stat_t));
- }
-
- ebpf_process_apps_accumulator(process_stat_vector, maps_per_core);
-
- memcpy(out, process_stat_vector, sizeof(ebpf_process_stat_t));
-
- memset(process_stat_vector, 0, length);
}
}
}
@@ -439,7 +359,7 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart_labels("app_group", w->name, 1);
ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+ fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
w->clean_name,
@@ -454,7 +374,7 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart_labels("app_group", w->name, 1);
ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+ fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
w->clean_name,
@@ -469,7 +389,7 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart_labels("app_group", w->name, 1);
ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+ fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
w->clean_name,
@@ -484,7 +404,7 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart_labels("app_group", w->name, 1);
ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+ fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_cmd(NETDATA_APP_FAMILY,
@@ -500,7 +420,7 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart_labels("app_group", w->name, 1);
ebpf_commit_label();
- fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+ fprintf(stdout, "DIMENSION calls '' %s 1 1\n", ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
}
w->charts_created |= 1<<EBPF_MODULE_PROCESS_IDX;
}
@@ -523,11 +443,11 @@ static void ebpf_obsolete_specific_process_charts(char *type, ebpf_module_t *em)
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_process_services(ebpf_module_t *em)
+static void ebpf_obsolete_process_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_TASK_PROCESS,
- "",
"Process started",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
@@ -537,8 +457,8 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_TASK_THREAD,
- "",
"Threads started",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
@@ -548,8 +468,8 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_TASK_CLOSE,
- "",
"Tasks starts exit process.",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
@@ -559,8 +479,8 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_TASK_EXIT,
- "",
"Tasks closed",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
@@ -571,8 +491,8 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_TASK_ERROR,
- "",
"Errors to create process or threads.",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_PROCESS_GROUP,
@@ -593,12 +513,13 @@ static void ebpf_obsolete_process_services(ebpf_module_t *em)
static inline void ebpf_obsolete_process_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_process_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_process_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_process_charts(ect->name, em);
}
@@ -785,11 +706,6 @@ static void ebpf_process_exit(void *ptr)
ebpf_obsolete_process_global(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_process_stat)
- ebpf_statistic_obsolete_aral_chart(em, process_disable_priority);
-#endif
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -905,14 +821,14 @@ static void ebpf_send_specific_process_data(char *type, ebpf_process_stat_t *val
static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em)
{
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP,
NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000,
ebpf_create_global_dimension, &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP,
NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001,
ebpf_create_global_dimension,
@@ -920,7 +836,7 @@ static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em)
1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP,
NETDATA_CGROUP_PROCESS_EXIT_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002,
ebpf_create_global_dimension,
@@ -928,7 +844,7 @@ static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em)
1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP,
NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003,
ebpf_create_global_dimension,
@@ -937,7 +853,7 @@ static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP,
NETDATA_CGROUP_PROCESS_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004,
ebpf_create_global_dimension,
@@ -993,36 +909,98 @@ static void ebpf_obsolete_specific_process_charts(char *type, ebpf_module_t *em)
**/
static void ebpf_create_systemd_process_charts(ebpf_module_t *em)
{
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20065,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20066,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks starts exit process.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20067,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks closed",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20068,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ static ebpf_systemd_args_t data_process = {
+ .title = "Process started",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_PROCESS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20065,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_PROCESS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_TASK_PROCESS,
+ .dimension = "calls"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20069,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ static ebpf_systemd_args_t data_thread = {
+ .title = "Threads started",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_PROCESS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20066,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_PROCESS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_TASK_THREAD,
+ .dimension = "calls"
+ };
+
+ static ebpf_systemd_args_t task_exit = {
+ .title = "Tasks starts exit process.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_PROCESS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20067,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_PROCESS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_TASK_CLOSE,
+ .dimension = "calls"
+ };
+
+ static ebpf_systemd_args_t task_closed = {
+ .title = "Tasks closed",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_PROCESS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20068,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_PROCESS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_TASK_EXIT,
+ .dimension = "calls"
+ };
+
+ static ebpf_systemd_args_t task_error = {
+ .title = "Errors to create process or threads.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_PROCESS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20069,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_PROCESS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_TASK_ERROR,
+ .dimension = "calls"
+ };
+
+ ebpf_cgroup_target_t *w;
+ netdata_run_mode_t mode = em->mode;
+ if (!task_exit.update_every)
+ data_process.update_every = data_thread.update_every = task_exit.update_every =
+ task_closed.update_every = task_error.update_every = em->update_every;
+
+ for (w = ebpf_cgroup_pids; w; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_PROCESS_CHART))
+ continue;
+
+ data_process.id = data_thread.id = task_exit.id = task_closed.id = task_error.id = w->name;
+ ebpf_create_charts_on_systemd(&data_process);
+
+ ebpf_create_charts_on_systemd(&data_thread);
+
+ ebpf_create_charts_on_systemd(&task_exit);
+
+ ebpf_create_charts_on_systemd(&task_closed);
+ if (mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&task_error);
+ }
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_PROCESS_CHART;
}
}
@@ -1036,46 +1014,32 @@ static void ebpf_create_systemd_process_charts(ebpf_module_t *em)
static void ebpf_send_systemd_process_charts(ebpf_module_t *em)
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS, "");
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.create_process);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_PROCESS_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.create_thread);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_TASK_PROCESS);
+ write_chart_dimension("calls", ect->publish_systemd_ps.create_process);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.exit_call);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_TASK_THREAD);
+ write_chart_dimension("calls", ect->publish_systemd_ps.create_thread);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.release_call);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_TASK_EXIT);
+ write_chart_dimension("calls", ect->publish_systemd_ps.exit_call);
+ ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_ps.task_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_TASK_CLOSE);
+ write_chart_dimension("calls", ect->publish_systemd_ps.release_call);
ebpf_write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_TASK_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_ps.task_err);
+ ebpf_write_end_chart();
+ }
}
}
@@ -1086,18 +1050,13 @@ static void ebpf_send_systemd_process_charts(ebpf_module_t *em)
*/
static void ebpf_process_send_cgroup_data(ebpf_module_t *em)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_process_sum_cgroup_pids(&ect->publish_systemd_ps, ect->pids);
}
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
-
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_process_charts(em);
}
@@ -1181,7 +1140,7 @@ static void process_collector(ebpf_module_t *em)
if (ebpf_all_pids_count > 0) {
if (cgroups && shm_ebpf_cgroup.header) {
- ebpf_update_process_cgroup(maps_per_core);
+ ebpf_update_process_cgroup();
}
}
@@ -1195,11 +1154,6 @@ static void process_collector(ebpf_module_t *em)
ebpf_process_send_apps_data(apps_groups_root_target, em);
}
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_process_stat)
- ebpf_send_data_aral_chart(ebpf_aral_process_stat, em);
-#endif
-
if (cgroups && shm_ebpf_cgroup.header) {
ebpf_process_send_cgroup_data(em);
}
@@ -1240,8 +1194,6 @@ static void ebpf_process_allocate_global_vectors(size_t length)
memset(process_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t));
process_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
process_stat_vector = callocz(ebpf_nprocs, sizeof(ebpf_process_stat_t));
-
- global_process_stats = callocz((size_t)pid_max, sizeof(ebpf_process_stat_t *));
}
static void change_syscalls()
@@ -1351,11 +1303,6 @@ void *ebpf_process_thread(void *ptr)
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_process_stat)
- process_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_PROC_ARAL_NAME, em);
-#endif
-
pthread_mutex_unlock(&lock);
process_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/src/collectors/ebpf.plugin/ebpf_process.h
index 310b321d6..18ffec1ff 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/src/collectors/ebpf.plugin/ebpf_process.h
@@ -9,7 +9,6 @@
// Groups used on Dashboard
#define NETDATA_PROCESS_GROUP "processes"
-#define NETDATA_PROCESS_CGROUP_GROUP "processes (eBPF)"
// Global chart name
#define NETDATA_EXIT_SYSCALL "exit"
@@ -34,11 +33,11 @@
#define NETDATA_CGROUP_PROCESS_EXIT_CONTEXT "cgroup.task_exit"
#define NETDATA_CGROUP_PROCESS_ERROR_CONTEXT "cgroup.task_error"
-#define NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT "services.process_create"
-#define NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT "services.thread_create"
-#define NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT "services.task_close"
-#define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "services.task_exit"
-#define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "services.task_error"
+#define NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT "systemd.services.process_create"
+#define NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT "systemd.services.thread_create"
+#define NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT "systemd.services.task_close"
+#define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "systemd.services.task_exit"
+#define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "systemd.services.task_error"
#define NETDATA_EBPF_CGROUP_UPDATE 30
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/src/collectors/ebpf.plugin/ebpf_shm.c
index f14eb67d0..816e68cfd 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/src/collectors/ebpf.plugin/ebpf_shm.c
@@ -54,6 +54,17 @@ netdata_ebpf_targets_t shm_targets[] = { {.name = "shmget", .mode = EBPF_LOAD_TR
int shm_disable_priority;
#endif
+struct netdata_static_thread ebpf_read_shm = {
+ .name = "EBPF_READ_SHM",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
+
#ifdef LIBBPF_MAJOR_VERSION
/*****************************************************************
*
@@ -89,7 +100,6 @@ static void ebpf_disable_probe(struct shm_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_shmat_probe, false);
bpf_program__set_autoload(obj->progs.netdata_shmdt_probe, false);
bpf_program__set_autoload(obj->progs.netdata_shmctl_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false);
}
/*
@@ -105,7 +115,6 @@ static void ebpf_disable_trampoline(struct shm_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_shmat_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_shmdt_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_shmctl_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false);
}
/**
@@ -138,9 +147,6 @@ static void ebpf_set_trampoline_target(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMCTL_CALL].name, running_on_kernel);
bpf_program__set_attach_target(obj->progs.netdata_shmctl_fentry, 0,
syscall);
-
- bpf_program__set_attach_target(obj->progs.netdata_shm_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
}
/**
@@ -160,7 +166,7 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
obj->links.netdata_shmget_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmget_probe,
false, syscall);
- int ret = (int)libbpf_get_error(obj->links.netdata_shmget_probe);
+ long ret = libbpf_get_error(obj->links.netdata_shmget_probe);
if (ret)
return -1;
@@ -168,7 +174,7 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMAT_CALL].name, running_on_kernel);
obj->links.netdata_shmat_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmat_probe,
false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmat_probe);
+ ret = libbpf_get_error(obj->links.netdata_shmat_probe);
if (ret)
return -1;
@@ -176,7 +182,7 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMDT_CALL].name, running_on_kernel);
obj->links.netdata_shmdt_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmdt_probe,
false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmdt_probe);
+ ret = libbpf_get_error(obj->links.netdata_shmdt_probe);
if (ret)
return -1;
@@ -184,17 +190,10 @@ static int ebpf_shm_attach_probe(struct shm_bpf *obj)
shm_targets[NETDATA_KEY_SHMCTL_CALL].name, running_on_kernel);
obj->links.netdata_shmctl_probe = bpf_program__attach_kprobe(obj->progs.netdata_shmctl_probe,
false, syscall);
- ret = (int)libbpf_get_error(obj->links.netdata_shmctl_probe);
- if (ret)
- return -1;
-
- obj->links.netdata_shm_release_task_probe = bpf_program__attach_kprobe(obj->progs.netdata_shm_release_task_probe,
- false, EBPF_COMMON_FNCT_CLEAN_UP);
- ret = (int)libbpf_get_error(obj->links.netdata_shm_release_task_probe);
+ ret = libbpf_get_error(obj->links.netdata_shmctl_probe);
if (ret)
return -1;
-
return 0;
}
@@ -211,19 +210,6 @@ static void ebpf_shm_set_hash_tables(struct shm_bpf *obj)
}
/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_shm_disable_release_task(struct shm_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_probe, false);
- bpf_program__set_autoload(obj->progs.netdata_shm_release_task_fentry, false);
-}
-
-/**
* Adjust Map Size
*
* Resize maps according input from users.
@@ -271,8 +257,6 @@ static inline int ebpf_shm_load_and_attach(struct shm_bpf *obj, ebpf_module_t *e
}
ebpf_shm_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_shm_disable_release_task(obj);
int ret = shm_bpf__load(obj);
if (!ret) {
@@ -301,11 +285,11 @@ static void ebpf_obsolete_specific_shm_charts(char *type, int update_every);
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_shm_services(ebpf_module_t *em)
+static void ebpf_obsolete_shm_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMGET_CHART,
- "",
"Calls to syscall shmget(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -315,8 +299,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMAT_CHART,
- "",
"Calls to syscall shmat(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -326,8 +310,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMDT_CHART,
- "",
"Calls to syscall shmdt(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -337,8 +321,8 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SHMCTL_CHART,
- "",
"Calls to syscall shmctl(2).",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_IPC_SHM_GROUP,
@@ -358,12 +342,13 @@ static void ebpf_obsolete_shm_services(ebpf_module_t *em)
static inline void ebpf_obsolete_shm_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_shm_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_shm_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_shm_charts(ect->name, em->update_every);
}
@@ -381,6 +366,7 @@ void ebpf_obsolete_shm_apps_charts(struct ebpf_module *em)
{
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SHM_IDX))))
continue;
@@ -431,6 +417,7 @@ void ebpf_obsolete_shm_apps_charts(struct ebpf_module *em)
w->charts_created &= ~(1<<EBPF_MODULE_SHM_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -465,6 +452,9 @@ static void ebpf_shm_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (ebpf_read_shm.thread)
+ netdata_thread_cancel(*ebpf_read_shm.thread);
+
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {
@@ -478,11 +468,6 @@ static void ebpf_shm_exit(void *ptr)
ebpf_obsolete_shm_global(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- ebpf_statistic_obsolete_aral_chart(em, shm_disable_priority);
-#endif
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -534,38 +519,16 @@ static void shm_apps_accumulator(netdata_publish_shm_t *out, int maps_per_core)
}
/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid that we are collecting data
- * @param out values read from hash tables;
- */
-static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish)
-{
- netdata_publish_shm_t *curr = shm_pid[current_pid];
- if (!curr) {
- curr = ebpf_shm_stat_get( );
- shm_pid[current_pid] = curr;
- }
-
- memcpy(curr, publish, sizeof(netdata_publish_shm_t));
-}
-
-/**
* Update cgroup
*
* Update cgroup data based in
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_shm_cgroup(int maps_per_core)
+static void ebpf_update_shm_cgroup()
{
netdata_publish_shm_t *cv = shm_vector;
- int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
size_t length = sizeof(netdata_publish_shm_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
ebpf_cgroup_target_t *ect;
@@ -577,20 +540,11 @@ static void ebpf_update_shm_cgroup(int maps_per_core)
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_publish_shm_t *out = &pids->shm;
- if (likely(shm_pid) && shm_pid[pid]) {
- netdata_publish_shm_t *in = shm_pid[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_shm_t *in = &local_pid->shm;
memcpy(out, in, sizeof(netdata_publish_shm_t));
- } else {
- if (!bpf_map_lookup_elem(fd, &pid, cv)) {
- shm_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_publish_shm_t));
-
- // now that we've consumed the value, zero it out in the map.
- memset(cv, 0, length);
- bpf_map_update_elem(fd, &pid, cv, BPF_EXIST);
- }
}
}
}
@@ -604,33 +558,42 @@ static void ebpf_update_shm_cgroup(int maps_per_core)
*
* @param maps_per_core do I need to read all cores?
*/
-static void read_shm_apps_table(int maps_per_core)
+static void ebpf_read_shm_apps_table(int maps_per_core, int max_period)
{
netdata_publish_shm_t *cv = shm_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
size_t length = sizeof(netdata_publish_shm_t);
if (maps_per_core)
length *= ebpf_nprocs;
- while (pids) {
- key = pids->pid;
-
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
+ goto end_shm_loop;
}
shm_apps_accumulator(cv, maps_per_core);
- shm_fill_pid(key, cv);
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, 0);
+ if (!local_pid)
+ goto end_shm_loop;
+
+ netdata_publish_shm_t *publish = &local_pid->shm;
+ if (!publish->ct || publish->ct != cv->ct) {
+ memcpy(publish, &cv[0], sizeof(netdata_publish_shm_t));
+ local_pid->not_updated = 0;
+ } else if (++local_pid->not_updated >= max_period){
+ bpf_map_delete_elem(fd, &key);
+ local_pid->not_updated = 0;
+ }
+
+end_shm_loop:
// now that we've consumed the value, zero it out in the map.
memset(cv, 0, length);
bpf_map_update_elem(fd, &key, cv, BPF_EXIST);
- pids = pids->next;
+ key = next_key;
}
}
@@ -689,10 +652,12 @@ static void ebpf_shm_read_global_table(netdata_idx_t *stats, int maps_per_core)
*/
static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_target *root)
{
+ memset(shm, 0, sizeof(netdata_publish_shm_t));
while (root) {
int32_t pid = root->pid;
- netdata_publish_shm_t *w = shm_pid[pid];
- if (w) {
+ ebpf_pid_stat_t *pid_stat = ebpf_get_pid_entry(pid, 0);
+ if (pid_stat) {
+ netdata_publish_shm_t *w = &pid_stat->shm;
shm->get += w->get;
shm->at += w->at;
shm->dt += w->dt;
@@ -716,12 +681,11 @@ static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct ebpf_pid_on_tar
void ebpf_shm_send_apps_data(struct ebpf_target *root)
{
struct ebpf_target *w;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SHM_IDX))))
continue;
- ebpf_shm_sum_pids(&w->shm, w->root_pid);
-
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_shmget_call");
write_chart_dimension("calls", (long long) w->shm.get);
ebpf_write_end_chart();
@@ -738,6 +702,7 @@ void ebpf_shm_send_apps_data(struct ebpf_target *root)
write_chart_dimension("calls", (long long) w->shm.ctl);
ebpf_write_end_chart();
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -875,41 +840,82 @@ static void ebpf_obsolete_specific_shm_charts(char *type, int update_every)
**/
static void ebpf_create_systemd_shm_charts(int update_every)
{
- ebpf_create_charts_on_systemd(NETDATA_SHMGET_CHART,
- "Calls to syscall shmget(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20191,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_GET_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmget = {
+ .title = "Calls to syscall shmget(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20191,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_GET_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMGET_CHART,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SHMAT_CHART,
- "Calls to syscall shmat(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20192,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_AT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmat = {
+ .title = "Calls to syscall shmat(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20192,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_AT_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMAT_CHART,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SHMDT_CHART,
- "Calls to syscall shmdt(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20193,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_DT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmdt = {
+ .title = "Calls to syscall shmdt(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20193,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_DT_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMDT_CHART,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SHMCTL_CHART,
- "Calls to syscall shmctl(2).",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_IPC_SHM_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20193,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_SHM_CTL_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+ static ebpf_systemd_args_t data_shmctl = {
+ .title = "Calls to syscall shmctl(2).",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_IPC_SHM_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20194,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SHM_CTL_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SHM,
+ .update_every = 0,
+ .suffix = NETDATA_SHMCTL_CHART,
+ .dimension = "calls"
+ };
+
+ if (!data_shmget.update_every)
+ data_shmat.update_every = data_shmctl.update_every =
+ data_shmdt.update_every = data_shmget.update_every = update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_SHM_CHART))
+ continue;
+
+ data_shmat.id = data_shmctl.id = data_shmdt.id = data_shmget.id = w->name;
+ ebpf_create_charts_on_systemd(&data_shmat);
+
+ ebpf_create_charts_on_systemd(&data_shmctl);
+
+ ebpf_create_charts_on_systemd(&data_shmdt);
+
+ ebpf_create_charts_on_systemd(&data_shmget);
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_SHM_CHART;
+ }
}
/**
@@ -920,37 +926,27 @@ static void ebpf_create_systemd_shm_charts(int update_every)
static void ebpf_send_systemd_shm_charts()
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART, "");
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.get);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_SHM_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.at);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMGET_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.get);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.dt);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMAT_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.at);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_shm.ctl);
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMDT_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.dt);
+ ebpf_write_end_chart();
+
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SHMCTL_CHART);
+ write_chart_dimension("calls", (long long)ect->publish_shm.ctl);
+ ebpf_write_end_chart();
}
- ebpf_write_end_chart();
}
/*
@@ -987,17 +983,13 @@ static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *value
*/
void ebpf_shm_send_cgroup_data(int update_every)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_shm_sum_cgroup_pids(&ect->publish_shm, ect->pids);
}
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_shm_charts(update_every);
}
@@ -1028,6 +1020,72 @@ void ebpf_shm_send_cgroup_data(int update_every)
}
/**
+ * Resume apps data
+ */
+void ebpf_shm_resume_apps_data() {
+ struct ebpf_target *w;
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1 << EBPF_MODULE_SHM_IDX))))
+ continue;
+
+ ebpf_shm_sum_pids(&w->shm, w->root_pid);
+ }
+}
+
+/**
+ * SHM read thread
+ *
+ * Thread used to read shared memory (shm) syscall data for apps charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always return NULL
+ */
+void *ebpf_read_shm_thread(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ int maps_per_core = em->maps_per_core;
+ int update_every = em->update_every;
+
+ int counter = update_every - 1;
+
+ uint32_t lifetime = em->lifetime;
+ uint32_t running_time = 0;
+ usec_t period = update_every * USEC_PER_SEC;
+ int max_period = update_every * EBPF_CLEANUP_FACTOR;
+ while (!ebpf_plugin_exit && running_time < lifetime) {
+ (void)heartbeat_next(&hb, period);
+ if (ebpf_plugin_exit || ++counter != update_every)
+ continue;
+
+ netdata_thread_disable_cancelability();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ ebpf_read_shm_apps_table(maps_per_core, max_period);
+ ebpf_shm_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ counter = 0;
+
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (running_time && !em->running_time)
+ running_time = update_every;
+ else
+ running_time += update_every;
+
+ em->running_time = running_time;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+ netdata_thread_enable_cancelability();
+ }
+
+ return NULL;
+}
+
+/**
* Main loop for this collector.
*/
static void shm_collector(ebpf_module_t *em)
@@ -1050,34 +1108,23 @@ static void shm_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_shm_read_global_table(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps) {
- read_shm_apps_table(maps_per_core);
- }
+ pthread_mutex_lock(&lock);
if (cgroups) {
- ebpf_update_shm_cgroup(maps_per_core);
+ ebpf_update_shm_cgroup();
}
- pthread_mutex_lock(&lock);
-
shm_send_global();
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED) {
ebpf_shm_send_apps_data(apps_groups_root_target);
}
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- ebpf_send_data_aral_chart(ebpf_aral_shm_pid, em);
-#endif
-
if (cgroups) {
ebpf_shm_send_cgroup_data(update_every);
}
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -1186,12 +1233,8 @@ void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr)
*/
static void ebpf_shm_allocate_global_vectors(int apps)
{
- if (apps) {
- ebpf_shm_aral_init();
- shm_pid = callocz((size_t)pid_max, sizeof(netdata_publish_shm_t *));
- shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
- }
-
+ UNUSED(apps);
+ shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
shm_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
memset(shm_hash_values, 0, sizeof(shm_hash_values));
@@ -1310,13 +1353,15 @@ void *ebpf_shm_thread(void *ptr)
ebpf_create_shm_charts(em->update_every);
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_shm_pid)
- shm_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_SHM_ARAL_NAME, em);
-#endif
-
pthread_mutex_unlock(&lock);
+ ebpf_read_shm.thread = mallocz(sizeof(netdata_thread_t));
+ netdata_thread_create(ebpf_read_shm.thread,
+ ebpf_read_shm.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_read_shm_thread,
+ em);
+
shm_collector(em);
endshm:
diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/src/collectors/ebpf.plugin/ebpf_shm.h
index a415006e6..5a670b1b5 100644
--- a/collectors/ebpf.plugin/ebpf_shm.h
+++ b/src/collectors/ebpf.plugin/ebpf_shm.h
@@ -23,15 +23,15 @@
#define NETDATA_CGROUP_SHM_DT_CONTEXT "cgroup.shmdt"
#define NETDATA_CGROUP_SHM_CTL_CONTEXT "cgroup.shmctl"
-#define NETDATA_SYSTEMD_SHM_GET_CONTEXT "services.shmget"
-#define NETDATA_SYSTEMD_SHM_AT_CONTEXT "services.shmat"
-#define NETDATA_SYSTEMD_SHM_DT_CONTEXT "services.shmdt"
-#define NETDATA_SYSTEMD_SHM_CTL_CONTEXT "services.shmctl"
-
-// ARAL name
-#define NETDATA_EBPF_SHM_ARAL_NAME "ebpf_shm"
+#define NETDATA_SYSTEMD_SHM_GET_CONTEXT "systemd.services.shmget"
+#define NETDATA_SYSTEMD_SHM_AT_CONTEXT "systemd.services.shmat"
+#define NETDATA_SYSTEMD_SHM_DT_CONTEXT "systemd.services.shmdt"
+#define NETDATA_SYSTEMD_SHM_CTL_CONTEXT "systemd.services.shmctl"
typedef struct netdata_publish_shm {
+ uint64_t ct;
+ char name[TASK_COMM_LEN];
+
uint64_t get;
uint64_t at;
uint64_t dt;
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/src/collectors/ebpf.plugin/ebpf_socket.c
index bbb5dca1b..5361526df 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/src/collectors/ebpf.plugin/ebpf_socket.c
@@ -107,10 +107,6 @@ struct netdata_static_thread ebpf_read_socket = {
ARAL *aral_socket_table = NULL;
-#ifdef NETDATA_DEV_MODE
-int socket_disable_priority;
-#endif
-
#ifdef LIBBPF_MAJOR_VERSION
/**
* Disable Probe
@@ -510,12 +506,12 @@ static void ebpf_socket_free(ebpf_module_t *em )
*
* @param update_every value to overwrite the update frequency set by the server.
**/
-static void ebpf_obsolete_systemd_socket_charts(int update_every)
+static void ebpf_obsolete_systemd_socket_charts(int update_every, char *id)
{
int order = 20080;
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_CONNECTION_TCP_V4,
- "",
"Calls to tcp_v4_connection",
EBPF_COMMON_DIMENSION_CONNECTIONS,
NETDATA_APPS_NET_GROUP,
@@ -526,8 +522,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
if (tcp_v6_connect_address.type == 'T') {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_CONNECTION_TCP_V6,
- "",
"Calls to tcp_v6_connection",
EBPF_COMMON_DIMENSION_CONNECTIONS,
NETDATA_APPS_NET_GROUP,
@@ -538,8 +534,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
}
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_BANDWIDTH_RECV,
- "",
"Bytes received",
EBPF_COMMON_DIMENSION_BITS,
NETDATA_APPS_NET_GROUP,
@@ -549,8 +545,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_BANDWIDTH_SENT,
- "",
"Bytes sent",
EBPF_COMMON_DIMENSION_BITS,
NETDATA_APPS_NET_GROUP,
@@ -560,8 +556,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
- "",
"Calls to tcp_cleanup_rbuf.",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_NET_GROUP,
@@ -571,8 +567,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
- "",
"Calls to tcp_sendmsg.",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_NET_GROUP,
@@ -582,8 +578,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
- "",
"Calls to tcp_retransmit",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_NET_GROUP,
@@ -593,8 +589,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
- "",
"Calls to udp_sendmsg",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_NET_GROUP,
@@ -604,8 +600,8 @@ static void ebpf_obsolete_systemd_socket_charts(int update_every)
update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
- "",
"Calls to udp_recvmsg",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_APPS_NET_GROUP,
@@ -626,12 +622,13 @@ static void ebpf_obsolete_specific_socket_charts(char *type, int update_every);
static inline void ebpf_obsolete_socket_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_systemd_socket_charts(em->update_every);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_systemd_socket_charts(em->update_every, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_socket_charts(ect->name, em->update_every);
}
@@ -650,6 +647,7 @@ void ebpf_socket_obsolete_apps_charts(struct ebpf_module *em)
int order = 20130;
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SOCKET_IDX))))
continue;
@@ -757,6 +755,7 @@ void ebpf_socket_obsolete_apps_charts(struct ebpf_module *em)
w->charts_created &= ~(1<<EBPF_MODULE_SOCKET_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -904,10 +903,6 @@ static void ebpf_socket_exit(void *ptr)
ebpf_socket_obsolete_global_charts(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_socket_pid)
- ebpf_statistic_obsolete_aral_chart(em, socket_disable_priority);
-#endif
pthread_mutex_unlock(&lock);
}
@@ -1031,83 +1026,57 @@ static void ebpf_socket_send_data(ebpf_module_t *em)
/**
* Send data to Netdata calling auxiliary functions.
- *
- * @param em the structure with thread information
- * @param root the target list.
*/
-void ebpf_socket_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
+void ebpf_socket_send_apps_data()
{
- UNUSED(em);
-
struct ebpf_target *w;
- // This algorithm is improved in https://github.com/netdata/netdata/pull/16030
- collected_number values[9];
-
- for (w = root; w; w = w->next) {
+ pthread_mutex_lock(&collect_data_mutex);
+ for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SOCKET_IDX))))
continue;
- struct ebpf_pid_on_target *move = w->root_pid;
- // Simplify algorithm, but others will appear only in https://github.com/netdata/netdata/pull/16030
- memset(values, 0, sizeof(values));
- while (move) {
- int32_t pid = move->pid;
- ebpf_socket_publish_apps_t *ws = socket_bandwidth_curr[pid];
- if (ws) {
- values[0] += (collected_number) ws->call_tcp_v4_connection;
- values[1] += (collected_number) ws->call_tcp_v6_connection;
- values[2] += (collected_number) ws->bytes_sent;
- values[3] += (collected_number) ws->bytes_received;
- values[4] += (collected_number) ws->call_tcp_sent;
- values[5] += (collected_number) ws->call_tcp_received;
- values[6] += (collected_number) ws->retransmit;
- values[7] += (collected_number) ws->call_udp_sent;
- values[8] += (collected_number) ws->call_udp_received;
- }
-
- move = move->next;
- }
-
+ ebpf_socket_publish_apps_t *values = &w->socket;
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_v4_connection");
- write_chart_dimension("connections", values[0]);
+ write_chart_dimension("connections", (collected_number) values->call_tcp_v4_connection);
ebpf_write_end_chart();
if (tcp_v6_connect_address.type == 'T') {
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_call_tcp_v6_connection");
- write_chart_dimension("calls", values[1]);
+ write_chart_dimension("calls", (collected_number) values->call_tcp_v6_connection);
ebpf_write_end_chart();
}
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_sock_bytes_sent");
// We multiply by 0.008, because we read bytes, but we display bits
- write_chart_dimension("bandwidth", ((values[2])*8)/1000);
+ write_chart_dimension("bandwidth", (collected_number) ((values->bytes_sent)*8)/1000);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_sock_bytes_received");
// We multiply by 0.008, because we read bytes, but we display bits
- write_chart_dimension("bandwidth", ((values[3])*8)/1000);
+ write_chart_dimension("bandwidth", (collected_number) ((values->bytes_received)*8)/1000);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_sendmsg");
- write_chart_dimension("calls", values[4]);
+ write_chart_dimension("calls", (collected_number) values->call_tcp_sent);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_cleanup_rbuf");
- write_chart_dimension("calls", values[5]);
+ write_chart_dimension("calls", (collected_number) values->call_tcp_received);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_tcp_retransmit");
- write_chart_dimension("calls", values[6]);
+ write_chart_dimension("calls", (collected_number) values->retransmit);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_udp_sendmsg");
- write_chart_dimension("calls", values[7]);
+ write_chart_dimension("calls", (collected_number) values->call_udp_sent);
ebpf_write_end_chart();
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_udp_recvmsg");
- write_chart_dimension("calls", values[8]);
+ write_chart_dimension("calls", (collected_number) values->call_udp_received);
ebpf_write_end_chart();
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/*****************************************************************
@@ -1809,6 +1778,41 @@ end_socket_loop:
}
netdata_thread_enable_cancelability();
}
+/**
+ * Resume apps data
+ */
+void ebpf_socket_resume_apps_data()
+{
+ struct ebpf_target *w;
+
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SOCKET_IDX))))
+ continue;
+
+ struct ebpf_pid_on_target *move = w->root_pid;
+
+ ebpf_socket_publish_apps_t *values = &w->socket;
+ memset(&w->socket, 0, sizeof(ebpf_socket_publish_apps_t));
+ while (move) {
+ int32_t pid = move->pid;
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ ebpf_socket_publish_apps_t *ws = &local_pid->socket;
+                values->call_tcp_v4_connection += ws->call_tcp_v4_connection;
+                values->call_tcp_v6_connection += ws->call_tcp_v6_connection;
+                values->bytes_sent += ws->bytes_sent;
+                values->bytes_received += ws->bytes_received;
+                values->call_tcp_sent += ws->call_tcp_sent;
+                values->call_tcp_received += ws->call_tcp_received;
+                values->retransmit += ws->retransmit;
+                values->call_udp_sent += ws->call_udp_sent;
+                values->call_udp_received += ws->call_udp_received;
+ }
+
+ move = move->next;
+ }
+ }
+}
/**
* Socket thread
@@ -1839,7 +1843,10 @@ void *ebpf_read_socket_thread(void *ptr)
if (ebpf_plugin_exit || ++counter != update_every)
continue;
+ pthread_mutex_lock(&collect_data_mutex);
ebpf_update_array_vectors(em);
+ ebpf_socket_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
counter = 0;
}
@@ -1992,23 +1999,23 @@ static void ebpf_socket_read_hash_global_tables(netdata_idx_t *stats, int maps_p
*/
void ebpf_socket_fill_publish_apps(uint32_t current_pid, netdata_socket_t *ns)
{
- ebpf_socket_publish_apps_t *curr = socket_bandwidth_curr[current_pid];
- if (!curr) {
- curr = ebpf_socket_stat_get();
- socket_bandwidth_curr[current_pid] = curr;
- }
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(current_pid, 0);
+ if (!local_pid)
+ return;
- curr->bytes_sent += ns->tcp.tcp_bytes_sent;
- curr->bytes_received += ns->tcp.tcp_bytes_received;
- curr->call_tcp_sent += ns->tcp.call_tcp_sent;
- curr->call_tcp_received += ns->tcp.call_tcp_received;
- curr->retransmit += ns->tcp.retransmit;
- curr->call_close += ns->tcp.close;
- curr->call_tcp_v4_connection += ns->tcp.ipv4_connect;
- curr->call_tcp_v6_connection += ns->tcp.ipv6_connect;
-
- curr->call_udp_sent += ns->udp.call_udp_sent;
- curr->call_udp_received += ns->udp.call_udp_received;
+ ebpf_socket_publish_apps_t *curr = &local_pid->socket;
+
+ curr->bytes_sent = ns->tcp.tcp_bytes_sent;
+ curr->bytes_received = ns->tcp.tcp_bytes_received;
+ curr->call_tcp_sent = ns->tcp.call_tcp_sent;
+ curr->call_tcp_received = ns->tcp.call_tcp_received;
+ curr->retransmit = ns->tcp.retransmit;
+ curr->call_close = ns->tcp.close;
+ curr->call_tcp_v4_connection = ns->tcp.ipv4_connect;
+ curr->call_tcp_v6_connection = ns->tcp.ipv6_connect;
+
+ curr->call_udp_sent = ns->udp.call_udp_sent;
+ curr->call_udp_received = ns->udp.call_udp_received;
}
/**
@@ -2026,8 +2033,9 @@ static void ebpf_update_socket_cgroup()
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
ebpf_socket_publish_apps_t *publish = &ect->publish_socket;
- if (likely(socket_bandwidth_curr) && socket_bandwidth_curr[pid]) {
- ebpf_socket_publish_apps_t *in = socket_bandwidth_curr[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ ebpf_socket_publish_apps_t *in = &local_pid->socket;
publish->bytes_sent = in->bytes_sent;
publish->bytes_received = in->bytes_received;
@@ -2329,96 +2337,166 @@ static void ebpf_send_specific_socket_data(char *type, ebpf_socket_publish_apps_
**/
static void ebpf_create_systemd_socket_charts(int update_every)
{
- int order = 20080;
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_CONNECTION_TCP_V4,
- "Calls to tcp_v4_connection", EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_tcp_v4 = {
+ .title = "Calls to tcp_v4_connection",
+ .units = EBPF_COMMON_DIMENSION_CONNECTIONS,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20080,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_CONNECTION_TCP_V4,
+ .dimension = "connections"
+ };
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_CONNECTION_TCP_V6,
- "Calls to tcp_v6_connection",
- EBPF_COMMON_DIMENSION_CONNECTIONS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
- }
+ static ebpf_systemd_args_t data_tcp_v6 = {
+ .title = "Calls to tcp_v6_connection",
+ .units = EBPF_COMMON_DIMENSION_CONNECTIONS,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20081,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_CONNECTION_TCP_V6,
+ .dimension = "connections"
+ };
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_RECV,
- "Bytes received", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_bandwith_recv = {
+ .title = "Bytes received",
+ .units = EBPF_COMMON_DIMENSION_BITS,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20082,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_BANDWIDTH_RECV,
+ .dimension = "bits"
+ };
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_SENT,
- "Bytes sent", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_bandwith_sent = {
+ .title = "Bytes sent",
+ .units = EBPF_COMMON_DIMENSION_BITS,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20083,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_BANDWIDTH_SENT,
+ .dimension = "bits"
+ };
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
- "Calls to tcp_cleanup_rbuf.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_tcp_cleanup = {
+ .title = "Calls to tcp_cleanup_rbuf.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20084,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
- "Calls to tcp_sendmsg.",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_tcp_sendmsg = {
+ .title = "Calls to tcp_sendmsg.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20085,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
- "Calls to tcp_retransmit",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_tcp_retransmit = {
+ .title = "Calls to tcp_retransmit",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20086,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
- "Calls to udp_sendmsg",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_udp_send = {
+ .title = "Calls to udp_sendmsg",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20087,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
- "Calls to udp_recvmsg",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_NET_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- order++,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
- update_every);
+ static ebpf_systemd_args_t data_udp_recv = {
+ .title = "Calls to udp_recvmsg",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_APPS_NET_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20088,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SOCKET,
+ .update_every = 0,
+ .suffix = NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
+ .dimension = "calls"
+ };
+
+ if (!data_tcp_v4.update_every)
+ data_tcp_v4.update_every = data_tcp_v6.update_every = data_bandwith_recv.update_every =
+ data_bandwith_sent.update_every = data_tcp_cleanup.update_every = data_tcp_sendmsg.update_every =
+ data_tcp_retransmit.update_every = data_udp_send.update_every = data_udp_recv.update_every = update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w ; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_SOCKET_CHART))
+ continue;
+
+ data_tcp_v4.id = data_tcp_v6.id = data_bandwith_recv.id =
+ data_bandwith_sent.id = data_tcp_cleanup.id = data_tcp_sendmsg.id =
+ data_tcp_retransmit.id = data_udp_send.id = data_udp_recv.id = w->name;
+
+ ebpf_create_charts_on_systemd(&data_tcp_v4);
+ if (tcp_v6_connect_address.type == 'T') {
+ ebpf_create_charts_on_systemd(&data_tcp_v6);
+ }
+
+ ebpf_create_charts_on_systemd(&data_bandwith_recv);
+ ebpf_create_charts_on_systemd(&data_bandwith_sent);
+
+ ebpf_create_charts_on_systemd(&data_tcp_cleanup);
+
+ ebpf_create_charts_on_systemd(&data_tcp_sendmsg);
+
+ ebpf_create_charts_on_systemd(&data_tcp_retransmit);
+
+ ebpf_create_charts_on_systemd(&data_udp_recv);
+
+ ebpf_create_charts_on_systemd(&data_udp_send);
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_SOCKET_CHART;
+ }
}
/**
@@ -2429,79 +2507,49 @@ static void ebpf_create_systemd_socket_charts(int update_every)
static void ebpf_send_systemd_socket_charts()
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V4, "");
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v4_connection);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_SOCKET_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- if (tcp_v6_connect_address.type == 'T') {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_CONNECTION_TCP_V6, "");
- for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_v6_connection);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_CONNECTION_TCP_V4);
+ write_chart_dimension("connections", (long long)ect->publish_socket.call_tcp_v4_connection);
ebpf_write_end_chart();
- }
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_sent);
+ if (tcp_v6_connect_address.type == 'T') {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_CONNECTION_TCP_V6);
+ write_chart_dimension("connections", (long long)ect->publish_socket.call_tcp_v6_connection);
+ ebpf_write_end_chart();
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_received);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_BANDWIDTH_SENT);
+ write_chart_dimension("bits", (long long)ect->publish_socket.bytes_sent);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_sent);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_BANDWIDTH_RECV);
+ write_chart_dimension("bits", (long long)ect->publish_socket.bytes_received);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_received);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS);
+ write_chart_dimension("calls", (long long)ect->publish_socket.call_tcp_sent);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.retransmit);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS);
+ write_chart_dimension("calls", (long long)ect->publish_socket.call_tcp_received);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_sent);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT);
+ write_chart_dimension("calls", (long long)ect->publish_socket.retransmit);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_received);
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS);
+ write_chart_dimension("calls", (long long)ect->publish_socket.call_udp_sent);
+ ebpf_write_end_chart();
+
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS);
+ write_chart_dimension("calls", (long long)ect->publish_socket.call_udp_received);
+ ebpf_write_end_chart();
}
- ebpf_write_end_chart();
}
/**
@@ -2525,17 +2573,13 @@ void ebpf_socket_update_cgroup_algorithm()
*/
static void ebpf_socket_send_cgroup_data(int update_every)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_socket_sum_cgroup_pids(&ect->publish_socket, ect->pids);
}
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_socket_charts(update_every);
}
@@ -2602,7 +2646,6 @@ static void socket_collector(ebpf_module_t *em)
ebpf_socket_read_hash_global_tables(stats, maps_per_core);
}
- pthread_mutex_lock(&collect_data_mutex);
if (cgroups)
ebpf_update_socket_cgroup();
@@ -2611,20 +2654,14 @@ static void socket_collector(ebpf_module_t *em)
ebpf_socket_send_data(em);
if (socket_apps_enabled & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
- ebpf_socket_send_apps_data(em, apps_groups_root_target);
-
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_socket_pid)
- ebpf_send_data_aral_chart(ebpf_aral_socket_pid, em);
-#endif
+ ebpf_socket_send_apps_data();
- if (cgroups)
+ if (cgroups && shm_ebpf_cgroup.header && ebpf_cgroup_pids)
ebpf_socket_send_cgroup_data(update_every);
fflush(stdout);
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -2655,9 +2692,6 @@ static void ebpf_socket_initialize_global_vectors()
memset(socket_publish_aggregated, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_publish_syscall_t));
socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
- ebpf_socket_aral_init();
- socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
-
aral_socket_table = ebpf_allocate_pid_aral(NETDATA_EBPF_SOCKET_ARAL_TABLE_NAME,
sizeof(netdata_socket_plus_t));
@@ -2878,11 +2912,6 @@ void *ebpf_socket_thread(void *ptr)
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_socket_pid)
- socket_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_SOCKET_ARAL_NAME, em);
-#endif
-
pthread_mutex_unlock(&lock);
socket_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/src/collectors/ebpf.plugin/ebpf_socket.h
index a6d3e03b6..64201149f 100644
--- a/collectors/ebpf.plugin/ebpf_socket.h
+++ b/src/collectors/ebpf.plugin/ebpf_socket.h
@@ -95,8 +95,7 @@ typedef enum ebpf_socket_idx {
} ebpf_socket_index_t;
#define NETDATA_SOCKET_KERNEL_FUNCTIONS "kernel"
-#define NETDATA_NETWORK_CONNECTIONS_GROUP "network connections"
-#define NETDATA_CGROUP_NET_GROUP "network (eBPF)"
+#define NETDATA_CGROUP_NET_GROUP "network"
// Global chart name
#define NETDATA_TCP_OUTBOUND_CONNECTIONS "tcp_outbound_conn"
@@ -142,15 +141,15 @@ typedef enum ebpf_socket_idx {
#define NETDATA_CGROUP_SOCKET_UDP_RECV_CONTEXT "cgroup.net_udp_recv"
#define NETDATA_CGROUP_SOCKET_UDP_SEND_CONTEXT "cgroup.net_udp_send"
-#define NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT "services.net_conn_ipv4"
-#define NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT "services.net_conn_ipv6"
-#define NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT "services.net_bytes_recv"
-#define NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT "services.net_bytes_send"
-#define NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT "services.net_tcp_recv"
-#define NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT "services.net_tcp_send"
-#define NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT "services.net_retransmit"
-#define NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT "services.net_udp_recv"
-#define NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT "services.net_udp_send"
+#define NETDATA_SERVICES_SOCKET_TCP_V4_CONN_CONTEXT "systemd.services.net_conn_ipv4"
+#define NETDATA_SERVICES_SOCKET_TCP_V6_CONN_CONTEXT "systemd.services.net_conn_ipv6"
+#define NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT "systemd.services.net_bytes_recv"
+#define NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT "systemd.services.net_bytes_send"
+#define NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT "systemd.services.net_tcp_recv"
+#define NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT "systemd.services.net_tcp_send"
+#define NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT "systemd.services.net_retransmit"
+#define NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT "systemd.services.net_udp_recv"
+#define NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT "systemd.services.net_udp_send"
// ARAL name
#define NETDATA_EBPF_SOCKET_ARAL_NAME "ebpf_socket"
@@ -269,6 +268,8 @@ extern ebpf_network_viewer_options_t network_viewer_opt;
* Structure to store socket information
*/
typedef struct netdata_socket {
+ char name[TASK_COMM_LEN];
+
// Timestamp
uint64_t first_timestamp;
uint64_t current_timestamp;
@@ -285,6 +286,7 @@ typedef struct netdata_socket {
uint32_t retransmit; //It is never used with UDP
uint32_t ipv4_connect;
uint32_t ipv6_connect;
+ uint32_t state; // We do not have charts for it, because we are using network viewer plugin
} tcp;
struct {
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/src/collectors/ebpf.plugin/ebpf_softirq.c
index 106ff4f29..106ff4f29 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.c
+++ b/src/collectors/ebpf.plugin/ebpf_softirq.c
diff --git a/collectors/ebpf.plugin/ebpf_softirq.h b/src/collectors/ebpf.plugin/ebpf_softirq.h
index 4ef36775a..4ef36775a 100644
--- a/collectors/ebpf.plugin/ebpf_softirq.h
+++ b/src/collectors/ebpf.plugin/ebpf_softirq.h
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/src/collectors/ebpf.plugin/ebpf_swap.c
index fb007f928..42aa04ad8 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/src/collectors/ebpf.plugin/ebpf_swap.c
@@ -52,6 +52,17 @@ netdata_ebpf_targets_t swap_targets[] = { {.name = "swap_readpage", .mode = EBPF
{.name = "swap_writepage", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
+struct netdata_static_thread ebpf_read_swap = {
+ .name = "EBPF_READ_SWAP",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
+
#ifdef LIBBPF_MAJOR_VERSION
/**
* Disable probe
@@ -77,7 +88,6 @@ static void ebpf_swap_disable_trampoline(struct swap_bpf *obj)
{
bpf_program__set_autoload(obj->progs.netdata_swap_readpage_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_swap_writepage_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
}
/**
@@ -94,9 +104,6 @@ static void ebpf_swap_set_trampoline_target(struct swap_bpf *obj)
bpf_program__set_attach_target(obj->progs.netdata_swap_writepage_fentry, 0,
swap_targets[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_release_task_fentry, 0,
- EBPF_COMMON_FNCT_CLEAN_UP);
}
/**
@@ -160,18 +167,6 @@ static void ebpf_swap_adjust_map(struct swap_bpf *obj, ebpf_module_t *em)
}
/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_swap_disable_release_task(struct swap_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_release_task_fentry, false);
-}
-
-/**
* Load and attach
*
* Load and attach the eBPF code in kernel.
@@ -196,9 +191,6 @@ static inline int ebpf_swap_load_and_attach(struct swap_bpf *obj, ebpf_module_t
ebpf_swap_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_swap_disable_release_task(obj);
-
int ret = swap_bpf__load(obj);
if (ret) {
return ret;
@@ -230,25 +222,25 @@ static void ebpf_obsolete_specific_swap_charts(char *type, int update_every);
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_swap_services(ebpf_module_t *em)
+static void ebpf_obsolete_swap_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_MEM_SWAP_READ_CHART,
- "",
"Calls to function swap_readpage.",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_SYSTEM_SWAP_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CGROUP_SWAP_READ_CONTEXT,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100,
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_MEM_SWAP_WRITE_CHART,
- "",
"Calls to function swap_writepage.",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_SYSTEM_SWAP_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CGROUP_SWAP_WRITE_CONTEXT,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101,
@@ -265,12 +257,13 @@ static void ebpf_obsolete_swap_services(ebpf_module_t *em)
static inline void ebpf_obsolete_swap_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_swap_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_swap_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_swap_charts(ect->name, em->update_every);
}
@@ -288,6 +281,7 @@ void ebpf_obsolete_swap_apps_charts(struct ebpf_module *em)
{
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SWAP_IDX))))
continue;
@@ -315,6 +309,7 @@ void ebpf_obsolete_swap_apps_charts(struct ebpf_module *em)
update_every);
w->charts_created &= ~(1<<EBPF_MODULE_SWAP_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -348,6 +343,9 @@ static void ebpf_swap_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (ebpf_read_swap.thread)
+ netdata_thread_cancel(*ebpf_read_swap.thread);
+
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {
@@ -411,59 +409,24 @@ static void swap_apps_accumulator(netdata_publish_swap_t *out, int maps_per_core
}
/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid that we are collecting data
- * @param out values read from hash tables;
- */
-static void swap_fill_pid(uint32_t current_pid, netdata_publish_swap_t *publish)
-{
- netdata_publish_swap_t *curr = swap_pid[current_pid];
- if (!curr) {
- curr = callocz(1, sizeof(netdata_publish_swap_t));
- swap_pid[current_pid] = curr;
- }
-
- memcpy(curr, publish, sizeof(netdata_publish_swap_t));
-}
-
-/**
* Update cgroup
*
* Update cgroup data based in
- *
- * @param maps_per_core do I need to read all cores?
*/
-static void ebpf_update_swap_cgroup(int maps_per_core)
+static void ebpf_update_swap_cgroup()
{
ebpf_cgroup_target_t *ect ;
- netdata_publish_swap_t *cv = swap_vector;
- int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
- size_t length = sizeof(netdata_publish_swap_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
struct pid_on_target2 *pids;
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_publish_swap_t *out = &pids->swap;
- if (likely(swap_pid) && swap_pid[pid]) {
- netdata_publish_swap_t *in = swap_pid[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_swap_t *in = &local_pid->swap;
memcpy(out, in, sizeof(netdata_publish_swap_t));
- } else {
- memset(cv, 0, length);
- if (!bpf_map_lookup_elem(fd, &pid, cv)) {
- swap_apps_accumulator(cv, maps_per_core);
-
- memcpy(out, cv, sizeof(netdata_publish_swap_t));
-
- // We are cleaning to avoid passing data read from one process to other.
- memset(cv, 0, length);
- }
}
}
}
@@ -471,38 +434,143 @@ static void ebpf_update_swap_cgroup(int maps_per_core)
}
/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param swap
+ * @param root
+ */
+static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct ebpf_pid_on_target *root)
+{
+ uint64_t local_read = 0;
+ uint64_t local_write = 0;
+
+ while (root) {
+ int32_t pid = root->pid;
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_swap_t *w = &local_pid->swap;
+ local_write += w->write;
+ local_read += w->read;
+ }
+ root = root->next;
+ }
+
+ // These conditions were added, because we are using incremental algorithm
+ swap->write = (local_write >= swap->write) ? local_write : swap->write;
+ swap->read = (local_read >= swap->read) ? local_read : swap->read;
+ }
+
+
+/**
+ * Resume apps data
+ */
+void ebpf_swap_resume_apps_data() {
+ struct ebpf_target *w;
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1 << EBPF_MODULE_SWAP_IDX))))
+ continue;
+
+ ebpf_swap_sum_pids(&w->swap, w->root_pid);
+ }
+}
+
+/**
* Read APPS table
*
* Read the apps table and store data inside the structure.
*
* @param maps_per_core do I need to read all cores?
*/
-static void read_swap_apps_table(int maps_per_core)
+static void ebpf_read_swap_apps_table(int maps_per_core, int max_period)
{
netdata_publish_swap_t *cv = swap_vector;
- uint32_t key;
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
size_t length = sizeof(netdata_publish_swap_t);
if (maps_per_core)
length *= ebpf_nprocs;
- while (pids) {
- key = pids->pid;
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (bpf_map_lookup_elem(fd, &key, cv)) {
- pids = pids->next;
- continue;
+ goto end_swap_loop;
}
swap_apps_accumulator(cv, maps_per_core);
- swap_fill_pid(key, cv);
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, cv->tgid);
+ if (!local_pid)
+ goto end_swap_loop;
+
+ netdata_publish_swap_t *publish = &local_pid->swap;
+ if (!publish->ct || publish->ct != cv->ct) {
+ memcpy(publish, cv, sizeof(netdata_publish_swap_t));
+ local_pid->not_updated = 0;
+ } else if (++local_pid->not_updated >= max_period) {
+ bpf_map_delete_elem(fd, &key);
+ local_pid->not_updated = 0;
+ }
// We are cleaning to avoid passing data read from one process to other.
+end_swap_loop:
memset(cv, 0, length);
+ key = next_key;
+ }
+}
- pids = pids->next;
+/**
+ * SWAP thread
+ *
+ * Thread used to generate swap charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always return NULL
+ */
+void *ebpf_read_swap_thread(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ int maps_per_core = em->maps_per_core;
+ int update_every = em->update_every;
+
+ int counter = update_every - 1;
+
+ uint32_t lifetime = em->lifetime;
+ uint32_t running_time = 0;
+ usec_t period = update_every * USEC_PER_SEC;
+ int max_period = update_every * EBPF_CLEANUP_FACTOR;
+
+ while (!ebpf_plugin_exit && running_time < lifetime) {
+ (void)heartbeat_next(&hb, period);
+ if (ebpf_plugin_exit || ++counter != update_every)
+ continue;
+
+ netdata_thread_disable_cancelability();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ ebpf_read_swap_apps_table(maps_per_core, max_period);
+ ebpf_swap_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ counter = 0;
+
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (running_time && !em->running_time)
+ running_time = update_every;
+ else
+ running_time += update_every;
+
+ em->running_time = running_time;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+ netdata_thread_enable_cancelability();
}
+
+ return NULL;
}
/**
@@ -545,34 +613,6 @@ static void ebpf_swap_read_global_table(netdata_idx_t *stats, int maps_per_core)
}
/**
- * Sum PIDs
- *
- * Sum values for all targets.
- *
- * @param swap
- * @param root
- */
-static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct ebpf_pid_on_target *root)
-{
- uint64_t local_read = 0;
- uint64_t local_write = 0;
-
- while (root) {
- int32_t pid = root->pid;
- netdata_publish_swap_t *w = swap_pid[pid];
- if (w) {
- local_write += w->write;
- local_read += w->read;
- }
- root = root->next;
- }
-
- // These conditions were added, because we are using incremental algorithm
- swap->write = (local_write >= swap->write) ? local_write : swap->write;
- swap->read = (local_read >= swap->read) ? local_read : swap->read;
-}
-
-/**
* Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
@@ -580,12 +620,11 @@ static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct ebpf_pid_on_
void ebpf_swap_send_apps_data(struct ebpf_target *root)
{
struct ebpf_target *w;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_SWAP_IDX))))
continue;
- ebpf_swap_sum_pids(&w->swap, w->root_pid);
-
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_swap_readpage");
write_chart_dimension("calls", (long long) w->swap.read);
ebpf_write_end_chart();
@@ -594,6 +633,7 @@ void ebpf_swap_send_apps_data(struct ebpf_target *root)
write_chart_dimension("calls", (long long) w->swap.write);
ebpf_write_end_chart();
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -630,21 +670,19 @@ static void ebpf_swap_sum_cgroup_pids(netdata_publish_swap_t *swap, struct pid_o
static void ebpf_send_systemd_swap_charts()
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART, "");
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.read);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_SWAP_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.write);
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_MEM_SWAP_READ_CHART);
+ write_chart_dimension("calls", (long long) ect->publish_systemd_swap.read);
+ ebpf_write_end_chart();
+
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_MEM_SWAP_WRITE_CHART);
+ write_chart_dimension("calls", (long long) ect->publish_systemd_swap.write);
+ ebpf_write_end_chart();
}
- ebpf_write_end_chart();
}
/**
@@ -659,7 +697,7 @@ static void ebpf_create_specific_swap_charts(char *type, int update_every)
{
ebpf_create_chart(type, NETDATA_MEM_SWAP_READ_CHART,
"Calls to function swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
NETDATA_CGROUP_SWAP_READ_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100,
ebpf_create_global_dimension,
@@ -667,7 +705,7 @@ static void ebpf_create_specific_swap_charts(char *type, int update_every)
ebpf_create_chart(type, NETDATA_MEM_SWAP_WRITE_CHART,
"Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
NETDATA_CGROUP_SWAP_WRITE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101,
ebpf_create_global_dimension,
@@ -686,12 +724,12 @@ static void ebpf_create_specific_swap_charts(char *type, int update_every)
static void ebpf_obsolete_specific_swap_charts(char *type, int update_every)
{
ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_READ_CHART, "", "Calls to function swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_READ_CONTEXT,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100, update_every);
ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_WRITE_CHART, "", "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_WRITE_CONTEXT,
NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101, update_every);
}
@@ -724,19 +762,49 @@ static void ebpf_send_specific_swap_data(char *type, netdata_publish_swap_t *val
**/
static void ebpf_create_systemd_swap_charts(int update_every)
{
- ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_READ_CHART,
- "Calls to swap_readpage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20191,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_READ_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_WRITE_CHART,
- "Calls to function swap_writepage.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20192,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
+ static ebpf_systemd_args_t data_read = {
+ .title = "Calls to swap_readpage.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_SYSTEM_SWAP_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20191,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SWAP_READ_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SWAP,
+ .update_every = 0,
+ .suffix = NETDATA_MEM_SWAP_READ_CHART,
+ .dimension = "calls"
+ };
+
+ static ebpf_systemd_args_t data_write = {
+ .title = "Calls to function swap_writepage.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_SYSTEM_SWAP_SUBMENU,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20192,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_SWAP,
+ .update_every = 0,
+ .suffix = NETDATA_MEM_SWAP_WRITE_CHART,
+ .dimension = "calls"
+ };
+
+ if (!data_write.update_every)
+ data_read.update_every = data_write.update_every = update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w ; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_SWAP_CHART))
+ continue;
+
+ data_read.id = data_write.id = w->name;
+ ebpf_create_charts_on_systemd(&data_read);
+
+ ebpf_create_charts_on_systemd(&data_write);
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_SWAP_CHART;
+ }
}
/**
@@ -746,18 +814,13 @@ static void ebpf_create_systemd_swap_charts(int update_every)
*/
void ebpf_swap_send_cgroup_data(int update_every)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_swap_sum_cgroup_pids(&ect->publish_systemd_swap, ect->pids);
}
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
-
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_swap_charts(update_every);
fflush(stdout);
@@ -810,12 +873,9 @@ static void swap_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_swap_read_global_table(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- read_swap_apps_table(maps_per_core);
if (cgroup)
- ebpf_update_swap_cgroup(maps_per_core);
+ ebpf_update_swap_cgroup();
pthread_mutex_lock(&lock);
@@ -824,11 +884,10 @@ static void swap_collector(ebpf_module_t *em)
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_swap_send_apps_data(apps_groups_root_target);
- if (cgroup)
+ if (cgroup && shm_ebpf_cgroup.header && ebpf_cgroup_pids)
ebpf_swap_send_cgroup_data(update_every);
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -903,14 +962,9 @@ void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* We are not testing the return, because callocz does this and shutdown the software
* case it was not possible to allocate.
- *
- * @param apps is apps enabled?
*/
-static void ebpf_swap_allocate_global_vectors(int apps)
+static void ebpf_swap_allocate_global_vectors()
{
- if (apps)
- swap_pid = callocz((size_t)pid_max, sizeof(netdata_publish_swap_t *));
-
swap_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_swap_t));
swap_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
@@ -1008,7 +1062,7 @@ void *ebpf_swap_thread(void *ptr)
goto endswap;
}
- ebpf_swap_allocate_global_vectors(em->apps_charts);
+ ebpf_swap_allocate_global_vectors();
int algorithms[NETDATA_SWAP_END] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX };
ebpf_global_labels(swap_aggregated_data, swap_publish_aggregated, swap_dimension_name, swap_dimension_name,
@@ -1020,6 +1074,13 @@ void *ebpf_swap_thread(void *ptr)
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
pthread_mutex_unlock(&lock);
+ ebpf_read_swap.thread = mallocz(sizeof(netdata_thread_t));
+ netdata_thread_create(ebpf_read_swap.thread,
+ ebpf_read_swap.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_read_swap_thread,
+ em);
+
swap_collector(em);
endswap:
diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/src/collectors/ebpf.plugin/ebpf_swap.h
index 79e9a01ac..38e700ee0 100644
--- a/collectors/ebpf.plugin/ebpf_swap.h
+++ b/src/collectors/ebpf.plugin/ebpf_swap.h
@@ -19,12 +19,18 @@
#define NETDATA_DIRECTORY_SWAP_CONFIG_FILE "swap.conf"
// Contexts
-#define NETDATA_CGROUP_SWAP_READ_CONTEXT "cgroup.swap_read"
-#define NETDATA_CGROUP_SWAP_WRITE_CONTEXT "cgroup.swap_write"
-#define NETDATA_SYSTEMD_SWAP_READ_CONTEXT "services.swap_read"
-#define NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT "services.swap_write"
+#define NETDATA_CGROUP_SWAP_READ_CONTEXT "systemd.cgroup.swap_read"
+#define NETDATA_CGROUP_SWAP_WRITE_CONTEXT "systemd.cgroup.swap_write"
+#define NETDATA_SYSTEMD_SWAP_READ_CONTEXT "systemd.services.swap_read"
+#define NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT "systemd.services.swap_write"
typedef struct netdata_publish_swap {
+ uint64_t ct;
+ uint32_t tgid;
+ uint32_t uid;
+ uint32_t gid;
+ char name[TASK_COMM_LEN];
+
uint64_t read;
uint64_t write;
} netdata_publish_swap_t;
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/src/collectors/ebpf.plugin/ebpf_sync.c
index a16318107..a16318107 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/src/collectors/ebpf.plugin/ebpf_sync.c
diff --git a/collectors/ebpf.plugin/ebpf_sync.h b/src/collectors/ebpf.plugin/ebpf_sync.h
index bd1bb78b0..373695565 100644
--- a/collectors/ebpf.plugin/ebpf_sync.h
+++ b/src/collectors/ebpf.plugin/ebpf_sync.h
@@ -3,10 +3,6 @@
#ifndef NETDATA_EBPF_SYNC_H
#define NETDATA_EBPF_SYNC_H 1
-#ifdef LIBBPF_MAJOR_VERSION
-#include "includes/sync.skel.h"
-#endif
-
// Module name & description
#define NETDATA_EBPF_MODULE_NAME_SYNC "sync"
#define NETDATA_EBPF_SYNC_MODULE_DESC "Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2)."
diff --git a/collectors/ebpf.plugin/ebpf_unittest.c b/src/collectors/ebpf.plugin/ebpf_unittest.c
index 11b449e03..11b449e03 100644
--- a/collectors/ebpf.plugin/ebpf_unittest.c
+++ b/src/collectors/ebpf.plugin/ebpf_unittest.c
diff --git a/collectors/ebpf.plugin/ebpf_unittest.h b/src/collectors/ebpf.plugin/ebpf_unittest.h
index 429cbe628..429cbe628 100644
--- a/collectors/ebpf.plugin/ebpf_unittest.h
+++ b/src/collectors/ebpf.plugin/ebpf_unittest.h
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/src/collectors/ebpf.plugin/ebpf_vfs.c
index 354901c9c..cb7500aab 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/src/collectors/ebpf.plugin/ebpf_vfs.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-3.0-or-later
-#include <sys/resource.h>
-
#include "ebpf.h"
#include "ebpf_vfs.h"
@@ -43,6 +41,17 @@ static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input =
#endif
}};
+struct netdata_static_thread ebpf_read_vfs = {
+ .name = "EBPF_READ_VFS",
+ .config_section = NULL,
+ .config_name = NULL,
+ .env_name = NULL,
+ .enabled = 1,
+ .thread = NULL,
+ .init_routine = NULL,
+ .start_routine = NULL
+};
+
struct config vfs_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
@@ -60,10 +69,6 @@ netdata_ebpf_targets_t vfs_targets[] = { {.name = "vfs_write", .mode = EBPF_LOAD
{.name = "release_task", .mode = EBPF_LOAD_TRAMPOLINE},
{.name = NULL, .mode = EBPF_LOAD_TRAMPOLINE}};
-#ifdef NETDATA_DEV_MODE
-int vfs_disable_priority;
-#endif
-
#ifdef LIBBPF_MAJOR_VERSION
/**
* Disable probe
@@ -90,7 +95,6 @@ static void ebpf_vfs_disable_probes(struct vfs_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_vfs_open_kretprobe, false);
bpf_program__set_autoload(obj->progs.netdata_vfs_create_kprobe, false);
bpf_program__set_autoload(obj->progs.netdata_vfs_create_kretprobe, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_kprobe, false);
}
/*
@@ -116,7 +120,6 @@ static void ebpf_vfs_disable_trampoline(struct vfs_bpf *obj)
bpf_program__set_autoload(obj->progs.netdata_vfs_open_fentry, false);
bpf_program__set_autoload(obj->progs.netdata_vfs_open_fexit, false);
bpf_program__set_autoload(obj->progs.netdata_vfs_create_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_fentry, false);
}
/**
@@ -155,8 +158,6 @@ static void ebpf_vfs_set_trampoline_target(struct vfs_bpf *obj)
bpf_program__set_attach_target(obj->progs.netdata_vfs_open_fexit, 0, vfs_targets[NETDATA_EBPF_VFS_OPEN].name);
bpf_program__set_attach_target(obj->progs.netdata_vfs_create_fentry, 0, vfs_targets[NETDATA_EBPF_VFS_CREATE].name);
-
- bpf_program__set_attach_target(obj->progs.netdata_vfs_release_task_fentry, 0, EBPF_COMMON_FNCT_CLEAN_UP);
}
/**
@@ -172,7 +173,7 @@ static int ebpf_vfs_attach_probe(struct vfs_bpf *obj)
{
obj->links.netdata_vfs_write_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_write_kprobe, false,
vfs_targets[NETDATA_EBPF_VFS_WRITE].name);
- int ret = libbpf_get_error(obj->links.netdata_vfs_write_kprobe);
+ long ret = libbpf_get_error(obj->links.netdata_vfs_write_kprobe);
if (ret)
return -1;
@@ -302,13 +303,6 @@ static int ebpf_vfs_attach_probe(struct vfs_bpf *obj)
if (ret)
return -1;
- obj->links.netdata_vfs_release_task_kprobe = bpf_program__attach_kprobe(obj->progs.netdata_vfs_release_task_fentry,
- true,
- EBPF_COMMON_FNCT_CLEAN_UP);
- ret = libbpf_get_error(obj->links.netdata_vfs_release_task_kprobe);
- if (ret)
- return -1;
-
return 0;
}
@@ -345,19 +339,6 @@ static void ebpf_vfs_set_hash_tables(struct vfs_bpf *obj)
}
/**
- * Disable Release Task
- *
- * Disable release task when apps is not enabled.
- *
- * @param obj is the main structure for bpf objects.
- */
-static void ebpf_vfs_disable_release_task(struct vfs_bpf *obj)
-{
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_fentry, false);
- bpf_program__set_autoload(obj->progs.netdata_vfs_release_task_kprobe, false);
-}
-
-/**
* Load and attach
*
* Load and attach the eBPF code in kernel.
@@ -382,9 +363,6 @@ static inline int ebpf_vfs_load_and_attach(struct vfs_bpf *obj, ebpf_module_t *e
ebpf_vfs_adjust_map(obj, em);
- if (!em->apps_charts && !em->cgroup_charts)
- ebpf_vfs_disable_release_task(obj);
-
int ret = vfs_bpf__load(obj);
if (ret) {
return ret;
@@ -416,25 +394,25 @@ static void ebpf_obsolete_specific_vfs_charts(char *type, ebpf_module_t *em);
*
* @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
+static void ebpf_obsolete_vfs_services(ebpf_module_t *em, char *id)
{
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_FILE_DELETED,
- "",
"Files deleted",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20065,
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
- "",
"Write to disk",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20066,
@@ -442,11 +420,11 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
- "",
"Fails to write",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20067,
@@ -454,11 +432,11 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
}
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
- "",
"Read from disk",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20068,
@@ -466,11 +444,11 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
- "",
"Fails to read",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20069,
@@ -478,33 +456,33 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
}
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
- "",
"Bytes written on disk",
EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20070,
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
- "",
"Bytes read from disk",
EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20071,
em->update_every);
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_FSYNC,
- "",
"Calls to vfs_fsync.",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20072,
@@ -512,22 +490,22 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR,
- "",
"Sync error",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20073,
em->update_every);
}
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_OPEN,
- "",
"Calls to vfs_open.",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20074,
@@ -535,11 +513,11 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR,
- "",
"Open error",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20075,
@@ -547,11 +525,11 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
}
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_CREATE,
- "",
"Calls to vfs_create.",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20076,
@@ -559,11 +537,11 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
if (em->mode < MODE_ENTRY) {
ebpf_write_chart_obsolete(NETDATA_SERVICE_FAMILY,
+ id,
NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR,
- "",
"Create error",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_VFS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
20077,
@@ -581,12 +559,13 @@ static void ebpf_obsolete_vfs_services(ebpf_module_t *em)
static inline void ebpf_obsolete_vfs_cgroup_charts(ebpf_module_t *em) {
pthread_mutex_lock(&mutex_cgroup_shm);
- ebpf_obsolete_vfs_services(em);
-
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (ect->systemd)
+ if (ect->systemd) {
+ ebpf_obsolete_vfs_services(em, ect->name);
+
continue;
+ }
ebpf_obsolete_specific_vfs_charts(ect->name, em);
}
@@ -605,6 +584,7 @@ void ebpf_obsolete_vfs_apps_charts(struct ebpf_module *em)
int order = 20275;
struct ebpf_target *w;
int update_every = em->update_every;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = apps_groups_root_target; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_VFS_IDX))))
continue;
@@ -763,6 +743,7 @@ void ebpf_obsolete_vfs_apps_charts(struct ebpf_module *em)
}
w->charts_created &= ~(1<<EBPF_MODULE_VFS_IDX);
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -904,6 +885,9 @@ static void ebpf_vfs_exit(void *ptr)
{
ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (ebpf_read_vfs.thread)
+ netdata_thread_cancel(*ebpf_read_vfs.thread);
+
if (em->enabled == NETDATA_THREAD_EBPF_FUNCTION_RUNNING) {
pthread_mutex_lock(&lock);
if (em->cgroup_charts) {
@@ -917,11 +901,6 @@ static void ebpf_vfs_exit(void *ptr)
ebpf_obsolete_vfs_global(em);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_vfs_pid)
- ebpf_statistic_obsolete_aral_chart(em, vfs_disable_priority);
-#endif
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -1068,8 +1047,9 @@ static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct ebpf_pid_on_tar
while (root) {
int32_t pid = root->pid;
- netdata_publish_vfs_t *w = vfs_pid[pid];
- if (w) {
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_vfs_t *w = &local_pid->vfs;
accumulator.write_call += w->write_call;
accumulator.writev_call += w->writev_call;
accumulator.read_call += w->read_call;
@@ -1130,12 +1110,11 @@ static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct ebpf_pid_on_tar
void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
{
struct ebpf_target *w;
+ pthread_mutex_lock(&collect_data_mutex);
for (w = root; w; w = w->next) {
if (unlikely(!(w->charts_created & (1<<EBPF_MODULE_VFS_IDX))))
continue;
- ebpf_vfs_sum_pids(&w->vfs, w->root_pid);
-
ebpf_write_begin_chart(NETDATA_APP_FAMILY, w->clean_name, "_ebpf_call_vfs_unlink");
write_chart_dimension("calls", w->vfs.unlink_call);
ebpf_write_end_chart();
@@ -1198,6 +1177,7 @@ void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct ebpf_target *root)
ebpf_write_end_chart();
}
}
+ pthread_mutex_unlock(&collect_data_mutex);
}
/**
@@ -1234,52 +1214,41 @@ static void vfs_apps_accumulator(netdata_publish_vfs_t *out, int maps_per_core)
}
/**
- * Fill PID
- *
- * Fill PID structures
- *
- * @param current_pid pid that we are collecting data
- * @param out values read from hash tables;
- */
-static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
-{
- netdata_publish_vfs_t *curr = vfs_pid[current_pid];
- if (!curr) {
- curr = ebpf_vfs_get();
- vfs_pid[current_pid] = curr;
- }
-
- memcpy(curr, &publish[0], sizeof(netdata_publish_vfs_t));
-}
-
-/**
* Read the hash table and store data to allocated vectors.
*/
-static void ebpf_vfs_read_apps(int maps_per_core)
+static void ebpf_vfs_read_apps(int maps_per_core, int max_period)
{
- struct ebpf_pid_stat *pids = ebpf_root_of_pids;
netdata_publish_vfs_t *vv = vfs_vector;
int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
size_t length = sizeof(netdata_publish_vfs_t);
if (maps_per_core)
length *= ebpf_nprocs;
- while (pids) {
- uint32_t key = pids->pid;
-
+ uint32_t key = 0, next_key = 0;
+ while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
if (bpf_map_lookup_elem(fd, &key, vv)) {
- pids = pids->next;
- continue;
+ goto end_vfs_loop;
}
vfs_apps_accumulator(vv, maps_per_core);
- vfs_fill_pid(key, vv);
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(key, vv->tgid);
+ if (!local_pid)
+ goto end_vfs_loop;
+
+ netdata_publish_vfs_t *publish = &local_pid->vfs;
+ if (!publish->ct || publish->ct != vv->ct) {
+ memcpy(publish, vv, sizeof(netdata_publish_vfs_t));
+ local_pid->not_updated = 0;
+ } else if (++local_pid->not_updated >= max_period){
+ bpf_map_delete_elem(fd, &key);
+ local_pid->not_updated = 0;
+ }
+end_vfs_loop:
// We are cleaning to avoid passing data read from one process to other.
memset(vv, 0, length);
-
- pids = pids->next;
+ key = next_key;
}
}
@@ -1290,32 +1259,20 @@ static void ebpf_vfs_read_apps(int maps_per_core)
*
* @param maps_per_core do I need to read all cores?
*/
-static void read_update_vfs_cgroup(int maps_per_core)
+static void read_update_vfs_cgroup()
{
ebpf_cgroup_target_t *ect ;
- netdata_publish_vfs_t *vv = vfs_vector;
- int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
- size_t length = sizeof(netdata_publish_vfs_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
pthread_mutex_lock(&mutex_cgroup_shm);
for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
struct pid_on_target2 *pids;
for (pids = ect->pids; pids; pids = pids->next) {
int pid = pids->pid;
netdata_publish_vfs_t *out = &pids->vfs;
- if (likely(vfs_pid) && vfs_pid[pid]) {
- netdata_publish_vfs_t *in = vfs_pid[pid];
+ ebpf_pid_stat_t *local_pid = ebpf_get_pid_entry(pid, 0);
+ if (local_pid) {
+ netdata_publish_vfs_t *in = &local_pid->vfs;
memcpy(out, in, sizeof(netdata_publish_vfs_t));
- } else {
- memset(vv, 0, length);
- if (!bpf_map_lookup_elem(fd, &pid, vv)) {
- vfs_apps_accumulator(vv, maps_per_core);
-
- memcpy(out, vv, sizeof(netdata_publish_vfs_t));
- }
}
}
}
@@ -1400,88 +1357,88 @@ static void ebpf_vfs_sum_cgroup_pids(netdata_publish_vfs_t *vfs, struct pid_on_t
static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em)
{
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED,"Files deleted",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_UNLINK_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_UNLINK_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_WRITE_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_READ_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT,
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT,
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_FSYNC_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_FSYNC_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_OPEN_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_OPEN_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_OPEN_ERROR_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_OPEN_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
}
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_CREATE_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_CREATE_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
if (em->mode < MODE_ENTRY) {
ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_CREATE_ERROR_CONTEXT,
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP, NETDATA_CGROUP_VFS_CREATE_ERROR_CONTEXT,
NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512,
ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
@@ -1668,93 +1625,235 @@ static void ebpf_send_specific_vfs_data(char *type, netdata_publish_vfs_t *value
**/
static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em)
{
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20065,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
-
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20066,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_unlink = {
+ .title = "Files deleted",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20065,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_FILE_DELETED,
+ .dimension = "calls"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20067,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
+ static ebpf_systemd_args_t data_vfs_write = {
+ .title = "Write to disk",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20066,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_WRITE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20068,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_write_err = {
+ .title = "Fails to write",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20067,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
+ .dimension = "calls"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20069,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
+ static ebpf_systemd_args_t data_vfs_read = {
+ .title = "Read from disk",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20068,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_READ_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20070,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_read_err = {
+ .title = "Fails to read",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20069,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
- EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20071,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_write_bytes = {
+ .title = "Bytes written on disk",
+ .units = EBPF_COMMON_DIMENSION_BYTES,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20070,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
+ .dimension = "bytes"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to vfs_fsync.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20072,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_read_bytes = {
+ .title = "Bytes read from disk",
+ .units = EBPF_COMMON_DIMENSION_BYTES,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20071,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
+ .dimension = "bytes"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20073,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to vfs_open.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20074,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_OPEN_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_fsync = {
+ .title = "Calls to vfs_fsync.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20072,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_FSYNC,
+ .dimension = "calls"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20075,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_OPEN_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
- }
+ static ebpf_systemd_args_t data_vfs_fsync_err = {
+ .title = "Sync error",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20073,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR,
+ .dimension = "calls"
+ };
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to vfs_create.",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20076,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_CREATE_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_open = {
+ .title = "Calls to vfs_open.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20074,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_OPEN_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_OPEN,
+ .dimension = "calls"
+ };
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
- EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED, 20077,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_CREATE_ERROR_CONTEXT,
- NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ static ebpf_systemd_args_t data_vfs_open_err = {
+ .title = "Open error",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20075,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_OPEN_ERROR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR,
+ .dimension = "calls"
+ };
+
+ static ebpf_systemd_args_t data_vfs_create = {
+ .title = "Calls to vfs_create.",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20076,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_CREATE_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_CREATE,
+ .dimension = "calls"
+ };
+
+ static ebpf_systemd_args_t data_vfs_create_err = {
+ .title = "Create error",
+ .units = EBPF_COMMON_DIMENSION_CALL,
+ .family = NETDATA_VFS_GROUP,
+ .charttype = NETDATA_EBPF_CHART_TYPE_STACKED,
+ .order = 20077,
+ .algorithm = EBPF_CHART_ALGORITHM_INCREMENTAL,
+ .context = NETDATA_SYSTEMD_VFS_CREATE_ERROR_CONTEXT,
+ .module = NETDATA_EBPF_MODULE_NAME_VFS,
+ .update_every = 0,
+ .suffix = NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR,
+ .dimension = "calls"
+ };
+
+ if (!data_vfs_create.update_every)
+ data_vfs_unlink.update_every = data_vfs_write.update_every = data_vfs_write_err.update_every =
+ data_vfs_read.update_every = data_vfs_read_err.update_every = data_vfs_write_bytes.update_every =
+ data_vfs_read_bytes.update_every = data_vfs_fsync.update_every = data_vfs_fsync_err.update_every =
+ data_vfs_open.update_every = data_vfs_open_err.update_every = data_vfs_create.update_every =
+ data_vfs_create_err.update_every = em->update_every;
+
+ ebpf_cgroup_target_t *w;
+ for (w = ebpf_cgroup_pids; w ; w = w->next) {
+ if (unlikely(!w->systemd || w->flags & NETDATA_EBPF_SERVICES_HAS_VFS_CHART))
+ continue;
+
+ data_vfs_unlink.id = data_vfs_write.id = data_vfs_write_err.id =
+ data_vfs_read.id = data_vfs_read_err.id = data_vfs_write_bytes.id = data_vfs_read_bytes.id =
+ data_vfs_fsync.id = data_vfs_fsync_err.id = data_vfs_open.id =
+ data_vfs_open_err.id = data_vfs_create.id = data_vfs_create_err.id = w->name;
+ ebpf_create_charts_on_systemd(&data_vfs_unlink);
+
+ ebpf_create_charts_on_systemd(&data_vfs_write);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&data_vfs_write_err);
+ }
+
+ ebpf_create_charts_on_systemd(&data_vfs_read);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&data_vfs_read_err);
+ }
+
+ ebpf_create_charts_on_systemd(&data_vfs_write_bytes);
+ ebpf_create_charts_on_systemd(&data_vfs_read_bytes);
+
+ ebpf_create_charts_on_systemd(&data_vfs_fsync);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&data_vfs_fsync_err);
+ }
+
+ ebpf_create_charts_on_systemd(&data_vfs_open);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&data_vfs_open_err);
+ }
+
+ ebpf_create_charts_on_systemd(&data_vfs_create);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(&data_vfs_create_err);
+ }
+
+ w->flags |= NETDATA_EBPF_SERVICES_HAS_VFS_CHART;
}
}
@@ -1768,124 +1867,78 @@ static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em)
static void ebpf_send_systemd_vfs_charts(ebpf_module_t *em)
{
ebpf_cgroup_target_t *ect;
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED, "");
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.unlink_call);
+ if (unlikely(!(ect->flags & NETDATA_EBPF_SERVICES_HAS_VFS_CHART)) ) {
+ continue;
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_call +
- ect->publish_systemd_vfs.writev_call);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_FILE_DELETED);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.unlink_call);
+ ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_err +
- ect->publish_systemd_vfs.writev_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.write_call +
+ ect->publish_systemd_vfs.writev_call);
ebpf_write_end_chart();
- }
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_call +
- ect->publish_systemd_vfs.readv_call);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.write_err +
+ ect->publish_systemd_vfs.writev_err);
+ ebpf_write_end_chart();
}
- }
- ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_err +
- ect->publish_systemd_vfs.readv_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.read_call +
+ ect->publish_systemd_vfs.readv_call);
ebpf_write_end_chart();
- }
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_bytes +
- ect->publish_systemd_vfs.writev_bytes);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.read_err +
+ ect->publish_systemd_vfs.readv_err);
+ ebpf_write_end_chart();
}
- }
- ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_bytes +
- ect->publish_systemd_vfs.readv_bytes);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
+ write_chart_dimension("bytes", ect->publish_systemd_vfs.write_bytes +
+ ect->publish_systemd_vfs.writev_bytes);
+ ebpf_write_end_chart();
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_call);
- }
- }
- ebpf_write_end_chart();
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
+ write_chart_dimension("bytes", ect->publish_systemd_vfs.read_bytes +
+ ect->publish_systemd_vfs.readv_bytes);
+ ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_FSYNC);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.fsync_call);
ebpf_write_end_chart();
- }
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_call);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.fsync_err);
+ ebpf_write_end_chart();
}
- }
- ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_OPEN);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.open_call);
ebpf_write_end_chart();
- }
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_call);
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.open_err);
+ ebpf_write_end_chart();
}
- }
- ebpf_write_end_chart();
- if (em->mode < MODE_ENTRY) {
- ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "");
- for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
- if (unlikely(ect->systemd) && unlikely(ect->updated)) {
- write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_err);
- }
- }
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_CREATE);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.create_call);
ebpf_write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_begin_chart(NETDATA_SERVICE_FAMILY, ect->name, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR);
+ write_chart_dimension("calls", ect->publish_systemd_vfs.create_err);
+ ebpf_write_end_chart();
+ }
}
}
@@ -1896,17 +1949,13 @@ static void ebpf_send_systemd_vfs_charts(ebpf_module_t *em)
*/
static void ebpf_vfs_send_cgroup_data(ebpf_module_t *em)
{
- if (!ebpf_cgroup_pids)
- return;
-
pthread_mutex_lock(&mutex_cgroup_shm);
ebpf_cgroup_target_t *ect;
for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
ebpf_vfs_sum_cgroup_pids(&ect->publish_systemd_vfs, ect->pids);
}
- int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
- if (has_systemd) {
+ if (shm_ebpf_cgroup.header->systemd_enabled) {
if (send_cgroup_chart) {
ebpf_create_systemd_vfs_charts(em);
}
@@ -1936,6 +1985,72 @@ static void ebpf_vfs_send_cgroup_data(ebpf_module_t *em)
}
/**
+ * Resume apps data
+ */
+void ebpf_vfs_resume_apps_data() {
+ struct ebpf_target *w;
+ for (w = apps_groups_root_target; w; w = w->next) {
+ if (unlikely(!(w->charts_created & (1 << EBPF_MODULE_VFS_IDX))))
+ continue;
+
+ ebpf_vfs_sum_pids(&w->vfs, w->root_pid);
+ }
+}
+
+/**
+ * VFS thread
+ *
+ * Thread used to generate charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always return NULL
+ */
+void *ebpf_read_vfs_thread(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ int maps_per_core = em->maps_per_core;
+ int update_every = em->update_every;
+
+ int counter = update_every - 1;
+
+ uint32_t lifetime = em->lifetime;
+ uint32_t running_time = 0;
+ usec_t period = update_every * USEC_PER_SEC;
+ int max_period = update_every * EBPF_CLEANUP_FACTOR;
+ while (!ebpf_plugin_exit && running_time < lifetime) {
+ (void)heartbeat_next(&hb, period);
+ if (ebpf_plugin_exit || ++counter != update_every)
+ continue;
+
+ netdata_thread_disable_cancelability();
+
+ pthread_mutex_lock(&collect_data_mutex);
+ ebpf_vfs_read_apps(maps_per_core, max_period);
+ ebpf_vfs_resume_apps_data();
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ counter = 0;
+
+ pthread_mutex_lock(&ebpf_exit_cleanup);
+ if (running_time && !em->running_time)
+ running_time = update_every;
+ else
+ running_time += update_every;
+
+ em->running_time = running_time;
+ pthread_mutex_unlock(&ebpf_exit_cleanup);
+ netdata_thread_enable_cancelability();
+ }
+
+ return NULL;
+}
+
+/**
* Main loop for this collector.
*
* @param step the number of microseconds used with heart beat
@@ -1961,31 +2076,22 @@ static void vfs_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
ebpf_vfs_read_global_table(stats, maps_per_core);
- pthread_mutex_lock(&collect_data_mutex);
- if (apps)
- ebpf_vfs_read_apps(maps_per_core);
if (cgroups)
- read_update_vfs_cgroup(maps_per_core);
+ read_update_vfs_cgroup();
pthread_mutex_lock(&lock);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_vfs_pid)
- ebpf_send_data_aral_chart(ebpf_aral_vfs_pid, em);
-#endif
-
ebpf_vfs_send_data(em);
fflush(stdout);
if (apps & NETDATA_EBPF_APPS_FLAG_CHART_CREATED)
ebpf_vfs_send_apps_data(em, apps_groups_root_target);
- if (cgroups)
+ if (cgroups && shm_ebpf_cgroup.header && ebpf_cgroup_pids)
ebpf_vfs_send_cgroup_data(em);
pthread_mutex_unlock(&lock);
- pthread_mutex_unlock(&collect_data_mutex);
pthread_mutex_lock(&ebpf_exit_cleanup);
if (running_time && !em->running_time)
@@ -2412,13 +2518,9 @@ void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* @param apps is apps enabled?
*/
-static void ebpf_vfs_allocate_global_vectors(int apps)
+static void ebpf_vfs_allocate_global_vectors()
{
- if (apps) {
- ebpf_vfs_aral_init();
- vfs_pid = callocz((size_t)pid_max, sizeof(netdata_publish_vfs_t *));
- vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
- }
+ vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
memset(vfs_aggregated_data, 0, sizeof(vfs_aggregated_data));
memset(vfs_publish_aggregated, 0, sizeof(vfs_publish_aggregated));
@@ -2484,7 +2586,7 @@ void *ebpf_vfs_thread(void *ptr)
ebpf_update_pid_table(&vfs_maps[NETDATA_VFS_PID], em);
- ebpf_vfs_allocate_global_vectors(em->apps_charts);
+ ebpf_vfs_allocate_global_vectors();
#ifdef LIBBPF_MAJOR_VERSION
ebpf_adjust_thread_load(em, default_btf);
@@ -2505,13 +2607,16 @@ void *ebpf_vfs_thread(void *ptr)
ebpf_create_global_charts(em);
ebpf_update_stats(&plugin_statistics, em);
ebpf_update_kernel_memory_with_vector(&plugin_statistics, em->maps, EBPF_ACTION_STAT_ADD);
-#ifdef NETDATA_DEV_MODE
- if (ebpf_aral_vfs_pid)
- vfs_disable_priority = ebpf_statistic_create_aral_chart(NETDATA_EBPF_VFS_ARAL_NAME, em);
-#endif
pthread_mutex_unlock(&lock);
+ ebpf_read_vfs.thread = mallocz(sizeof(netdata_thread_t));
+ netdata_thread_create(ebpf_read_vfs.thread,
+ ebpf_read_vfs.name,
+ NETDATA_THREAD_OPTION_DEFAULT,
+ ebpf_read_vfs_thread,
+ em);
+
vfs_collector(em);
endvfs:
diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/src/collectors/ebpf.plugin/ebpf_vfs.h
index 8fe12a7eb..89364a2b5 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.h
+++ b/src/collectors/ebpf.plugin/ebpf_vfs.h
@@ -39,7 +39,6 @@
// Group used on Dashboard
#define NETDATA_VFS_GROUP "vfs"
-#define NETDATA_VFS_CGROUP_GROUP "vfs (eBPF)"
// Contexts
#define NETDATA_CGROUP_VFS_UNLINK_CONTEXT "cgroup.vfs_unlink"
@@ -56,27 +55,29 @@
#define NETDATA_CGROUP_VFS_FSYNC_CONTEXT "cgroup.vfs_fsync"
#define NETDATA_CGROUP_VFS_FSYNC_ERROR_CONTEXT "cgroup.vfs_fsync_error"
-#define NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT "services.vfs_unlink"
-#define NETDATA_SYSTEMD_VFS_WRITE_CONTEXT "services.vfs_write"
-#define NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT "services.vfs_write_error"
-#define NETDATA_SYSTEMD_VFS_READ_CONTEXT "services.vfs_read"
-#define NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT "services.vfs_read_error"
-#define NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT "services.vfs_write_bytes"
-#define NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT "services.vfs_read_bytes"
-#define NETDATA_SYSTEMD_VFS_CREATE_CONTEXT "services.vfs_create"
-#define NETDATA_SYSTEMD_VFS_CREATE_ERROR_CONTEXT "services.vfs_create_error"
-#define NETDATA_SYSTEMD_VFS_OPEN_CONTEXT "services.vfs_open"
-#define NETDATA_SYSTEMD_VFS_OPEN_ERROR_CONTEXT "services.vfs_open_error"
-#define NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT "services.vfs_fsync"
-#define NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT "services.vfs_fsync_error"
+#define NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT "systemd.services.vfs_unlink"
+#define NETDATA_SYSTEMD_VFS_WRITE_CONTEXT "systemd.services.vfs_write"
+#define NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT "systemd.services.vfs_write_error"
+#define NETDATA_SYSTEMD_VFS_READ_CONTEXT "systemd.services.vfs_read"
+#define NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT "systemd.services.vfs_read_error"
+#define NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT "systemd.services.vfs_write_bytes"
+#define NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT "systemd.services.vfs_read_bytes"
+#define NETDATA_SYSTEMD_VFS_CREATE_CONTEXT "systemd.services.vfs_create"
+#define NETDATA_SYSTEMD_VFS_CREATE_ERROR_CONTEXT "systemd.services.vfs_create_error"
+#define NETDATA_SYSTEMD_VFS_OPEN_CONTEXT "systemd.services.vfs_open"
+#define NETDATA_SYSTEMD_VFS_OPEN_ERROR_CONTEXT "systemd.services.vfs_open_error"
+#define NETDATA_SYSTEMD_VFS_FSYNC_CONTEXT "systemd.services.vfs_fsync"
+#define NETDATA_SYSTEMD_VFS_FSYNC_ERROR_CONTEXT "systemd.services.vfs_fsync_error"
// ARAL name
#define NETDATA_EBPF_VFS_ARAL_NAME "ebpf_vfs"
typedef struct netdata_publish_vfs {
- uint64_t pid_tgid;
- uint32_t pid;
- uint32_t pad;
+ uint64_t ct;
+ uint32_t tgid;
+ uint32_t uid;
+ uint32_t gid;
+ char name[TASK_COMM_LEN];
//Counter
uint32_t write_call;
diff --git a/collectors/ebpf.plugin/integrations/ebpf_cachestat.md b/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
index 5bf0a3774..f56cd9533 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_cachestat.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_cachestat.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Cachestat"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -146,7 +146,7 @@ The configuration file name for this integration is `ebpf.d/cachestat.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_dcstat.md b/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
index 4c5719026..97f562338 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_dcstat.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_dcstat.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF DCstat"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -144,7 +144,7 @@ The configuration file name for this integration is `ebpf.d/dcstat.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_disk.md b/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
index 557da125d..5ea848d5c 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_disk.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_disk.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_disk.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_disk.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Disk"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -110,7 +110,7 @@ The configuration file name for this integration is `ebpf.d/disk.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md b/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
index 23f5bd26e..684c84efe 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_filedescriptor.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Filedescriptor"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -144,7 +144,7 @@ The configuration file name for this integration is `ebpf.d/fd.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_filesystem.md b/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
index 7a1bb832b..398e247c4 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_filesystem.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_filesystem.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Filesystem"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -131,7 +131,7 @@ The configuration file name for this integration is `ebpf.d/filesystem.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_hardirq.md b/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
index f9b529624..c59240bf3 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_hardirq.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_hardirq.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Hardirq"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -110,7 +110,7 @@ The configuration file name for this integration is `ebpf.d/hardirq.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_mdflush.md b/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
index 0081b7d83..ce33f9264 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_mdflush.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_mdflush.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF MDflush"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -105,7 +105,7 @@ The configuration file name for this integration is `ebpf.d/mdflush.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_mount.md b/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
index d19e57809..f3bd8dff8 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_mount.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_mount.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_mount.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_mount.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Mount"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -111,7 +111,7 @@ The configuration file name for this integration is `ebpf.d/mount.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_oomkill.md b/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
index 897cddfac..ff912ae7b 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_oomkill.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_oomkill.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF OOMkill"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -127,7 +127,7 @@ The configuration file name for this integration is `ebpf.d/oomkill.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_process.md b/src/collectors/ebpf.plugin/integrations/ebpf_process.md
index 109890139..df9b5c3d2 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_process.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_process.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_process.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_process.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Process"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
diff --git a/collectors/ebpf.plugin/integrations/ebpf_processes.md b/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
index 62542359a..032e1f648 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_processes.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_processes.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_processes.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_processes.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Processes"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -154,7 +154,7 @@ The configuration file name for this integration is `ebpf.d/process.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_shm.md b/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
index ffa05c770..5ac0c501f 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_shm.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_shm.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_shm.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_shm.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF SHM"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -148,7 +148,7 @@ The configuration file name for this integration is `ebpf.d/shm.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_socket.md b/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
index dc7a7d07b..8a0286b47 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_socket.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_socket.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_socket.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_socket.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Socket"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -165,7 +165,7 @@ The configuration file name for this integration is `ebpf.d/network.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_softirq.md b/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
index 6a4312c6e..8429aac29 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_softirq.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_softirq.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_softirq.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF SoftIRQ"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -110,7 +110,7 @@ The configuration file name for this integration is `ebpf.d/softirq.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_swap.md b/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
index ce2423f8d..57f435170 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_swap.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_swap.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_swap.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_swap.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF SWAP"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -137,7 +137,7 @@ The configuration file name for this integration is `ebpf.d/swap.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_sync.md b/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
index 6f6c246a7..dd8a399a2 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_sync.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_sync.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_sync.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_sync.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF Sync"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -80,7 +80,7 @@ The following alerts are available:
| Alert name | On metric | Description |
|:------------|:----------|:------------|
-| [ sync_freq ](https://github.com/netdata/netdata/blob/master/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |
+| [ sync_freq ](https://github.com/netdata/netdata/blob/master/src/health/health.d/synchronization.conf) | mem.sync | number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems. |
## Setup
@@ -118,7 +118,7 @@ The configuration file name for this integration is `ebpf.d/sync.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata
diff --git a/collectors/ebpf.plugin/integrations/ebpf_vfs.md b/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
index 4b824e975..dd182115d 100644
--- a/collectors/ebpf.plugin/integrations/ebpf_vfs.md
+++ b/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md
@@ -1,9 +1,9 @@
<!--startmeta
-custom_edit_url: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/integrations/ebpf_vfs.md"
-meta_yaml: "https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/metadata.yaml"
+custom_edit_url: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/integrations/ebpf_vfs.md"
+meta_yaml: "https://github.com/netdata/netdata/edit/master/src/collectors/ebpf.plugin/metadata.yaml"
sidebar_label: "eBPF VFS"
learn_status: "Published"
-learn_rel_path: "Data Collection/eBPF"
+learn_rel_path: "Collecting Metrics/eBPF"
most_popular: False
message: "DO NOT EDIT THIS FILE DIRECTLY, IT IS GENERATED BY THE COLLECTOR'S metadata.yaml FILE"
endmeta-->
@@ -179,7 +179,7 @@ The configuration file name for this integration is `ebpf.d/vfs.conf`.
You can edit the configuration file using the `edit-config` script from the
-Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/configure/nodes.md#the-netdata-config-directory).
+Netdata [config directory](https://github.com/netdata/netdata/blob/master/docs/netdata-agent/configuration.md#the-netdata-config-directory).
```bash
cd /etc/netdata 2>/dev/null || cd /opt/netdata/etc/netdata