Diffstat (limited to 'collectors/ebpf.plugin')
-rw-r--r--  collectors/ebpf.plugin/Makefile.am | 20
-rw-r--r--  collectors/ebpf.plugin/README.md | 722
-rw-r--r--  collectors/ebpf.plugin/ebpf.c | 1002
-rw-r--r--  collectors/ebpf.plugin/ebpf.d.conf | 44
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/cachestat.conf | 21
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/dcstat.conf | 18
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/disk.conf | 9
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/fd.conf | 19
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/filesystem.conf | 20
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/hardirq.conf | 8
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/mdflush.conf | 7
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/mount.conf | 8
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/network.conf | 14
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/oomkill.conf | 7
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/process.conf | 21
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/shm.conf | 24
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/softirq.conf | 8
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/swap.conf | 17
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/sync.conf | 16
-rw-r--r--  collectors/ebpf.plugin/ebpf.d/vfs.conf | 17
-rw-r--r--  collectors/ebpf.plugin/ebpf.h | 67
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.c | 57
-rw-r--r--  collectors/ebpf.plugin/ebpf_apps.h | 54
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c | 483
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.h | 19
-rw-r--r--  collectors/ebpf.plugin/ebpf_cgroup.c | 348
-rw-r--r--  collectors/ebpf.plugin/ebpf_cgroup.h | 70
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c | 495
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.h | 15
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.c | 842
-rw-r--r--  collectors/ebpf.plugin/ebpf_disk.h | 78
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c | 865
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.h | 85
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.c | 661
-rw-r--r--  collectors/ebpf.plugin/ebpf_filesystem.h | 68
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.c | 494
-rw-r--r--  collectors/ebpf.plugin/ebpf_hardirq.h | 73
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.c | 312
-rw-r--r--  collectors/ebpf.plugin/ebpf_mdflush.h | 42
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.c | 260
-rw-r--r--  collectors/ebpf.plugin/ebpf_mount.h | 36
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c | 400
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.h | 29
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c | 1127
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h | 115
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c | 855
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.h | 63
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c | 777
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.h | 38
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.c | 273
-rw-r--r--  collectors/ebpf.plugin/ebpf_softirq.h | 34
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c | 698
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.h | 53
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.c | 75
-rw-r--r--  collectors/ebpf.plugin/ebpf_sync.h | 6
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c | 1601
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.h | 151
-rw-r--r--  collectors/ebpf.plugin/reset_netdata_trace.sh.in | 9
58 files changed, 12452 insertions, 1298 deletions
diff --git a/collectors/ebpf.plugin/Makefile.am b/collectors/ebpf.plugin/Makefile.am
index 18b1fc6c8..2d5f92a6b 100644
--- a/collectors/ebpf.plugin/Makefile.am
+++ b/collectors/ebpf.plugin/Makefile.am
@@ -3,10 +3,6 @@
AUTOMAKE_OPTIONS = subdir-objects
MAINTAINERCLEANFILES = $(srcdir)/Makefile.in
-CLEANFILES = \
- reset_netdata_trace.sh \
- $(NULL)
-
include $(top_srcdir)/build/subst.inc
SUFFIXES = .in
@@ -16,12 +12,7 @@ userebpfconfigdir=$(configdir)/ebpf.d
install-exec-local:
$(INSTALL) -d $(DESTDIR)$(userebpfconfigdir)
-dist_plugins_SCRIPTS = \
- reset_netdata_trace.sh \
- $(NULL)
-
dist_noinst_DATA = \
- reset_netdata_trace.sh.in \
README.md \
$(NULL)
@@ -34,7 +25,18 @@ dist_ebpfconfig_DATA = \
ebpf.d/ebpf_kernel_reject_list.txt \
ebpf.d/cachestat.conf \
ebpf.d/dcstat.conf \
+ ebpf.d/disk.conf \
+ ebpf.d/fd.conf \
+ ebpf.d/filesystem.conf \
+ ebpf.d/hardirq.conf \
+ ebpf.d/mdflush.conf \
+ ebpf.d/mount.conf \
ebpf.d/network.conf \
+ ebpf.d/oomkill.conf \
ebpf.d/process.conf \
+ ebpf.d/shm.conf \
+ ebpf.d/softirq.conf \
ebpf.d/sync.conf \
+ ebpf.d/swap.conf \
+ ebpf.d/vfs.conf \
$(NULL)
diff --git a/collectors/ebpf.plugin/README.md b/collectors/ebpf.plugin/README.md
index 1e593786b..60f1fd742 100644
--- a/collectors/ebpf.plugin/README.md
+++ b/collectors/ebpf.plugin/README.md
@@ -1,35 +1,52 @@
<!--
title: "eBPF monitoring with Netdata"
-description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about your complex applications with per-second granularity."
+description: "Use Netdata's extended Berkeley Packet Filter (eBPF) collector to monitor kernel-level metrics about your
+complex applications with per-second granularity."
custom_edit_url: https://github.com/netdata/netdata/edit/master/collectors/ebpf.plugin/README.md
sidebar_label: "eBPF"
-->
# eBPF monitoring with Netdata
-Netdata's extended Berkeley Packet Filter (eBPF) collector monitors kernel-level metrics for file descriptors, virtual
-filesystem IO, and process management on Linux systems. You can use our eBPF collector to analyze how and when a process
-accesses files, when it makes system calls, whether it leaks memory or creating zombie processes, and more.
+eBPF consists of a wide toolchain that ultimately outputs a set of bytecode that will run inside the eBPF virtual
+machine (VM), which lives inside the Linux kernel. Each program is executed in response to a [tracepoint
+or kprobe](#probes-and-tracepoints) activation.
-Netdata's eBPF monitoring toolkit uses two custom eBPF programs. The default, called `entry`, monitors calls to a
-variety of kernel functions, such as `do_sys_open`, `__close_fd`, `vfs_read`, `vfs_write`, `_do_fork`, and more. The
-`return` program also monitors the return of each kernel functions to deliver more granular metrics about how your
-system and its applications interact with the Linux kernel.
+Netdata has written many eBPF programs, which, when compiled and integrated into the Netdata Agent, are able to collect
+a wide array of data about the host that would otherwise be impossible to gather. The data eBPF programs can collect is
+truly unique, giving the Netdata Agent access to data that is high value but normally hard to capture.
-eBPF monitoring can help you troubleshoot and debug how applications interact with the Linux kernel. See our [guide on
-troubleshooting apps with eBPF metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for configuration
-and troubleshooting tips.
+eBPF monitoring can help you troubleshoot and debug how applications interact with the Linux kernel. See
+our [guide on troubleshooting apps with eBPF metrics](/docs/guides/troubleshoot/monitor-debug-applications-ebpf.md) for
+configuration and troubleshooting tips.
<figure>
<img src="https://user-images.githubusercontent.com/1153921/74746434-ad6a1e00-5222-11ea-858a-a7882617ae02.png" alt="An example of VFS charts, made possible by the eBPF collector plugin" />
<figcaption>An example of VFS charts made possible by the eBPF collector plugin.</figcaption>
</figure>
-## Enable the collector on Linux
+## Probes and Tracepoints
+
+The following two features from the Linux kernel are used by Netdata to run eBPF programs:
+
+- Kprobes and return probes (kretprobes): probes can be inserted into virtually any kernel instruction. When eBPF runs
+  in `entry` mode, it attaches only `kprobes` to internal functions, monitoring calls and some arguments every time a
+  function is called. The user can also change the configuration to use [`return`](#global) mode, which additionally
+  monitors the return of these functions to detect possible failures.
+- Tracepoints are hooks to call specific functions. Tracepoints are more stable than `kprobes` and are preferred when
+  both options are available.
+
+In each case, wherever a normal kprobe, kretprobe, or tracepoint would have run its hook function, an eBPF program is
+run instead, performing various collection logic before letting the kernel continue its normal control flow.
+
+There are other methods by which eBPF programs can be triggered that are not currently supported, such as uprobes,
+which allow hooking into arbitrary user-space functions in a similar manner to kprobes.
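+
+To make this mechanism more concrete, below is a minimal, illustrative kernel-side eBPF program written in libbpf
+style. It attaches a `kprobe` to `do_sys_openat2` and counts calls per PID. The file name, map name, and function name
+are chosen for illustration only; this is a sketch of the technique, not the program shipped by `ebpf.plugin`, and it
+assumes a generated `vmlinux.h` plus the libbpf headers are available.
+
+```c
+/* kprobe_count.bpf.c -- illustrative sketch, not the program shipped by ebpf.plugin. */
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+
+/* Hash table keyed by PID, storing how many times the probe fired. */
+struct {
+    __uint(type, BPF_MAP_TYPE_HASH);
+    __uint(max_entries, 10240);
+    __type(key, __u32);
+    __type(value, __u64);
+} open_calls SEC(".maps");
+
+SEC("kprobe/do_sys_openat2")
+int count_do_sys_openat2(struct pt_regs *ctx)
+{
+    __u32 pid = bpf_get_current_pid_tgid() >> 32;
+    __u64 one = 1, *val;
+
+    val = bpf_map_lookup_elem(&open_calls, &pid);
+    if (val)
+        __sync_fetch_and_add(val, 1);
+    else
+        bpf_map_update_elem(&open_calls, &pid, &one, BPF_ANY);
+
+    return 0; /* the kernel then continues its normal control flow */
+}
+
+char LICENSE[] SEC("license") = "GPL";
+```
+
+Such an object can be compiled with, for example, `clang -O2 -g -target bpf -c kprobe_count.bpf.c`, and a user-space
+loader (for instance one generated with `bpftool gen skeleton`) would attach it and read the `open_calls` map
+periodically to produce incremental metrics.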
+
+## Manually enable the collector on Linux
**The eBPF collector is installed and enabled by default on most new installations of the Agent**. The eBPF collector
-does not currently work with [static build installations](/packaging/installer/methods/kickstart-64.md), but improved
-support is in active development.
+does not currently work with [static build installations](/packaging/installer/methods/kickstart-64.md) for kernels older
+than `4.11`, but improved support is in active development.
eBPF monitoring only works on Linux systems and with specific Linux kernels, including all kernels newer than `4.11.0`,
and all kernels on CentOS 7.6 or later.
@@ -39,72 +56,403 @@ section for details.
## Charts
-The eBPF collector creates an **eBPF** menu in the Agent's dashboard along with three sub-menus: **File**, **VFS**, and
-**Process**. All the charts in this section update every second. The collector stores the actual value inside of its
-process, but charts only show the difference between the values collected in the previous and current seconds.
+The eBPF collector creates charts on different menus, like System Overview, Memory, MD arrays, Disks, Filesystem,
+Mount Points, Networking Stack, systemd Services, and Applications.
+
+The collector stores the actual value inside of its process, but charts only show the difference between the values
+collected in the previous and current seconds.
+
+### System overview
+
+Not all charts within the System Overview menu are enabled by default, because they add around 100ns of overhead for
+each function call. This number is small from a human perspective, but the functions are called many times, creating an
+impact on the host. See the [configuration](#configuration) section for details about how to enable them.
+
+#### Processes
+
+Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
+system calls: `fork(2)`, `vfork(2)`, and `clone(2)`. To generate this chart, the eBPF
+collector uses the following `tracepoints` and `kprobes`:
+
+- `sched/sched_process_fork`: Tracepoint called after a call to `fork(2)`, `vfork(2)`, or `clone(2)`.
+- `sched/sched_process_exec`: Tracepoint called after an exec-family syscall.
+- `kprobe/kernel_clone`: This is the main [`fork()`](https://elixir.bootlin.com/linux/v5.10/source/kernel/fork.c#L2415)
+ routine since kernel `5.10.0` was released.
+- `kprobe/_do_fork`: Like `kernel_clone`, but this was the main function between kernels `4.2.0` and `5.9.16`
+- `kprobe/do_fork`: This was the main function before kernel `4.2.0`.
+
+#### Process Exit
+
+Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
+system that the task is finishing its work. The second step is to release the kernel information with the internal
+function `release_task`. The difference between the two dimensions can help you discover
+[zombie processes](https://en.wikipedia.org/wiki/Zombie_process). To get the metrics, the collector uses:
+
+- `sched/sched_process_exit`: Tracepoint called after a task exits.
+- `kprobe/release_task`: This function is called when a process exits, as the kernel still needs to remove the process
+ descriptor.
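+
+As a quick way to see these hooks in action, the hypothetical program below forks a child that exits immediately: the
+`fork()` fires `sched/sched_process_fork` (and the fork hooks listed in the previous subsection), the child's exit
+fires `sched/sched_process_exit`, and reaping the child with `waitpid()` lets the kernel run `release_task` to free the
+process descriptor.
+
+```c
+/* fork_exit_demo.c -- generates the fork/exit events described above (illustrative). */
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+int main(void)
+{
+    pid_t pid = fork(); /* sched/sched_process_fork fires here */
+    if (pid < 0) {
+        perror("fork");
+        return EXIT_FAILURE;
+    }
+    if (pid == 0) {
+        /* Child: exit right away, firing sched/sched_process_exit. */
+        _exit(EXIT_SUCCESS);
+    }
+    /* Parent: reaping the child lets the kernel call release_task for it. */
+    if (waitpid(pid, NULL, 0) < 0) {
+        perror("waitpid");
+        return EXIT_FAILURE;
+    }
+    printf("forked and reaped child %d\n", (int)pid);
+    return EXIT_SUCCESS;
+}
+```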
+
+#### Task error
+
+The functions responsible for ending tasks do not return values, so this chart contains information about failures on
+process and thread creation only.
+
+#### Swap
+
+Inside the swap submenu the eBPF plugin creates the chart `swapcalls`. This chart shows when processes call the
+functions [`swap_readpage` and `swap_writepage`](https://hzliu123.github.io/linux-kernel/Page%20Cache%20in%20Linux%202.6.pdf),
+which are responsible for doing IO in swap memory. To capture the exact moment that an access to swap happens, the
+collector attaches `kprobes` to the cited functions.
+
+#### Soft IRQ
+
+The following `tracepoints` are used to measure time usage for soft IRQs:
+
+- [`irq/softirq_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_entry): Called
+  before the softirq handler runs.
+- [`irq/softirq_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_softirq_exit): Called
+  when the softirq handler returns.
+
+#### Hard IRQ
+
+The following tracepoints are used to measure the latency of servicing a
+hardware interrupt request (hard IRQ).
+
+- [`irq/irq_handler_entry`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_entry):
+ Called immediately before the IRQ action handler.
+- [`irq/irq_handler_exit`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_irq_handler_exit):
+ Called immediately after the IRQ action handler returns.
+- `irq_vectors`: These are traces similar to `irq_handler_entry` and
+  `irq_handler_exit`, but triggered for specific interrupt vectors. The
+  following vector tracepoints are used:
+ - `irq_vectors/local_timer_entry`
+ - `irq_vectors/local_timer_exit`
+ - `irq_vectors/reschedule_entry`
+ - `irq_vectors/reschedule_exit`
+ - `irq_vectors/call_function_entry`
+ - `irq_vectors/call_function_exit`
+ - `irq_vectors/call_function_single_entry`
+  - `irq_vectors/call_function_single_exit`
+ - `irq_vectors/irq_work_entry`
+ - `irq_vectors/irq_work_exit`
+ - `irq_vectors/error_apic_entry`
+ - `irq_vectors/error_apic_exit`
+ - `irq_vectors/thermal_apic_entry`
+ - `irq_vectors/thermal_apic_exit`
+ - `irq_vectors/threshold_apic_entry`
+ - `irq_vectors/threshold_apic_exit`
+ - `irq_vectors/deferred_error_entry`
+ - `irq_vectors/deferred_error_exit`
+ - `irq_vectors/spurious_apic_entry`
+ - `irq_vectors/spurious_apic_exit`
+ - `irq_vectors/x86_platform_ipi_entry`
+ - `irq_vectors/x86_platform_ipi_exit`
+
+#### IPC shared memory
+
+To monitor shared memory system call counts, the following `kprobes` are used:
+
+- `shmget`: Runs when [`shmget`](https://man7.org/linux/man-pages/man2/shmget.2.html) is called.
+- `shmat`: Runs when [`shmat`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
+- `shmdt`: Runs when [`shmdt`](https://man7.org/linux/man-pages/man2/shmat.2.html) is called.
+- `shmctl`: Runs when [`shmctl`](https://man7.org/linux/man-pages/man2/shmctl.2.html) is called.
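+
+For reference, the small, hypothetical program below performs one of each call, which is enough to make all four
+dimensions increment once.
+
+```c
+/* shm_demo.c -- one shmget/shmat/shmdt/shmctl cycle (illustrative). */
+#include <stdio.h>
+#include <string.h>
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+int main(void)
+{
+    /* shmget: create a private 4 KiB segment. */
+    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
+    if (id < 0) {
+        perror("shmget");
+        return 1;
+    }
+
+    /* shmat: map the segment into this process. */
+    void *addr = shmat(id, NULL, 0);
+    if (addr == (void *)-1) {
+        perror("shmat");
+    } else {
+        strcpy(addr, "hello from shared memory");
+        shmdt(addr); /* shmdt: detach the mapping */
+    }
+
+    shmctl(id, IPC_RMID, NULL); /* shmctl: mark the segment for removal */
+    return 0;
+}
+```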
+
+### Memory
+
+In the memory submenu the eBPF plugin creates two submenus **page cache** and **synchronization** with the following
+organization:
+
+* Page Cache
+ * Page cache ratio
+ * Dirty pages
+ * Page cache hits
+ * Page cache misses
+* Synchronization
+ * File sync
+ * Memory map sync
+ * File system sync
+ * File range sync
+
+#### Page cache ratio
+
+The chart `cachestat_ratio` shows how processes are accessing page cache. In a normal scenario, we expect values around
+100%, which means that the majority of the work on the machine is processed in memory. To calculate the ratio, Netdata
+attaches `kprobes` to the following kernel functions:
+
+- `add_to_page_cache_lru`: Page addition.
+- `mark_page_accessed`: Access to cache.
+- `account_page_dirtied`: Dirty (modified) pages.
+- `mark_buffer_dirty`: Writes to page cache.
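+
+The exact arithmetic lives inside the collector, but the idea can be sketched as follows. The sketch below uses the
+widely known `cachestat` heuristic (total cache accesses from `mark_page_accessed` minus `mark_buffer_dirty`, misses
+from `add_to_page_cache_lru` minus `account_page_dirtied`); treat the formula and names as illustrative assumptions,
+not code copied from the plugin.
+
+```c
+/* Illustrative cachestat-style hit ratio; not the plugin's exact implementation. */
+#include <stdint.h>
+#include <stdio.h>
+
+static double cachestat_ratio(uint64_t mark_page_accessed, uint64_t mark_buffer_dirty,
+                              uint64_t add_to_page_cache_lru, uint64_t account_page_dirtied)
+{
+    /* Accesses that actually went through the page cache. */
+    int64_t total = (int64_t)mark_page_accessed - (int64_t)mark_buffer_dirty;
+    /* Pages added to the LRU that were not dirty-page additions. */
+    int64_t misses = (int64_t)add_to_page_cache_lru - (int64_t)account_page_dirtied;
+
+    if (total < 0)
+        total = 0;
+    if (misses < 0)
+        misses = 0;
+
+    int64_t hits = total - misses;
+    if (hits < 0)
+        hits = 0;
+
+    if (total == 0)
+        return 100.0; /* nothing measured in this interval: treat as fully cached */
+    return 100.0 * (double)hits / (double)total;
+}
+
+int main(void)
+{
+    /* Counters sampled over one second (made-up numbers). */
+    printf("ratio = %.2f%%\n", cachestat_ratio(1000, 5, 50, 10));
+    return 0;
+}
+```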
+
+#### Dirty pages
+
+The chart `cachestat_dirties` shows the number of pages that were modified ("dirty"). It tracks the number of calls to
+the function `mark_buffer_dirty`.
+
+#### Page cache hits
+
+A page cache hit is when the page cache is successfully accessed with a read operation. We do not count pages that were
+added relatively recently.
+
+#### Page cache misses
+
+A page cache miss means that a page was not inside memory when the process tried to access it. This chart shows the
+difference between the number of calls to `add_to_page_cache_lru` and the number of calls to `account_page_dirtied`.
+
+#### File sync
+
+This chart shows calls to synchronization methods, [`fsync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html)
+and [`fdatasync(2)`](https://man7.org/linux/man-pages/man2/fdatasync.2.html), to transfer all modified page caches
+for the files on disk devices. These calls block until the disk reports that the transfer has been completed. They flush
+data for specific file descriptors.
+
+#### Memory map sync
+
+The chart shows calls to [`msync(2)`](https://man7.org/linux/man-pages/man2/msync.2.html) syscalls. This syscall flushes
+changes to a file that was mapped into memory using [`mmap(2)`](https://man7.org/linux/man-pages/man2/mmap.2.html).
+
+#### File system sync
+
+This chart monitors calls demonstrating commits from filesystem caches to disk. Netdata attaches `kprobes` for
+[`sync(2)`](https://man7.org/linux/man-pages/man2/sync.2.html), and [`syncfs(2)`](https://man7.org/linux/man-pages/man2/sync.2.html).
+
+#### File range sync
+
+This chart shows calls to [`sync_file_range(2)`](https://man7.org/linux/man-pages/man2/sync_file_range.2.html) which
+synchronizes file segments with disk.
+
+> Note: This is the most dangerous syscall to synchronize data, according to its manual.
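+
+The hypothetical program below issues each of the synchronization calls monitored in this submenu, so you can watch the
+corresponding charts react while it runs. `syncfs(2)` and `sync_file_range(2)` are Linux-specific and require
+`_GNU_SOURCE`; the temporary file path is arbitrary.
+
+```c
+/* sync_demo.c -- exercises the monitored synchronization syscalls (illustrative). */
+#define _GNU_SOURCE
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+int main(void)
+{
+    int fd = open("/tmp/ebpf_sync_demo", O_CREAT | O_RDWR | O_TRUNC, 0600);
+    if (fd < 0) {
+        perror("open");
+        return 1;
+    }
+
+    const char msg[] = "hello, page cache\n";
+    if (write(fd, msg, sizeof(msg) - 1) < 0)
+        perror("write");
+
+    fsync(fd);                                        /* File sync chart */
+    fdatasync(fd);                                    /* File sync chart */
+    sync();                                           /* File system sync chart */
+    syncfs(fd);                                       /* File system sync chart */
+    sync_file_range(fd, 0, 0, SYNC_FILE_RANGE_WRITE); /* File range sync chart */
+
+    /* Memory map sync chart: flush an mmap'ed view of the same file. */
+    void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    if (map != MAP_FAILED) {
+        memcpy(map, msg, sizeof(msg) - 1);
+        msync(map, 4096, MS_SYNC);
+        munmap(map, 4096);
+    }
+
+    close(fd);
+    unlink("/tmp/ebpf_sync_demo");
+    return 0;
+}
+```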
+
+### Multiple Device (MD) arrays
+
+The eBPF plugin shows multi-device flushes happening in real time. This can be used to explain some spikes happening
+in [disk latency](#disk) charts.
+
+By default, MD flush is disabled. To enable it, configure your
+`/etc/netdata/ebpf.d.conf` file as:
+
+```conf
+[global]
+ mdflush = yes
+```
+
+#### MD flush
+
+To collect data related to Linux multi-device (MD) flushing, the following kprobe is used:
+
+- `kprobe/md_flush_request`: called whenever a request for flushing multi-device data is made.
+
+### Disk
+
+The eBPF plugin also shows a chart in the Disk section when the `disk` thread is enabled. This will create the
+chart `disk_latency_io` for each disk on the host. The following tracepoints are used:
+
+- [`block/block_rq_issue`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_issue):
+ IO request operation to a device drive.
+- [`block/block_rq_complete`](https://www.kernel.org/doc/html/latest/core-api/tracepoint.html#c.trace_block_rq_complete):
+ IO operation completed by device.
+
+### Filesystem
+
+This group has charts demonstrating how applications interact with the Linux
+kernel to open and close file descriptors. It also brings latency charts for
+several different filesystems.
-### File
+#### ext4
-This group has two charts demonstrating how software interacts with the Linux kernel to open and close file descriptors.
+To measure the latency of executing some actions in an
+[ext4](https://elixir.bootlin.com/linux/latest/source/fs/ext4) filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `ext4_file_read_iter`: Function used to measure read latency.
+- `ext4_file_write_iter`: Function used to measure write latency.
+- `ext4_file_open`: Function used to measure open latency.
+- `ext4_sync_file`: Function used to measure sync latency.
+
+#### ZFS
+
+To measure the latency of executing some actions in a zfs filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `zpl_iter_read`: Function used to measure read latency.
+- `zpl_iter_write`: Function used to measure write latency.
+- `zpl_open`: Function used to measure open latency.
+- `zpl_fsync`: Function used to measure sync latency.
+
+#### XFS
+
+To measure the latency of executing some actions in an
+[xfs](https://elixir.bootlin.com/linux/latest/source/fs/xfs) filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `xfs_file_read_iter`: Function used to measure read latency.
+- `xfs_file_write_iter`: Function used to measure write latency.
+- `xfs_file_open`: Function used to measure open latency.
+- `xfs_file_fsync`: Function used to measure sync latency.
+
+#### NFS
+
+To measure the latency of executing some actions in an
+[nfs](https://elixir.bootlin.com/linux/latest/source/fs/nfs) filesystem, the
+collector needs to attach `kprobes` and `kretprobes` for each of the following
+functions:
+
+- `nfs_file_read`: Function used to measure read latency.
+- `nfs_file_write`: Function used to measure write latency.
+- `nfs_file_open`: Function used to measure open latency.
+- `nfs4_file_open`: Function used to measure open latency for NFS v4.
+- `nfs_getattr`: Function used to measure sync latency.
+
+#### btrfs
+
+To measure the latency of executing some actions in a [btrfs](https://elixir.bootlin.com/linux/latest/source/fs/btrfs/file.c)
+filesystem, the collector needs to attach `kprobes` and `kretprobes` for each of the following functions:
+
+> Note: We are listing two functions used to measure `read` latency, but we use either `btrfs_file_read_iter` or
+`generic_file_read_iter`, depending on kernel version.
+
+- `btrfs_file_read_iter`: Function used to measure read latency since kernel `5.10.0`.
+- `generic_file_read_iter`: Like `btrfs_file_read_iter`, but this function was used before kernel `5.10.0`.
+- `btrfs_file_write_iter`: Function used to write data.
+- `btrfs_file_open`: Function used to open files.
+- `btrfs_sync_file`: Function used to synchronize data to filesystem.
#### File descriptor
-This chart contains two dimensions that show the number of calls to the functions `do_sys_open` and `__close_fd`. Most
-software do not commonly call these functions directly, but they are behind the system calls `open(2)`, `openat(2)`,
-and `close(2)`.
+To provide metrics related to `open` and `close` events, instead of attaching kprobes to each syscall used for these
+events, the collector attaches `kprobes` to the common functions those syscalls use:
+
+- [`do_sys_open`](https://0xax.gitbooks.io/linux-insides/content/SysCall/linux-syscall-5.html ): Internal function used to
+ open files.
+- [`do_sys_openat2`](https://elixir.bootlin.com/linux/v5.6/source/fs/open.c#L1162):
+ Function called from `do_sys_open` since version `5.6.0`.
+- [`close_fd`](https://www.mail-archive.com/linux-kernel@vger.kernel.org/msg2271761.html): Function used to close file
+ descriptor since kernel `5.11.0`.
+- `__close_fd`: Function used to close files before version `5.11.0`.
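+
+Any program that touches files exercises these hooks. The hypothetical snippet below performs one `openat(2)` and one
+`close(2)`; on the kernel versions noted above these land in `do_sys_openat2` and `close_fd`, respectively. The path is
+only an example.
+
+```c
+/* open_close_demo.c -- one open/close pair (illustrative). */
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+int main(void)
+{
+    /* openat(2) is handled by do_sys_openat2 on kernels >= 5.6 (see the list above). */
+    int fd = openat(AT_FDCWD, "/etc/hostname", O_RDONLY);
+    if (fd < 0) {
+        perror("openat"); /* a failure here also feeds the File error chart */
+        return 1;
+    }
+
+    /* close(2) is handled by close_fd on kernels >= 5.11, __close_fd before that. */
+    close(fd);
+    return 0;
+}
+```
+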
#### File error
This chart shows the number of times some software tried and failed to open or close a file descriptor.
-### VFS
+#### VFS
+
+The Linux Virtual File System (VFS) is an abstraction layer on top of a
+concrete filesystem like the ones listed in the parent section, e.g. `ext4`.
-A [virtual file system](https://en.wikipedia.org/wiki/Virtual_file_system) (VFS) is a layer on top of regular
-filesystems. The functions present inside this API are used for all filesystems, so it's possible the charts in this
-group won't show _all_ the actions that occurred on your system.
+In this section we list the mechanism by which we gather VFS data, and what
+charts are consequently created.
-#### Deleted objects
+##### VFS eBPF Hooks
-This chart monitors calls for `vfs_unlink`. This function is responsible for removing objects from the file system.
+To measure the latency and total quantity of executing some VFS-level
+functions, ebpf.plugin needs to attach kprobes and kretprobes for each of the
+following functions:
-#### IO
+- `vfs_write`: Function used for monitoring the number of successful & failed
+  filesystem write calls, as well as the total number of written bytes.
+- `vfs_writev`: Same function as `vfs_write` but for vector writes (i.e. a
+ single write operation using a group of buffers rather than 1).
+- `vfs_read`: Function used for monitoring the number of successful & failed
+ filesystem read calls, as well as the total number of read bytes.
+- `vfs_readv`: Same function as `vfs_read` but for vector reads (i.e. a single
+ read operation using a group of buffers rather than 1).
+- `vfs_unlink`: Function used for monitoring the number of successful & failed
+ filesystem unlink calls.
+- `vfs_fsync`: Function used for monitoring the number of successful & failed
+ filesystem fsync calls.
+- `vfs_open`: Function used for monitoring the number of successful & failed
+ filesystem open calls.
+- `vfs_create`: Function used for monitoring the number of successful & failed
+ filesystem create calls.
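+
+As a small illustration of the vector variants listed above, the hypothetical snippet below gathers two buffers into a
+single `writev(2)` call, which the kernel typically services through `vfs_writev` rather than `vfs_write`. The file
+path is arbitrary.
+
+```c
+/* writev_demo.c -- a single vector write (illustrative). */
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/uio.h>
+#include <unistd.h>
+
+int main(void)
+{
+    int fd = open("/tmp/ebpf_vfs_demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);
+    if (fd < 0) {
+        perror("open");
+        return 1;
+    }
+
+    char part1[] = "written as ";
+    char part2[] = "one vector write\n";
+    struct iovec iov[2] = {
+        { .iov_base = part1, .iov_len = strlen(part1) },
+        { .iov_base = part2, .iov_len = strlen(part2) },
+    };
+
+    /* One syscall, two buffers: this is the vector path described above. */
+    if (writev(fd, iov, 2) < 0)
+        perror("writev");
+
+    close(fd);
+    unlink("/tmp/ebpf_vfs_demo");
+    return 0;
+}
+```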
+
+##### VFS Deleted objects
+
+This chart monitors calls to `vfs_unlink`. This function is responsible for removing objects from the file system.
+
+##### VFS IO
This chart shows the number of calls to the functions `vfs_read` and `vfs_write`.
-#### IO bytes
+##### VFS IO bytes
-This chart also monitors `vfs_read` and `vfs_write`, but instead shows the total of bytes read and written with these
-functions.
+This chart also monitors `vfs_read` and `vfs_write` but, instead of the number of calls, it shows the total amount of
+bytes read and written with these functions.
The Agent displays the number of bytes written as negative because they are moving down to disk.
-#### IO errors
+##### VFS IO errors
The Agent counts and shows the number of instances where a running program experiences a read or write error.
-### Process
+##### VFS Create
-For this group, the eBPF collector monitors process/thread creation and process end, and then displays any errors in the
-following charts.
+This chart shows the number of calls to `vfs_create`. This function is responsible for creating files.
-#### Process thread
+##### VFS Synchronization
-Internally, the Linux kernel treats both processes and threads as `tasks`. To create a thread, the kernel offers a few
-system calls: `fork(2)`, `vfork(2)` and `clone(2)`. In turn, each of these system calls use the function `_do_fork`. To
-generate this chart, the eBPF collector monitors `_do_fork` to populate the `process` dimension, and monitors
-`sys_clone` to identify threads.
+This chart shows the number of calls to `vfs_fsync`. This function is responsible for calling `fsync(2)` or
+`fdatasync(2)` on a file. You can see more details in the Synchronization section.
-#### Exit
+##### VFS Open
-Ending a task requires two steps. The first is a call to the internal function `do_exit`, which notifies the operating
-system that the task is finishing its work. The second step is to release the kernel information with the internal
-function `release_task`. The difference between the two dimensions can help you discover [zombie
-processes](https://en.wikipedia.org/wiki/Zombie_process).
+This chart shows the number of calls to `vfs_open`. This function is responsible for opening files.
-#### Task error
+#### Directory Cache
-The functions responsible for ending tasks do not return values, so this chart contains information about failures on
-process and thread creation.
+Metrics for the directory cache are collected using a kprobe for `lookup_fast`, because we are interested only in the
+number of times this function is accessed. For `d_lookup`, on the other hand, we are interested not only in the number
+of times it is accessed, but also in possible errors, so we also need to attach a `kretprobe`. For this reason, the
+following are used:
+
+- [`lookup_fast`](https://lwn.net/Articles/649115/): Called to look at data inside the directory cache.
+- [`d_lookup`](https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/dcache.c?id=052b398a43a7de8c68c13e7fa05d6b3d16ce6801#n2223):
+ Called when the desired file is not inside the directory cache.
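+
+The hypothetical snippet below makes the two paths easier to observe: repeated `stat(2)` calls on the same existing
+path are normally resolved from the directory cache, while a path that is not cached (for example, one that does not
+exist) exercises the slower lookup and the miss/not-found dimensions.
+
+```c
+/* dcache_demo.c -- repeated path lookups to exercise the directory cache (illustrative). */
+#include <stdio.h>
+#include <sys/stat.h>
+
+int main(void)
+{
+    struct stat st;
+
+    /* The first lookup may need to populate the directory cache... */
+    if (stat("/etc/hostname", &st) == 0)
+        printf("size of /etc/hostname: %lld bytes\n", (long long)st.st_size);
+
+    /* ...while later lookups of the same path are normally served from it. */
+    for (int i = 0; i < 1000; i++)
+        stat("/etc/hostname", &st);
+
+    /* A path that does not exist feeds the miss/not-found dimensions. */
+    if (stat("/etc/this-file-does-not-exist", &st) != 0)
+        perror("stat");
+
+    return 0;
+}
+```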
+
+### Mount Points
+
+The following `kprobes` are used to collect `mount` & `unmount` call counts:
+
+- [`mount`](https://man7.org/linux/man-pages/man2/mount.2.html): mount filesystem on host.
+- [`umount`](https://man7.org/linux/man-pages/man2/umount.2.html): umount filesystem on host.
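+
+The hypothetical snippet below, which must run as root, performs one `mount(2)` and one `umount(2)` of a small `tmpfs`,
+which is enough to bump both dimensions once. The mount point path is arbitrary.
+
+```c
+/* mount_demo.c -- one mount(2)/umount(2) cycle on a tmpfs (illustrative, run as root). */
+#include <errno.h>
+#include <stdio.h>
+#include <sys/mount.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+int main(void)
+{
+    const char *target = "/tmp/ebpf_mount_demo";
+
+    if (mkdir(target, 0700) < 0 && errno != EEXIST) {
+        perror("mkdir");
+        return 1;
+    }
+
+    /* mount: attach a fresh tmpfs instance at the target directory. */
+    if (mount("tmpfs", target, "tmpfs", 0, "size=1m") < 0) {
+        perror("mount");
+        return 1;
+    }
+
+    /* umount: detach it again. */
+    if (umount(target) < 0) {
+        perror("umount");
+        return 1;
+    }
+
+    rmdir(target);
+    puts("mounted and unmounted a tmpfs");
+    return 0;
+}
+```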
+
+### Networking Stack
+
+Netdata monitors socket bandwidth by attaching `kprobes` to internal functions.
+
+#### TCP functions
+
+This chart demonstrates calls to the functions `tcp_sendmsg`, `tcp_cleanup_rbuf`, and `tcp_close`; these functions are
+used to send & receive data and to close connections when the `TCP` protocol is used.
+
+#### TCP bandwidth
+
+Like the previous chart, this one also monitors `tcp_sendmsg` and `tcp_cleanup_rbuf`, but instead of showing the number
+of calls, it demonstrates the number of bytes sent and received.
+
+#### TCP retransmit
+
+This chart demonstrates calls to the function `tcp_retransmit_skb`, which is responsible for executing TCP
+retransmission when the receiver did not acknowledge a packet within the expected time.
+
+#### UDP functions
+
+This chart demonstrates calls to functions `udp_sendmsg` and `udp_recvmsg`, which are responsible for sending &
+receiving data for connections when the `UDP` protocol is used.
+
+#### UDP bandwidth
+
+Like the previous chart, this one also monitors `udp_sendmsg` and `udp_recvmsg`, but instead of showing the number of
+calls, it monitors the number of bytes sent and received.
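+
+To see these charts move, the hypothetical loopback example below sends one datagram to itself and reads it back,
+triggering one `udp_sendmsg` and one `udp_recvmsg` call.
+
+```c
+/* udp_demo.c -- one loopback datagram to trigger udp_sendmsg/udp_recvmsg (illustrative). */
+#include <arpa/inet.h>
+#include <netinet/in.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+int main(void)
+{
+    int fd = socket(AF_INET, SOCK_DGRAM, 0);
+    if (fd < 0) {
+        perror("socket");
+        return 1;
+    }
+
+    struct sockaddr_in addr;
+    memset(&addr, 0, sizeof(addr));
+    addr.sin_family = AF_INET;
+    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
+    addr.sin_port = 0; /* let the kernel pick a free port */
+
+    if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+        perror("bind");
+        close(fd);
+        return 1;
+    }
+
+    socklen_t len = sizeof(addr);
+    getsockname(fd, (struct sockaddr *)&addr, &len); /* learn the chosen port */
+
+    const char msg[] = "ping";
+    char buf[16] = { 0 };
+
+    /* udp_sendmsg fires in the kernel for this call... */
+    sendto(fd, msg, sizeof(msg), 0, (struct sockaddr *)&addr, sizeof(addr));
+    /* ...and udp_recvmsg fires for this one. */
+    recv(fd, buf, sizeof(buf) - 1, 0);
+
+    printf("received \"%s\" over loopback UDP\n", buf);
+    close(fd);
+    return 0;
+}
+```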
+
+### Apps
+
+#### OOM Killing
+
+These are tracepoints related to [OOM](https://en.wikipedia.org/wiki/Out_of_memory) killing processes.
+
+- `oom/mark_victim`: Monitors when an oomkill event happens.
## Configuration
@@ -134,7 +482,7 @@ cd /etc/netdata/ # Replace with your Netdata configuration directory, if not /
The `[global]` section defines settings for the whole eBPF collector.
-#### ebpf load mode
+#### eBPF load mode
The collector has two different eBPF programs. These programs monitor the same functions inside the kernel, but they
monitor, process, and display different kinds of information.
@@ -143,43 +491,20 @@ By default, this plugin uses the `entry` mode. Changing this mode can create sig
system, but also offer valuable information if you are developing or debugging software. The `ebpf load mode` option
accepts the following values:
-- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described
- in the sections above, and does not show charts related to errors.
-- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
- new charts for the return of these functions, such as errors. Monitoring function returns can help in debugging
- software, such as failing to close file descriptors or creating zombie processes.
-- `update every`: Number of seconds used for eBPF to send data for Netdata.
-- `pid table size`: Defines the maximum number of PIDs stored inside the application hash table.
-
+- `entry`: This is the default mode. In this mode, the eBPF collector only monitors calls for the functions described in
+ the sections above, and does not show charts related to errors.
+- `return`: In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates new
+ charts for the return of these functions, such as errors. Monitoring function returns can help in debugging software,
+ such as failing to close file descriptors or creating zombie processes.
+- `update every`: Number of seconds used for eBPF to send data to Netdata.
+- `pid table size`: Defines the maximum number of PIDs stored inside the application hash table.
+
#### Integration with `apps.plugin`
The eBPF collector also creates charts for each running application through an integration with the
[`apps.plugin`](/collectors/apps.plugin/README.md). This integration helps you understand how specific applications
interact with the Linux kernel.
-When the integration is enabled, your dashboard will also show the following charts using low-level Linux metrics:
-
-- eBPF file
- - Number of calls to open files. (`apps.file_open`)
- - Number of files closed. (`apps.file_closed`)
- - Number of calls to open files that returned errors.
- - Number of calls to close files that returned errors.
-- eBPF syscall
- - Number of calls to delete files. (`apps.file_deleted`)
- - Number of calls to `vfs_write`. (`apps.vfs_write_call`)
- - Number of calls to `vfs_read`. (`apps.vfs_read_call`)
- - Number of bytes written with `vfs_write`. (`apps.vfs_write_bytes`)
- - Number of bytes read with `vfs_read`. (`apps.vfs_read_bytes`)
- - Number of calls to write a file that returned errors.
- - Number of calls to read a file that returned errors.
-- eBPF process
- - Number of process created with `do_fork`. (`apps.process_create`)
- - Number of threads created with `do_fork` or `__x86_64_sys_clone`, depending on your system's kernel version. (`apps.thread_create`)
- - Number of times that a process called `do_exit`. (`apps.task_close`)
-- eBPF net
- - Number of bytes sent. (`apps.bandwidth_sent`)
- - Number of bytes received. (`apps.bandwidth_recv`)
-
If you want to _disable_ the integration with `apps.plugin` along with the above charts, change the setting `apps` to
`no`.
@@ -188,30 +513,129 @@ If you want to _disable_ the integration with `apps.plugin` along with the above
apps = yes
```
-When the integration is enabled, eBPF collector allocates memory for each process running. The total
- allocated memory has direct relationship with the kernel version. When the eBPF plugin is running on kernels newer than `4.15`,
- it uses per-cpu maps to speed up the update of hash tables. This also implies storing data for the same PID
- for each processor it runs.
+When the integration is enabled, the eBPF collector allocates memory for each running process. The total allocated
+memory has a direct relationship with the kernel version. When the eBPF plugin is running on kernels newer than `4.15`,
+it uses per-cpu maps to speed up the update of hash tables. This also implies storing data for the same PID for each
+processor it runs on.
+
+#### Integration with `cgroups.plugin`
-#### `[ebpf programs]`
+The eBPF collector also creates charts for each cgroup through an integration with the
+[`cgroups.plugin`](/collectors/cgroups.plugin/README.md). This integration helps you understand how a specific cgroup
+interacts with the Linux kernel.
+
+The integration with `cgroups.plugin` is disabled by default to avoid creating overhead on your system. If you want to
+_enable_ the integration with `cgroups.plugin`, change the `cgroups` setting to `yes`.
+
+```conf
+[global]
+ cgroups = yes
+```
+
+If you do not need to monitor specific metrics for your `cgroups`, you can enable `cgroups` inside
+`ebpf.d.conf`, and then disable the plugin for a specific `thread` by following the steps in the
+[Configuration](#configuration) section.
+
+#### Integration Dashboard Elements
+
+When an integration is enabled, your dashboard will also show the following cgroups and apps charts using low-level
+Linux metrics:
+
+> Note: The parenthetical accompanying each bulleted item provides the chart name.
+
+- mem
+  - Number of processes killed due to out-of-memory conditions. (`oomkills`)
+- process
+ - Number of processes created with `do_fork`. (`process_create`)
+  - Number of threads created with `do_fork` or `clone(2)`, depending on your system's kernel
+    version. (`thread_create`)
+ - Number of times that a process called `do_exit`. (`task_exit`)
+ - Number of times that a process called `release_task`. (`task_close`)
+ - Number of times that an error happened to create thread or process. (`task_error`)
+- swap
+ - Number of calls to `swap_readpage`. (`swap_read_call`)
+ - Number of calls to `swap_writepage`. (`swap_write_call`)
+- network
+ - Number of bytes sent. (`total_bandwidth_sent`)
+ - Number of bytes received. (`total_bandwidth_recv`)
+ - Number of calls to `tcp_sendmsg`. (`bandwidth_tcp_send`)
+ - Number of calls to `tcp_cleanup_rbuf`. (`bandwidth_tcp_recv`)
+ - Number of calls to `tcp_retransmit_skb`. (`bandwidth_tcp_retransmit`)
+ - Number of calls to `udp_sendmsg`. (`bandwidth_udp_send`)
+ - Number of calls to `udp_recvmsg`. (`bandwidth_udp_recv`)
+- file access
+ - Number of calls to open files. (`file_open`)
+ - Number of calls to open files that returned errors. (`open_error`)
+ - Number of files closed. (`file_closed`)
+ - Number of calls to close files that returned errors. (`file_error_closed`)
+- vfs
+ - Number of calls to `vfs_unlink`. (`file_deleted`)
+ - Number of calls to `vfs_write`. (`vfs_write_call`)
+ - Number of calls to write a file that returned errors. (`vfs_write_error`)
+ - Number of calls to `vfs_read`. (`vfs_read_call`)
+ - Number of bytes written with `vfs_write`. (`vfs_write_bytes`)
+ - Number of bytes read with `vfs_read`. (`vfs_read_bytes`)
+ - Number of calls to read a file that returned errors. (`vfs_read_error`)
+ - Number of calls to `vfs_fsync`. (`vfs_fsync`)
+  - Number of calls to sync a file that returned errors. (`vfs_fsync_error`)
+  - Number of calls to `vfs_open`. (`vfs_open`)
+  - Number of calls to open a file that returned errors. (`vfs_open_error`)
+  - Number of calls to `vfs_create`. (`vfs_create`)
+  - Number of calls to create a file that returned errors. (`vfs_create_error`)
+- page cache
+ - Ratio of pages accessed. (`cachestat_ratio`)
+ - Number of modified pages ("dirty"). (`cachestat_dirties`)
+ - Number of accessed pages. (`cachestat_hits`)
+ - Number of pages brought from disk. (`cachestat_misses`)
+- directory cache
+ - Ratio of files available in directory cache. (`dc_hit_ratio`)
+ - Number of files accessed. (`dc_reference`)
+ - Number of files accessed that were not in cache. (`dc_not_cache`)
+ - Number of files not found. (`dc_not_found`)
+- ipc shm
+  - Number of calls to `shmget`. (`shmget_call`)
+  - Number of calls to `shmat`. (`shmat_call`)
+  - Number of calls to `shmdt`. (`shmdt_call`)
+  - Number of calls to `shmctl`. (`shmctl_call`)
+
+### `[ebpf programs]`
The eBPF collector enables and runs the following eBPF programs by default:
-- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
- [`apps.plugin`](/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
- for each application.
-- `dcstat` : This eBPF program creates charts that show information about file access using directory cache. It appends
- `kprobes` for `lookup_fast()` and `d_lookup()` to identify if files are inside directory cache, outside and
- files are not found.
-- `process`: This eBPF program creates charts that show information about process creation, VFS IO, and files removed.
- When in `return` mode, it also creates charts showing errors when these operations are executed.
-- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
- bandwidth consumed by each.
-- `sync`: Montitor calls for syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `fd` : This eBPF program creates charts that show information about calls to open files.
+- `mount`: This eBPF program creates charts that show calls to syscalls mount(2) and umount(2).
+- `shm`: This eBPF program creates charts that show calls to syscalls shmget(2), shmat(2), shmdt(2) and shmctl(2).
+- `sync`: Monitor calls to syscalls sync(2), fsync(2), fdatasync(2), syncfs(2), msync(2), and sync_file_range(2).
+- `network viewer`: This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+ bandwidth consumed by each.
+- `vfs`: This eBPF program creates charts that show information about VFS (Virtual File System) functions.
+- `process`: This eBPF program creates charts that show information about process life. When in `return` mode, it also
+ creates charts showing errors when these operations are executed.
+- `hardirq`: This eBPF program creates charts that show information about time spent servicing individual hardware
+ interrupt requests (hard IRQs).
+- `softirq`: This eBPF program creates charts that show information about time spent servicing individual software
+ interrupt requests (soft IRQs).
+- `oomkill`: This eBPF program creates a chart that shows OOM kills for all applications recognized via
+ the `apps.plugin` integration. Note that this program will show application charts regardless of whether apps
+ integration is turned on or off.
+
+You can also enable the following eBPF programs:
+
+- `cachestat`: Netdata's eBPF data collector creates charts about the memory page cache. When the integration with
+ [`apps.plugin`](/collectors/apps.plugin/README.md) is enabled, this collector creates charts for the whole host _and_
+ for each application.
+- `dcstat` : This eBPF program creates charts that show information about file access using the directory cache. It
+  appends `kprobes` for `lookup_fast()` and `d_lookup()` to identify whether files are inside the directory cache,
+  outside of it, or not found.
+- `disk` : This eBPF program creates charts that show information about disk latency independent of filesystem.
+- `filesystem` : This eBPF program creates charts that show information about some filesystem latency.
+- `swap` : This eBPF program creates charts that show information about swap access.
+- `mdflush`: This eBPF program creates charts that show information about
+ multi-device software flushes.
## Thread configuration
-You can configure each thread of the eBPF data collector by editing either the `cachestat.conf`, `process.conf`,
+You can configure each thread of the eBPF data collector by editing either the `cachestat.conf`, `process.conf`,
or `network.conf` files. Use [`edit-config`](/docs/configure/nodes.md) from your Netdata config directory:
```bash
@@ -225,10 +649,16 @@ The following configuration files are available:
- `cachestat.conf`: Configuration for the `cachestat` thread.
- `dcstat.conf`: Configuration for the `dcstat` thread.
+- `disk.conf`: Configuration for the `disk` thread.
+- `fd.conf`: Configuration for the `file descriptor` thread.
+- `filesystem.conf`: Configuration for the `filesystem` thread.
+- `hardirq.conf`: Configuration for the `hardirq` thread.
- `process.conf`: Configuration for the `process` thread.
-- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and
- also lets you specify which network the eBPF collector monitors.
+- `network.conf`: Configuration for the `network viewer` thread. This config file overwrites the global options and also
+ lets you specify which network the eBPF collector monitors.
+- `softirq.conf`: Configuration for the `softirq` thread.
- `sync.conf`: Configuration for the `sync` thread.
+- `vfs.conf`: Configuration for the `vfs` thread.
### Network configuration
@@ -237,7 +667,7 @@ are divided in the following sections:
#### `[network connections]`
-You can configure the information shown on `outbound` and `inbound` charts with the settings in this section.
+You can configure the information shown on `outbound` and `inbound` charts with the settings in this section.
```conf
[network connections]
@@ -249,24 +679,24 @@ You can configure the information shown on `outbound` and `inbound` charts with
```
When you define a `ports` setting, Netdata will collect network metrics for that specific port. For example, if you
-write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
-[simple patterns](/libnetdata/simple_pattern/README.md). The `ports`, and `ips` settings accept negation (`!`) to
- deny specific values or asterisk alone to define all values.
+write `ports = 19999`, Netdata will collect only connections for itself. The `hostnames` setting accepts
+[simple patterns](/libnetdata/simple_pattern/README.md). The `ports` and `ips` settings accept negation (`!`) to deny
+specific values, or an asterisk alone to define all values.
In the above example, Netdata will collect metrics for all ports between 1 and 443, with the exception of 53 (domain)
and 145.
The following options are available:
-- `ports`: Define the destination ports for Netdata to monitor.
-- `hostnames`: The list of hostnames that can be resolved to an IP address.
-- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
- range of IPs, or use CIDR values. The default behavior is to only collect data for private IP addresses, but this
- can be changed with the `ips` setting.
-
-By default, Netdata displays up to 500 dimensions on network connection charts. If there are more possible dimensions,
-they will be bundled into the `other` dimension. You can increase the number of shown dimensions by changing the `maximum
-dimensions` setting.
+- `ports`: Define the destination ports for Netdata to monitor.
+- `hostnames`: The list of hostnames that can be resolved to an IP address.
+- `ips`: The IP or range of IPs that you want to monitor. You can use IPv4 or IPv6 addresses, use dashes to define a
+ range of IPs, or use CIDR values. The default behavior is to only collect data for private IP addresses, but this can
+ be changed with the `ips` setting.
+
+By default, Netdata displays up to 500 dimensions on network connection charts. If there are more possible dimensions,
+they will be bundled into the `other` dimension. You can increase the number of shown dimensions by changing
+the `maximum dimensions` setting.
The dimensions for the traffic charts are created using the destination IPs of the sockets by default. This can be
changed setting `resolve hostname ips = yes` and restarting Netdata, after this Netdata will create dimensions using
@@ -274,8 +704,9 @@ the `hostnames` every time that is possible to resolve IPs to their hostnames.
#### `[service name]`
-Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain the
-name for a particular service you use in your infrastructure, you will need to add it to the `[service name]` section.
+Netdata uses the list of services in `/etc/services` to plot network connection charts. If this file does not contain
+the name for a particular service you use in your infrastructure, you will need to add it to the `[service name]`
+section.
For example, Netdata's default port (`19999`) is not listed in `/etc/services`. To associate that port with the Netdata
service in network connection charts, and thus see the name of the service instead of its port, define it:
@@ -287,7 +718,7 @@ service in network connection charts, and thus see the name of the service inste
### Sync configuration
-The sync configuration has specific options to disable monitoring for syscalls, as default option all syscalls are
+The sync configuration has specific options to disable monitoring for syscalls; by default, all syscalls are
monitored.
```conf
@@ -300,6 +731,22 @@ monitored.
sync_file_range = yes
```
+### Filesystem configuration
+
+The filesystem configuration has specific options to disable monitoring for filesystems; by default, all filesystems
+are monitored.
+
+```conf
+[filesystem]
+ btrfsdist = yes
+ ext4dist = yes
+ nfsdist = yes
+ xfsdist = yes
+ zfsdist = yes
+```
+
+The eBPF program `nfsdist` monitors only `nfs` mount points.
+
## Troubleshooting
If the eBPF collector does not work, you can troubleshoot it by running the `ebpf.plugin` command and investigating its
@@ -330,17 +777,18 @@ curl -sSL https://raw.githubusercontent.com/netdata/kernel-collector/master/tool
If this script returns no output, your system is ready to compile and run the eBPF collector.
-If you see a warning about a missing kernel configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL
-BPF_JIT`), you will need to recompile your kernel to support this configuration. The process of recompiling Linux
-kernels varies based on your distribution and version. Read the documentation for your system's distribution to learn
-more about the specific workflow for recompiling the kernel, ensuring that you set all the necessary
+If you see a warning about a missing kernel
+configuration (`KPROBES KPROBES_ON_FTRACE HAVE_KPROBES BPF BPF_SYSCALL BPF_JIT`), you will need to recompile your kernel
+to support this configuration. The process of recompiling Linux kernels varies based on your distribution and version.
+Read the documentation for your system's distribution to learn more about the specific workflow for recompiling the
+kernel, ensuring that you set all the necessary configuration options:
-- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
-- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
-- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
-- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
-- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
-- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
+- [Ubuntu](https://wiki.ubuntu.com/Kernel/BuildYourOwnKernel)
+- [Debian](https://kernel-team.pages.debian.net/kernel-handbook/ch-common-tasks.html#s-common-official)
+- [Fedora](https://fedoraproject.org/wiki/Building_a_custom_kernel)
+- [CentOS](https://wiki.centos.org/HowTos/Custom_Kernel)
+- [Arch Linux](https://wiki.archlinux.org/index.php/Kernel/Traditional_compilation)
+- [Slackware](https://docs.slackware.com/howtos:slackware_admin:kernelbuilding)
### Mount `debugfs` and `tracefs`
@@ -353,19 +801,20 @@ sudo mount -t tracefs nodev /sys/kernel/tracing
```
If they are already mounted, you will see an error. You can also configure your system's `/etc/fstab` configuration to
-mount these filesystems on startup. More information can be found in the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
+mount these filesystems on startup. More information can be found in
+the [ftrace documentation](https://www.kernel.org/doc/Documentation/trace/ftrace.txt).
## Performance
-eBPF monitoring is complex and produces a large volume of metrics. We've discovered scenarios where the eBPF plugin
+eBPF monitoring is complex and produces a large volume of metrics. We've discovered scenarios where the eBPF plugin
significantly increases kernel memory usage by several hundred MB.
-If your node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart,
-consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuration). Next,
-[restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system
-memory usage (see the `system.ram` chart) has dropped significantly.
+If your node is experiencing high memory usage and there is no obvious culprit to be found in the `apps.mem` chart,
+consider testing for high kernel memory usage by [disabling eBPF monitoring](#configuration). Next,
+[restart Netdata](/docs/configure/start-stop-restart.md) with `sudo systemctl restart netdata` to see if system memory
+usage (see the `system.ram` chart) has dropped significantly.
-Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#ebpf-load-mode)
+Beginning with `v1.31`, kernel memory usage is configurable via the [`pid table size` setting](#ebpf-load-mode)
in `ebpf.conf`.
## SELinux
@@ -423,7 +872,7 @@ allow unconfined_service_t self:bpf { map_create map_read map_write prog_load pr
Then compile your `netdata_ebpf.te` file with the following commands to create a binary that loads the new policies:
```bash
-# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te
+# checkmodule -M -m -o netdata_ebpf.mod netdata_ebpf.te
# semodule_package -o netdata_ebpf.pp -m netdata_ebpf.mod
```
@@ -450,9 +899,4 @@ shows how the lockdown module impacts `ebpf.plugin` based on the selected option
If you or your distribution compiled the kernel with the last combination, your system cannot load shared libraries
required to run `ebpf.plugin`.
-## Cleaning `kprobe_events`
-The eBPF collector adds entries to the file `/sys/kernel/debug/tracing/kprobe_events`, and cleans them on exit, unless
-another process prevents it. If you need to clean the eBPF entries safely, you can manually run the script
-`/usr/libexec/netdata/plugins.d/reset_netdata_trace.sh`.
-
[![analytics](https://www.google-analytics.com/collect?v=1&aip=1&t=pageview&_s=1&ds=github&dr=https%3A%2F%2Fgithub.com%2Fnetdata%2Fnetdata&dl=https%3A%2F%2Fmy-netdata.io%2Fgithub%2Fcollectors%2Febpf.plugin%2FREADME&_u=MAC~&cid=5792dfd7-8dc4-476b-af31-da2fdb9f93d2&tid=UA-64295674-3)](<>)
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 5cc005f30..71a13e84f 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -55,7 +55,6 @@ char *ebpf_plugin_dir = PLUGINS_DIR;
static char *ebpf_configured_log_dir = LOG_DIR;
char *ebpf_algorithms[] = {"absolute", "incremental"};
-int update_every = 1;
static int thread_finished = 0;
int close_ebpf_plugin = 0;
struct config collector_config = { .first_section = NULL,
@@ -67,7 +66,7 @@ struct config collector_config = { .first_section = NULL,
int running_on_kernel = 0;
char kernel_string[64];
int ebpf_nprocs;
-static int isrh;
+int isrh = 0;
uint32_t finalized_threads = 1;
pthread_mutex_t lock;
@@ -76,32 +75,109 @@ pthread_cond_t collect_data_cond_var;
ebpf_module_t ebpf_modules[] = {
{ .thread_name = "process", .config_name = "process", .enabled = 0, .start_routine = ebpf_process_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_process_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL},
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_process_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &process_config,
+ .config_file = NETDATA_PROCESS_CONFIG_FILE},
{ .thread_name = "socket", .config_name = "socket", .enabled = 0, .start_routine = ebpf_socket_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL},
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_socket_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &socket_config,
+ .config_file = NETDATA_NETWORK_CONFIG_FILE},
{ .thread_name = "cachestat", .config_name = "cachestat", .enabled = 0, .start_routine = ebpf_cachestat_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL},
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_cachestat_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &cachestat_config,
+ .config_file = NETDATA_CACHESTAT_CONFIG_FILE},
{ .thread_name = "sync", .config_name = "sync", .enabled = 0, .start_routine = ebpf_sync_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL },
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &sync_config,
+ .config_file = NETDATA_SYNC_CONFIG_FILE},
{ .thread_name = "dc", .config_name = "dc", .enabled = 0, .start_routine = ebpf_dcstat_thread,
- .update_time = 1, .global_charts = 1, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL,
- .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE },
- { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_time = 1,
- .global_charts = 0, .apps_charts = 1, .mode = MODE_ENTRY,
- .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL },
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_dcstat_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &dcstat_config,
+ .config_file = NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE},
+ { .thread_name = "swap", .config_name = "swap", .enabled = 0, .start_routine = ebpf_swap_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_swap_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &swap_config,
+ .config_file = NETDATA_DIRECTORY_SWAP_CONFIG_FILE},
+ { .thread_name = "vfs", .config_name = "vfs", .enabled = 0, .start_routine = ebpf_vfs_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_vfs_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &vfs_config,
+ .config_file = NETDATA_DIRECTORY_VFS_CONFIG_FILE },
+ { .thread_name = "filesystem", .config_name = "filesystem", .enabled = 0, .start_routine = ebpf_filesystem_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fs_config,
+ .config_file = NETDATA_FILESYSTEM_CONFIG_FILE},
+ { .thread_name = "disk", .config_name = "disk", .enabled = 0, .start_routine = ebpf_disk_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &disk_config,
+ .config_file = NETDATA_DISK_CONFIG_FILE},
+ { .thread_name = "mount", .config_name = "mount", .enabled = 0, .start_routine = ebpf_mount_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mount_config,
+ .config_file = NETDATA_MOUNT_CONFIG_FILE},
+ { .thread_name = "fd", .config_name = "fd", .enabled = 0, .start_routine = ebpf_fd_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_fd_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &fd_config,
+ .config_file = NETDATA_FD_CONFIG_FILE},
+ { .thread_name = "hardirq", .config_name = "hardirq", .enabled = 0, .start_routine = ebpf_hardirq_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &hardirq_config,
+ .config_file = NETDATA_HARDIRQ_CONFIG_FILE},
+ { .thread_name = "softirq", .config_name = "softirq", .enabled = 0, .start_routine = ebpf_softirq_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &softirq_config,
+ .config_file = NETDATA_SOFTIRQ_CONFIG_FILE},
+ { .thread_name = "oomkill", .config_name = "oomkill", .enabled = 0, .start_routine = ebpf_oomkill_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_oomkill_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &oomkill_config,
+ .config_file = NETDATA_OOMKILL_CONFIG_FILE},
+ { .thread_name = "shm", .config_name = "shm", .enabled = 0, .start_routine = ebpf_shm_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
+ .apps_routine = ebpf_shm_create_apps_charts, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &shm_config,
+ .config_file = NETDATA_DIRECTORY_SHM_CONFIG_FILE},
+ { .thread_name = "mdflush", .config_name = "mdflush", .enabled = 0, .start_routine = ebpf_mdflush_thread,
+ .update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = CONFIG_BOOLEAN_NO,
+ .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL,
+ .pid_map_size = ND_EBPF_DEFAULT_PID_SIZE, .names = NULL, .cfg = &mdflush_config,
+ .config_file = NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE},
+ { .thread_name = NULL, .enabled = 0, .start_routine = NULL, .update_every = EBPF_DEFAULT_UPDATE_EVERY,
+ .global_charts = 0, .apps_charts = CONFIG_BOOLEAN_NO, .cgroup_charts = CONFIG_BOOLEAN_NO,
+ .mode = MODE_ENTRY, .optional = 0, .apps_routine = NULL, .maps = NULL, .pid_map_size = 0, .names = NULL,
+ .cfg = NULL, .config_name = NULL},
};
// Link with apps.plugin
ebpf_process_stat_t *global_process_stat = NULL;
+// Link with cgroup.plugin
+netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup = {NULL, NULL};
+int shm_fd_ebpf_cgroup = -1;
+sem_t *shm_sem_ebpf_cgroup = SEM_FAILED;
+pthread_mutex_t mutex_cgroup_shm;
+
//Network viewer
ebpf_network_viewer_options_t network_viewer_opt;
@@ -155,6 +231,33 @@ static void ebpf_exit(int sig)
freez(dcstat_pid);
}
+ if (ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_SWAP_IDX].enabled = 0;
+ clean_swap_pid_structures();
+ freez(swap_pid);
+ }
+
+ if (ebpf_modules[EBPF_MODULE_VFS_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_VFS_IDX].enabled = 0;
+ clean_vfs_pid_structures();
+ freez(vfs_pid);
+ }
+
+ if (ebpf_modules[EBPF_MODULE_FD_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_FD_IDX].enabled = 0;
+ clean_fd_pid_structures();
+ freez(fd_pid);
+ }
+
+ if (ebpf_modules[EBPF_MODULE_SHM_IDX].enabled) {
+ ebpf_modules[EBPF_MODULE_SHM_IDX].enabled = 0;
+ clean_shm_pid_structures();
+ freez(shm_pid);
+ }
+
+ ebpf_close_cgroup_shm();
+
+ ebpf_clean_cgroup_pids();
/*
int ret = fork();
if (ret < 0) // error
@@ -241,8 +344,7 @@ inline void write_end_chart()
*/
void write_chart_dimension(char *dim, long long value)
{
- int ret = printf("SET %s = %lld\n", dim, value);
- UNUSED(ret);
+ printf("SET %s = %lld\n", dim, value);
}
/**
@@ -253,7 +355,7 @@ void write_chart_dimension(char *dim, long long value)
* @param move the pointer with the values that will be published
* @param end the number of values that will be written on standard output
*
- * @return It returns a variable tha maps the charts that did not have zero values.
+ * @return It returns a variable that maps the charts that did not have zero values.
*/
void write_count_chart(char *name, char *family, netdata_publish_syscall_t *move, uint32_t end)
{
@@ -322,7 +424,7 @@ void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long
* @param dread the dimension name
* @param vread the value for previous dimension
*
- * @return It returns a variable tha maps the charts that did not have zero values.
+ * @return It returns a variable that maps the charts that did not have zero values.
*/
void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, char *dread, long long vread)
{
@@ -337,6 +439,36 @@ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, c
/**
* Write chart cmd on standard output
*
+ * @param type chart type
+ * @param id chart id
+ * @param title chart title
+ * @param units units label
+ * @param family group name used to attach the chart on dashboard
+ * @param charttype chart type
+ * @param context chart context
+ * @param order chart order
+ * @param update_every update interval used by plugin
+ * @param module chart module name, this is the eBPF thread.
+ */
+void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family,
+ char *charttype, char *context, int order, int update_every, char *module)
+{
+ printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d '' 'ebpf.plugin' '%s'\n",
+ type,
+ id,
+ title,
+ units,
+ (family)?family:"",
+ (context)?context:"",
+ (charttype)?charttype:"",
+ order,
+ update_every,
+ module);
+}
+
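
As a rough sketch of what ebpf_write_chart_cmd() above emits (every name and number below is hypothetical, not taken from this patch), a call like the following prints one Netdata plugin CHART line on standard output:

    // illustrative call; "mem", "example_hits", the priority and the interval are made up
    ebpf_write_chart_cmd("mem", "example_hits", "Example title", "hits/s",
                         "page_cache", "line", "mem.example_hits", 21100, 5, "cachestat");
    // emits:
    // CHART mem.example_hits '' 'Example title' 'hits/s' 'page_cache' 'mem.example_hits' 'line' 21100 5 '' 'ebpf.plugin' 'cachestat'
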
+/**
+ * Write chart cmd on standard output
+ *
* @param type chart type
* @param id chart id
* @param title chart title
@@ -345,11 +477,12 @@ void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite, c
* @param charttype chart type
* @param context chart context
* @param order chart order
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-void ebpf_write_chart_cmd(char *type, char *id, char *title, char *units, char *family,
- char *charttype, char *context, int order)
+void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family,
+ char *charttype, char *context, int order, int update_every)
{
- printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d\n",
+ printf("CHART %s.%s '' '%s' '%s' '%s' '%s' '%s' %d %d 'obsolete'\n",
type,
id,
title,
@@ -395,17 +528,19 @@ void ebpf_create_global_dimension(void *ptr, int end)
/**
* Call write_chart_cmd to create the charts
*
- * @param type chart type
- * @param id chart id
- * @param title chart title
- * @param units axis label
- * @param family group name used to attach the chart on dashboard
- * @param context chart context
- * @param charttype chart type
- * @param order order number of the specified chart
- * @param ncd a pointer to a function called to create dimensions
- * @param move a pointer for a structure that has the dimensions
- * @param end number of dimensions for the chart created
+ * @param type chart type
+ * @param id chart id
+ * @param title chart title
+ * @param units axis label
+ * @param family group name used to attach the chart on dashboard
+ * @param context chart context
+ * @param charttype chart type
+ * @param order order number of the specified chart
+ * @param ncd a pointer to a function called to create dimensions
+ * @param move a pointer for a structure that has the dimensions
+ * @param end number of dimensions for the chart created
+ * @param update_every update interval used with chart.
+ * @param module chart module name, this is the eBPF thread.
*/
void ebpf_create_chart(char *type,
char *id,
@@ -417,11 +552,15 @@ void ebpf_create_chart(char *type,
int order,
void (*ncd)(void *, int),
void *move,
- int end)
+ int end,
+ int update_every,
+ char *module)
{
- ebpf_write_chart_cmd(type, id, title, units, family, charttype, context, order);
+ ebpf_write_chart_cmd(type, id, title, units, family, charttype, context, order, update_every, module);
- ncd(move, end);
+ if (ncd) {
+ ncd(move, end);
+ }
}
/**
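
A minimal usage sketch for the extended ebpf_create_chart() signature, following the parameter order given in the doc comment above (chart names, priority, interval and the dimension vector are hypothetical; ebpf_create_global_dimension is the dimension callback already present in this file):

    // `dims` is assumed to point to the structure holding the dimensions
    // (for example a netdata_publish_syscall_t vector).
    ebpf_create_chart("mem", "example_chart", "Example title", "calls/s",
                      "example_family", "mem.example_chart", "line", 21000,
                      ebpf_create_global_dimension, dims, 2,
                      5, "example_module");
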
@@ -435,12 +574,15 @@ void ebpf_create_chart(char *type,
* @param order the chart order
* @param algorithm the algorithm used by dimension
* @param root structure used to create the dimensions.
+ * @param update_every update interval used by plugin
+ * @param module chart module name, this is the eBPF thread.
*/
void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family, char *charttype, int order,
- char *algorithm, struct target *root)
+ char *algorithm, struct target *root, int update_every, char *module)
{
struct target *w;
- ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order);
+ ebpf_write_chart_cmd(NETDATA_APPS_FAMILY, id, title, units, family, charttype, NULL, order,
+ update_every, module);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed))
@@ -448,6 +590,31 @@ void ebpf_create_charts_on_apps(char *id, char *title, char *units, char *family
}
}
+/**
+ * Write histogram chart
+ *
+ * Write the bins of a histogram as dimensions of a single chart.
+ *
+ * @param family     chart family name
+ * @param name       chart name
+ * @param hist       histogram values (one value per bin)
+ * @param dimensions dimension (bin) labels
+ * @param end        number of bins that will be sent to Netdata.
+ */
+void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end)
+{
+ write_begin_chart(family, name);
+
+ uint32_t i;
+ for (i = 0; i < end; i++) {
+ write_chart_dimension(dimensions[i], (long long) hist[i]);
+ }
+
+ write_end_chart();
+
+ fflush(stdout);
+}
+
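
A short sketch of how write_histogram_chart() is meant to be called (family, chart name, bin labels and counts below are all hypothetical):

    // one dimension per histogram bin
    char *bins[4] = { "0-1ms", "1-2ms", "2-4ms", "4-8ms" };
    netdata_idx_t hist[4] = { 120, 35, 9, 1 };

    write_histogram_chart("filesystem", "example_latency", hist, bins, 4);
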
/*****************************************************************
*
* FUNCTIONS TO DEFINE OPTIONS
@@ -503,43 +670,68 @@ static inline void ebpf_set_thread_mode(netdata_run_mode_t lmode)
/**
* Enable specific charts selected by user.
*
- * @param em the structure that will be changed
- * @param enable the status about the apps charts.
+ * @param em the structure that will be changed
+ * @param disable_apps the status about the apps charts.
+ * @param disable_cgroup the status about the cgroups charts.
*/
-static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int enable)
+static inline void ebpf_enable_specific_chart(struct ebpf_module *em, int disable_apps, int disable_cgroup)
{
- em->enabled = 1;
- if (!enable) {
- em->apps_charts = 1;
+ em->enabled = CONFIG_BOOLEAN_YES;
+
+    // oomkill stores data inside the apps submenu, so it always needs apps enabled for the plugin to create
+    // its chart; without this comparison eBPF.plugin would try to store invalid data when apps is disabled.
+ if (!disable_apps || !strcmp(em->thread_name, "oomkill")) {
+ em->apps_charts = CONFIG_BOOLEAN_YES;
}
- em->global_charts = 1;
+
+ if (!disable_cgroup) {
+ em->cgroup_charts = CONFIG_BOOLEAN_YES;
+ }
+
+ em->global_charts = CONFIG_BOOLEAN_YES;
}
/**
* Enable all charts
*
- * @param apps what is the current status of apps
+ * @param apps what is the current status of apps
+ * @param cgroups what is the current status of cgroups
*/
-static inline void ebpf_enable_all_charts(int apps)
+static inline void ebpf_enable_all_charts(int apps, int cgroups)
{
int i;
for (i = 0; ebpf_modules[i].thread_name; i++) {
- ebpf_enable_specific_chart(&ebpf_modules[i], apps);
+ ebpf_enable_specific_chart(&ebpf_modules[i], apps, cgroups);
}
}
/**
+ * Disable all Global charts
+ *
+ * Disable the global charts for all threads.
+ */
+static inline void disable_all_global_charts()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].enabled = 0;
+ ebpf_modules[i].global_charts = 0;
+ }
+}
+
+
+/**
* Enable the specified chart group
*
* @param idx the index of ebpf_modules that I am enabling
* @param disable_apps should I keep apps charts?
*/
-static inline void ebpf_enable_chart(int idx, int disable_apps)
+static inline void ebpf_enable_chart(int idx, int disable_apps, int disable_cgroup)
{
int i;
for (i = 0; ebpf_modules[i].thread_name; i++) {
if (i == idx) {
- ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps);
+ ebpf_enable_specific_chart(&ebpf_modules[i], disable_apps, disable_cgroup);
break;
}
}
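
For reference, a small sketch of how the disable flags propagate into the module structure through the helpers above (using the process module index only as an example):

    int disable_apps = 0, disable_cgroup = 1;
    ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, disable_apps, disable_cgroup);
    // -> ebpf_modules[EBPF_MODULE_PROCESS_IDX].enabled       == CONFIG_BOOLEAN_YES
    //    ebpf_modules[EBPF_MODULE_PROCESS_IDX].global_charts == CONFIG_BOOLEAN_YES
    //    ebpf_modules[EBPF_MODULE_PROCESS_IDX].apps_charts   == CONFIG_BOOLEAN_YES
    //    ebpf_modules[EBPF_MODULE_PROCESS_IDX].cgroup_charts stays CONFIG_BOOLEAN_NO
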
@@ -559,6 +751,19 @@ static inline void ebpf_disable_apps()
}
/**
+ * Disable Cgroups
+ *
+ * Disable charts for apps loading only global charts.
+ */
+static inline void ebpf_disable_cgroups()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_modules[i].cgroup_charts = 0;
+ }
+}
+
+/**
* Print help on standard error for user knows how to use the collector.
*/
void ebpf_print_help()
@@ -579,39 +784,140 @@ void ebpf_print_help()
" Released under GNU General Public License v3 or later.\n"
" All rights reserved.\n"
"\n"
- " This program is a data collector plugin for netdata.\n"
+ " This eBPF.plugin is a data collector plugin for netdata.\n"
"\n"
- " Available command line options:\n"
+ " This plugin only accepts long options with one or two dashes. The available command line options are:\n"
"\n"
- " SECONDS Set the data collection frequency.\n"
+ " SECONDS Set the data collection frequency.\n"
"\n"
- " --help or -h Show this help.\n"
+ " [-]-help Show this help.\n"
"\n"
- " --version or -v Show software version.\n"
+ " [-]-version Show software version.\n"
"\n"
- " --global or -g Disable charts per application.\n"
+ " [-]-global Disable charts per application and cgroup.\n"
"\n"
- " --all or -a Enable all chart groups (global and apps), unless -g is also given.\n"
+ " [-]-all Enable all chart groups (global, apps, and cgroup), unless -g is also given.\n"
"\n"
- " --cachestat or -c Enable charts related to process run time.\n"
+ " [-]-cachestat Enable charts related to process run time.\n"
"\n"
- " --dcstat or -d Enable charts related to directory cache.\n"
+ " [-]-dcstat Enable charts related to directory cache.\n"
"\n"
- " --net or -n Enable network viewer charts.\n"
+ " [-]-disk Enable charts related to disk monitoring.\n"
"\n"
- " --process or -p Enable charts related to process run time.\n"
+ " [-]-filesystem Enable chart related to filesystem run time.\n"
"\n"
- " --return or -r Run the collector in return mode.\n"
- "\n",
- " --sync or -s Enable chart related to sync run time.\n"
+ " [-]-hardirq Enable chart related to hard IRQ latency.\n"
+ "\n"
+ " [-]-mdflush Enable charts related to multi-device flush.\n"
+ "\n"
+ " [-]-mount Enable charts related to mount monitoring.\n"
"\n"
+ " [-]-net Enable network viewer charts.\n"
+ "\n"
+ " [-]-oomkill Enable chart related to OOM kill tracking.\n"
+ "\n"
+ " [-]-process Enable charts related to process run time.\n"
+ "\n"
+ " [-]-return Run the collector in return mode.\n"
+ "\n"
+ " [-]-shm Enable chart related to shared memory tracking.\n"
+ "\n"
+ " [-]-softirq Enable chart related to soft IRQ latency.\n"
+ "\n"
+ " [-]-sync Enable chart related to sync run time.\n"
+ "\n"
+ " [-]-swap Enable chart related to swap run time.\n"
+ "\n"
+ " [-]-vfs Enable chart related to vfs run time.\n"
+ "\n",
VERSION,
(year >= 116) ? year + 1900 : 2020);
}
/*****************************************************************
*
- * AUXILIAR FUNCTIONS USED DURING INITIALIZATION
+ * TRACEPOINT MANAGEMENT FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * Enable a tracepoint.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp)
+{
+ int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
+
+ // err?
+ if (test == -1) {
+ return -1;
+ }
+ // disabled?
+ else if (test == 0) {
+ // enable it then.
+ if (ebpf_enable_tracing_values(tp->class, tp->event)) {
+ return -1;
+ }
+ }
+
+ // enabled now or already was.
+ tp->enabled = true;
+
+ return 0;
+}
+
+/**
+ * Disable a tracepoint if it's enabled.
+ *
+ * @return 0 on success, -1 on error.
+ */
+int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp)
+{
+ int test = ebpf_is_tracepoint_enabled(tp->class, tp->event);
+
+ // err?
+ if (test == -1) {
+ return -1;
+ }
+ // enabled?
+ else if (test == 1) {
+ // disable it then.
+ if (ebpf_disable_tracing_values(tp->class, tp->event)) {
+ return -1;
+ }
+ }
+
+    // disabled now or already was.
+ tp->enabled = false;
+
+ return 0;
+}
+
+/**
+ * Enable multiple tracepoints on a list of tracepoints which end when the
+ * class is NULL.
+ *
+ * @return the number of successful enables.
+ */
+uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps)
+{
+ uint32_t cnt = 0;
+ for (int i = 0; tps[i].class != NULL; i++) {
+ if (ebpf_enable_tracepoint(&tps[i]) == -1) {
+ infoerr("failed to enable tracepoint %s:%s",
+ tps[i].class, tps[i].event);
+ }
+ else {
+ cnt += 1;
+ }
+ }
+ return cnt;
+}
+
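
A minimal sketch of how a thread is expected to use these helpers: build a list terminated by an entry whose class is NULL and hand it to ebpf_enable_tracepoints() (the tracepoint names below are only an illustration, not taken from this patch):

    static ebpf_tracepoint_t example_tps[] = {
        { .enabled = false, .class = "irq", .event = "irq_handler_entry" },
        { .enabled = false, .class = "irq", .event = "irq_handler_exit" },
        { .enabled = false, .class = NULL, .event = NULL }
    };

    if (!ebpf_enable_tracepoints(example_tps))
        error("None of the example tracepoints could be enabled.");
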
+/*****************************************************************
+ *
+ * AUXILIARY FUNCTIONS USED DURING INITIALIZATION
*
*****************************************************************/
@@ -746,20 +1052,6 @@ static void ebpf_allocate_common_vectors()
}
/**
- * Fill the ebpf_data structure with default values
- *
- * @param ef the pointer to set default values
- */
-void fill_ebpf_data(ebpf_data_t *ef)
-{
- memset(ef, 0, sizeof(ebpf_data_t));
- ef->kernel_string = kernel_string;
- ef->running_on_kernel = running_on_kernel;
- ef->map_fd = callocz(EBPF_MAX_MAPS, sizeof(int));
- ef->isrh = isrh;
-}
-
-/**
* Define how to load the ebpf programs
*
* @param ptr the option given by users
@@ -778,13 +1070,16 @@ static inline void how_to_load(char *ptr)
* Update interval
*
* Update default interval with value from user
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-static void ebpf_update_interval()
+static void ebpf_update_interval(int update_every)
{
int i;
- int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY, 1);
+ int value = (int) appconfig_get_number(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_UPDATE_EVERY,
+ update_every);
for (i = 0; ebpf_modules[i].thread_name; i++) {
- ebpf_modules[i].update_time = value;
+ ebpf_modules[i].update_every = value;
}
}
@@ -807,9 +1102,11 @@ static void ebpf_update_table_size()
/**
* Read collector values
*
- * @param disable_apps variable to store information related to apps.
+ * @param disable_apps variable to store information related to apps.
+ * @param disable_cgroups variable to store information related to cgroups.
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-static void read_collector_values(int *disable_apps)
+static void read_collector_values(int *disable_apps, int *disable_cgroups, int update_every)
{
// Read global section
char *value;
@@ -822,7 +1119,7 @@ static void read_collector_values(int *disable_apps)
how_to_load(value);
- ebpf_update_interval();
+ ebpf_update_interval(update_every);
ebpf_update_table_size();
@@ -837,12 +1134,17 @@ static void read_collector_values(int *disable_apps)
}
*disable_apps = (int)enabled;
+    // The `cgroups` option is stated positively in the configuration, so we invert the value
+    // to obtain the disable flag; the same pattern is used for cgroups and apps.
+ enabled = appconfig_get_boolean(&collector_config, EBPF_GLOBAL_SECTION, EBPF_CFG_CGROUP, CONFIG_BOOLEAN_NO);
+ *disable_cgroups = (enabled == CONFIG_BOOLEAN_NO)?CONFIG_BOOLEAN_YES:CONFIG_BOOLEAN_NO;
+
// Read ebpf programs section
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION,
ebpf_modules[EBPF_MODULE_PROCESS_IDX].config_name, CONFIG_BOOLEAN_YES);
int started = 0;
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, *disable_apps, *disable_cgroups);
started++;
}
@@ -855,7 +1157,7 @@ static void read_collector_values(int *disable_apps)
CONFIG_BOOLEAN_NO);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, *disable_apps, *disable_cgroups);
// Read network viewer section if network viewer is enabled
// This is kept here to keep backward compatibility
parse_network_viewer_section(&collector_config);
@@ -869,13 +1171,13 @@ static void read_collector_values(int *disable_apps)
if (!enabled)
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "network connections",
CONFIG_BOOLEAN_NO);
- ebpf_modules[EBPF_MODULE_SOCKET_IDX].optional = enabled;
+ ebpf_modules[EBPF_MODULE_SOCKET_IDX].optional = (int)enabled;
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "cachestat",
CONFIG_BOOLEAN_NO);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, *disable_apps, *disable_cgroups);
started++;
}
@@ -883,19 +1185,96 @@ static void read_collector_values(int *disable_apps)
CONFIG_BOOLEAN_YES);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, *disable_apps, *disable_cgroups);
started++;
}
enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "dcstat",
CONFIG_BOOLEAN_NO);
if (enabled) {
- ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_apps);
+ ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "swap",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SWAP_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "vfs",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_VFS_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "filesystem",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_FILESYSTEM_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "disk",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_DISK_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mount",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_MOUNT_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "fd",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_FD_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "hardirq",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_HARDIRQ_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "softirq",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SOFTIRQ_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "oomkill",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_OOMKILL_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "shm",
+ CONFIG_BOOLEAN_YES);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_SHM_IDX, *disable_apps, *disable_cgroups);
+ started++;
+ }
+
+ enabled = appconfig_get_boolean(&collector_config, EBPF_PROGRAMS_SECTION, "mdflush",
+ CONFIG_BOOLEAN_NO);
+ if (enabled) {
+ ebpf_enable_chart(EBPF_MODULE_MDFLUSH_IDX, *disable_apps, *disable_cgroups);
started++;
}
if (!started){
- ebpf_enable_all_charts(*disable_apps);
+ ebpf_enable_all_charts(*disable_apps, *disable_cgroups);
// Read network viewer section
// This is kept here to keep backward compatibility
parse_network_viewer_section(&collector_config);
@@ -906,12 +1285,14 @@ static void read_collector_values(int *disable_apps)
/**
* Load collector config
*
- * @param path the path where the file ebpf.conf is stored.
- * @param disable_apps variable to store the information about apps plugin status.
+ * @param path the path where the file ebpf.conf is stored.
+ * @param disable_apps variable to store the information about apps plugin status.
+ * @param disable_cgroups variable to store the information about cgroups plugin status.
+ * @param update_every value to overwrite the update frequency set by the server.
*
* @return 0 on success and -1 otherwise.
*/
-static int load_collector_config(char *path, int *disable_apps)
+static int load_collector_config(char *path, int *disable_apps, int *disable_cgroups, int update_every)
{
char lpath[4096];
@@ -923,7 +1304,7 @@ static int load_collector_config(char *path, int *disable_apps)
}
}
- read_collector_values(disable_apps);
+ read_collector_values(disable_apps, disable_cgroups, update_every);
return 0;
}
@@ -957,6 +1338,21 @@ void set_global_variables()
isrh = get_redhat_release();
pid_max = get_system_pid_max();
+ running_on_kernel = ebpf_get_kernel_version();
+ ebpf_update_kernel(kernel_string, 63, isrh, running_on_kernel);
+}
+
+/**
+ * Load collector config
+ *
+ * @param lmode the mode that will be used for them.
+ */
+static inline void ebpf_load_thread_config()
+{
+ int i;
+ for (i = 0; ebpf_modules[i].thread_name; i++) {
+ ebpf_update_module(&ebpf_modules[i]);
+ }
}
/**
@@ -965,23 +1361,36 @@ void set_global_variables()
* @param argc the number of arguments
* @param argv the pointer to the arguments
*/
-static void parse_args(int argc, char **argv)
+static void ebpf_parse_args(int argc, char **argv)
{
- int enabled = 0;
int disable_apps = 0;
+ int disable_cgroups = 1;
int freq = 0;
int option_index = 0;
+ uint64_t select_threads = 0;
static struct option long_options[] = {
- {"help", no_argument, 0, 'h' },
- {"version", no_argument, 0, 'v' },
- {"global", no_argument, 0, 'g' },
- {"all", no_argument, 0, 'a' },
- {"cachestat", no_argument, 0, 'c' },
- {"dcstat", no_argument, 0, 'd' },
- {"net", no_argument, 0, 'n' },
- {"process", no_argument, 0, 'p' },
- {"return", no_argument, 0, 'r' },
- {"sync", no_argument, 0, 's' },
+ {"process", no_argument, 0, 0 },
+ {"net", no_argument, 0, 0 },
+ {"cachestat", no_argument, 0, 0 },
+ {"sync", no_argument, 0, 0 },
+ {"dcstat", no_argument, 0, 0 },
+ {"swap", no_argument, 0, 0 },
+ {"vfs", no_argument, 0, 0 },
+ {"filesystem", no_argument, 0, 0 },
+ {"disk", no_argument, 0, 0 },
+ {"mount", no_argument, 0, 0 },
+ {"filedescriptor", no_argument, 0, 0 },
+ {"hardirq", no_argument, 0, 0 },
+ {"softirq", no_argument, 0, 0 },
+ {"oomkill", no_argument, 0, 0 },
+ {"shm", no_argument, 0, 0 },
+ {"mdflush", no_argument, 0, 0 },
+ /* INSERT NEW THREADS BEFORE THIS COMMENT TO KEEP COMPATIBILITY WITH enum ebpf_module_indexes */
+ {"all", no_argument, 0, 0 },
+ {"version", no_argument, 0, 0 },
+ {"help", no_argument, 0, 0 },
+ {"global", no_argument, 0, 0 },
+ {"return", no_argument, 0, 0 },
{0, 0, 0, 0}
};
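
Note that the switch further below keys on option_index rather than on a short-option character, so the position of each entry in this table has to line up with enum ebpf_module_indexes; that is what the comment above the "all" entry enforces. A rough sketch of the coupling, assuming the enum keeps the same order as this table:

    // "--cachestat" is the third entry here, so its case records the selection
    // as the corresponding bit in the thread-selection mask:
    select_threads |= 1 << EBPF_MODULE_CACHESTAT_IDX;
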
@@ -995,83 +1404,166 @@ static void parse_args(int argc, char **argv)
}
}
+ if (!freq)
+ freq = EBPF_DEFAULT_UPDATE_EVERY;
+
+ if (load_collector_config(ebpf_user_config_dir, &disable_apps, &disable_cgroups, freq)) {
+ info(
+ "Does not have a configuration file inside `%s/ebpf.d.conf. It will try to load stock file.",
+ ebpf_user_config_dir);
+ if (load_collector_config(ebpf_stock_config_dir, &disable_apps, &disable_cgroups, freq)) {
+ info("Does not have a stock file. It is starting with default options.");
+ }
+ }
+
+ ebpf_load_thread_config();
+
while (1) {
- int c = getopt_long(argc, argv, "hvgacdnprs", long_options, &option_index);
+ int c = getopt_long_only(argc, argv, "", long_options, &option_index);
if (c == -1)
break;
- switch (c) {
- case 'h': {
- ebpf_print_help();
- exit(0);
+ switch (option_index) {
+ case EBPF_MODULE_PROCESS_IDX: {
+ select_threads |= 1<<EBPF_MODULE_PROCESS_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"PROCESS\" charts, because it was started with the option \"[-]-process\".");
+#endif
+ break;
}
- case 'v': {
- printf("ebpf.plugin %s\n", VERSION);
- exit(0);
+ case EBPF_MODULE_SOCKET_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SOCKET_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"NET\" charts, because it was started with the option \"[-]-net\".");
+#endif
+ break;
}
- case 'g': {
- disable_apps = 1;
- ebpf_disable_apps();
+ case EBPF_MODULE_CACHESTAT_IDX: {
+ select_threads |= 1<<EBPF_MODULE_CACHESTAT_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF running with global chart group, because it was started with the option \"--global\" or \"-g\".");
+ info("EBPF enabling \"CACHESTAT\" charts, because it was started with the option \"[-]-cachestat\".");
#endif
break;
}
- case 'a': {
- ebpf_enable_all_charts(disable_apps);
+ case EBPF_MODULE_SYNC_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SYNC_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF running with all chart groups, because it was started with the option \"--all\" or \"-a\".");
+ info("EBPF enabling \"SYNC\" chart, because it was started with the option \"[-]-sync\".");
#endif
break;
}
- case 'c': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_CACHESTAT_IDX, disable_apps);
+ case EBPF_MODULE_DCSTAT_IDX: {
+ select_threads |= 1<<EBPF_MODULE_DCSTAT_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF enabling \"CACHESTAT\" charts, because it was started with the option \"--cachestat\" or \"-c\".");
+ info("EBPF enabling \"DCSTAT\" charts, because it was started with the option \"[-]-dcstat\".");
#endif
break;
}
- case 'd': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_DCSTAT_IDX, disable_apps);
+ case EBPF_MODULE_SWAP_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SWAP_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF enabling \"DCSTAT\" charts, because it was started with the option \"--dcstat\" or \"-d\".");
+ info("EBPF enabling \"SWAP\" chart, because it was started with the option \"[-]-swap\".");
#endif
break;
}
- case 'n': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_SOCKET_IDX, disable_apps);
+ case EBPF_MODULE_VFS_IDX: {
+ select_threads |= 1<<EBPF_MODULE_VFS_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF enabling \"NET\" charts, because it was started with the option \"--net\" or \"-n\".");
+ info("EBPF enabling \"VFS\" chart, because it was started with the option \"[-]-vfs\".");
#endif
break;
}
- case 'p': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_PROCESS_IDX, disable_apps);
+ case EBPF_MODULE_FILESYSTEM_IDX: {
+ select_threads |= 1<<EBPF_MODULE_FILESYSTEM_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info(
- "EBPF enabling \"PROCESS\" charts, because it was started with the option \"--process\" or \"-p\".");
+ info("EBPF enabling \"FILESYSTEM\" chart, because it was started with the option \"[-]-filesystem\".");
#endif
break;
}
- case 'r': {
- ebpf_set_thread_mode(MODE_RETURN);
+ case EBPF_MODULE_DISK_IDX: {
+ select_threads |= 1<<EBPF_MODULE_DISK_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"DISK\" chart, because it was started with the option \"[-]-disk\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_MOUNT_IDX: {
+ select_threads |= 1<<EBPF_MODULE_MOUNT_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"MOUNT\" chart, because it was started with the option \"[-]-mount\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_FD_IDX: {
+ select_threads |= 1<<EBPF_MODULE_FD_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"FILEDESCRIPTOR\" chart, because it was started with the option \"[-]-filedescriptor\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_HARDIRQ_IDX: {
+ select_threads |= 1<<EBPF_MODULE_HARDIRQ_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"HARDIRQ\" chart, because it was started with the option \"[-]-hardirq\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_SOFTIRQ_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SOFTIRQ_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF running in \"return\" mode, because it was started with the option \"--return\" or \"-r\".");
+ info("EBPF enabling \"SOFTIRQ\" chart, because it was started with the option \"[-]-softirq\".");
#endif
break;
}
- case 's': {
- enabled = 1;
- ebpf_enable_chart(EBPF_MODULE_SYNC_IDX, disable_apps);
+ case EBPF_MODULE_OOMKILL_IDX: {
+ select_threads |= 1<<EBPF_MODULE_OOMKILL_IDX;
#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF enabling \"sync\" chart, because it was started with the option \"--sync\" or \"-s\".");
+ info("EBPF enabling \"OOMKILL\" chart, because it was started with the option \"[-]-oomkill\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_SHM_IDX: {
+ select_threads |= 1<<EBPF_MODULE_SHM_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"SHM\" chart, because it was started with the option \"[-]-shm\".");
+#endif
+ break;
+ }
+ case EBPF_MODULE_MDFLUSH_IDX: {
+ select_threads |= 1<<EBPF_MODULE_MDFLUSH_IDX;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF enabling \"MDFLUSH\" chart, because it was started with the option \"[-]-mdflush\".");
+#endif
+ break;
+ }
+ case EBPF_OPTION_ALL_CHARTS: {
+ disable_apps = 0;
+ disable_cgroups = 0;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with all chart groups, because it was started with the option \"[-]-all\".");
+#endif
+ break;
+ }
+ case EBPF_OPTION_VERSION: {
+ printf("ebpf.plugin %s\n", VERSION);
+ exit(0);
+ }
+ case EBPF_OPTION_HELP: {
+ ebpf_print_help();
+ exit(0);
+ }
+ case EBPF_OPTION_GLOBAL_CHART: {
+ disable_apps = 1;
+ disable_cgroups = 1;
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running with global chart group, because it was started with the option \"[-]-global\".");
+#endif
+ break;
+ }
+ case EBPF_OPTION_RETURN_MODE: {
+ ebpf_set_thread_mode(MODE_RETURN);
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("EBPF running in \"RETURN\" mode, because it was started with the option \"[-]-return\".");
#endif
break;
}
@@ -1081,44 +1573,34 @@ static void parse_args(int argc, char **argv)
}
}
- if (freq > 0) {
- update_every = freq;
- }
+ if (disable_apps || disable_cgroups) {
+ if (disable_apps)
+ ebpf_disable_apps();
- if (load_collector_config(ebpf_user_config_dir, &disable_apps)) {
- info(
- "Does not have a configuration file inside `%s/ebpf.d.conf. It will try to load stock file.",
- ebpf_user_config_dir);
- if (load_collector_config(ebpf_stock_config_dir, &disable_apps)) {
- info("Does not have a stock file. It is starting with default options.");
- } else {
- enabled = 1;
- }
- } else {
- enabled = 1;
- }
+ if (disable_cgroups)
+ ebpf_disable_cgroups();
- if (!enabled) {
- ebpf_enable_all_charts(disable_apps);
-#ifdef NETDATA_INTERNAL_CHECKS
- info("EBPF running with all charts, because neither \"-n\" or \"-p\" was given.");
-#endif
+ ebpf_enable_all_charts(disable_apps, disable_cgroups);
}
- if (disable_apps)
- return;
+ if (select_threads) {
+ disable_all_global_charts();
+ uint64_t idx;
+ for (idx = 0; idx < EBPF_OPTION_ALL_CHARTS; idx++) {
+ if (select_threads & 1<<idx)
+ ebpf_enable_specific_chart(&ebpf_modules[idx], disable_apps, disable_cgroups);
+ }
+ }
// Load apps_groups.conf
if (ebpf_read_apps_groups_conf(
&apps_groups_default_target, &apps_groups_root_target, ebpf_user_config_dir, "groups")) {
- info(
- "Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
- ebpf_user_config_dir, ebpf_stock_config_dir);
+ info("Cannot read process groups configuration file '%s/apps_groups.conf'. Will try '%s/apps_groups.conf'",
+ ebpf_user_config_dir, ebpf_stock_config_dir);
if (ebpf_read_apps_groups_conf(
&apps_groups_default_target, &apps_groups_root_target, ebpf_stock_config_dir, "groups")) {
- error(
- "Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
- ebpf_stock_config_dir);
+ error("Cannot read process groups '%s/apps_groups.conf'. There are no internal defaults. Failing.",
+ ebpf_stock_config_dir);
thread_finished++;
ebpf_exit(1);
}
@@ -1133,6 +1615,136 @@ static void parse_args(int argc, char **argv)
*****************************************************************/
/**
+ * Update PID file
+ *
+ * Update the content of PID file
+ *
+ * @param filename is the full name of the file.
+ * @param pid that identifies the process
+ */
+static void ebpf_update_pid_file(char *filename, pid_t pid)
+{
+ FILE *fp = fopen(filename, "w");
+ if (!fp)
+ return;
+
+ fprintf(fp, "%d", pid);
+ fclose(fp);
+}
+
+/**
+ * Get Process Name
+ *
+ * Get process name from /proc/PID/status
+ *
+ * @param pid that identifies the process
+ */
+static char *ebpf_get_process_name(pid_t pid)
+{
+ char *name = NULL;
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "/proc/%d/status", pid);
+
+ procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ error("Cannot open %s", filename);
+ return name;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return name;
+
+ unsigned long i, lines = procfile_lines(ff);
+ for(i = 0; i < lines ; i++) {
+ char *cmp = procfile_lineword(ff, i, 0);
+ if (!strcmp(cmp, "Name:")) {
+ name = strdupz(procfile_lineword(ff, i, 1));
+ break;
+ }
+ }
+
+ procfile_close(ff);
+
+ return name;
+}
+
+/**
+ * Read Previous PID
+ *
+ * @param filename is the full name of the file.
+ *
+ * @return It returns the PID used during previous execution on success or 0 otherwise
+ */
+static pid_t ebpf_read_previous_pid(char *filename)
+{
+ FILE *fp = fopen(filename, "r");
+ if (!fp)
+ return 0;
+
+ char buffer[64];
+ size_t length = fread(buffer, sizeof(*buffer), 63, fp);
+ pid_t old_pid = 0;
+ if (length) {
+ if (length > 63)
+ length = 63;
+
+ buffer[length] = '\0';
+ old_pid = (pid_t)str2uint32_t(buffer);
+ }
+ fclose(fp);
+
+ return old_pid;
+}
+
+/**
+ * Kill previous process
+ *
+ * Kill the previous process if it was not closed.
+ *
+ * @param filename is the full name of the file.
+ * @param pid that identifies the process
+ */
+static void ebpf_kill_previous_process(char *filename, pid_t pid)
+{
+ pid_t old_pid = ebpf_read_previous_pid(filename);
+ if (!old_pid)
+ return;
+
+ // Process is not running
+ char *prev_name = ebpf_get_process_name(old_pid);
+ if (!prev_name)
+ return;
+
+ char *current_name = ebpf_get_process_name(pid);
+
+ if (!strcmp(prev_name, current_name))
+ kill(old_pid, SIGKILL);
+
+ freez(prev_name);
+ freez(current_name);
+
+    // wait briefly (300 ms) before starting the new plugin
+ sleep_usec(USEC_PER_MS * 300);
+}
+
+/**
+ * Manage PID
+ *
+ * This function kills another eBPF.plugin instance if necessary and updates the PID file content.
+ *
+ * @param pid that identifies the process
+ */
+static void ebpf_manage_pid(pid_t pid)
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s/ebpf.d/ebpf.pid", netdata_configured_host_prefix, ebpf_plugin_dir);
+
+ ebpf_kill_previous_process(filename, pid);
+ ebpf_update_pid_file(filename, pid);
+}
+
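
A short sketch of how the helpers above are wired together at startup; main() below does exactly this with getpid(), and the PID file path is built from netdata_configured_host_prefix and ebpf_plugin_dir as shown in ebpf_manage_pid():

    pid_t pid = getpid();
    ebpf_manage_pid(pid);   // kills a stale ebpf.plugin with the same process name, then records `pid`
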
+/**
* Entry point
*
* @param argc the number of arguments
@@ -1143,9 +1755,9 @@ static void parse_args(int argc, char **argv)
int main(int argc, char **argv)
{
set_global_variables();
- parse_args(argc, argv);
+ ebpf_parse_args(argc, argv);
+ ebpf_manage_pid(getpid());
- running_on_kernel = get_kernel_version(kernel_string, 63);
if (!has_condition_to_run(running_on_kernel)) {
error("The current collector cannot run on this kernel.");
return 2;
@@ -1203,6 +1815,28 @@ int main(int argc, char **argv)
NULL, NULL, ebpf_modules[EBPF_MODULE_SYNC_IDX].start_routine},
{"EBPF DCSTAT" , NULL, NULL, 1,
NULL, NULL, ebpf_modules[EBPF_MODULE_DCSTAT_IDX].start_routine},
+ {"EBPF SWAP" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_SWAP_IDX].start_routine},
+ {"EBPF VFS" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_VFS_IDX].start_routine},
+ {"EBPF FILESYSTEM" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_FILESYSTEM_IDX].start_routine},
+ {"EBPF DISK" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_DISK_IDX].start_routine},
+ {"EBPF MOUNT" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_MOUNT_IDX].start_routine},
+ {"EBPF FD" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_FD_IDX].start_routine},
+ {"EBPF HARDIRQ" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_HARDIRQ_IDX].start_routine},
+ {"EBPF SOFTIRQ" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_SOFTIRQ_IDX].start_routine},
+ {"EBPF OOMKILL" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_OOMKILL_IDX].start_routine},
+ {"EBPF SHM" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_SHM_IDX].start_routine},
+ {"EBPF MDFLUSH" , NULL, NULL, 1,
+ NULL, NULL, ebpf_modules[EBPF_MODULE_MDFLUSH_IDX].start_routine},
{NULL , NULL, NULL, 0,
NULL, NULL, NULL}
};
diff --git a/collectors/ebpf.plugin/ebpf.d.conf b/collectors/ebpf.plugin/ebpf.d.conf
index ef6ff8145..845b711c9 100644
--- a/collectors/ebpf.plugin/ebpf.d.conf
+++ b/collectors/ebpf.plugin/ebpf.d.conf
@@ -6,9 +6,10 @@
# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
# new charts for the return of these functions, such as errors.
#
-# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-# 'no'.
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change the setting
+# `apps` and `cgroups` to 'no'.
#
# The `update every` option defines the number of seconds used to read data from kernel and send to netdata
#
@@ -17,7 +18,8 @@
[global]
ebpf load mode = entry
apps = yes
- update every = 1
+ cgroups = no
+ update every = 5
pid table size = 32768
#
@@ -25,17 +27,39 @@
#
# The eBPF collector enables and runs the following eBPF programs by default:
#
-# `cachestat`: Make charts for kernel functions related to page cache.
-# `process` : This eBPF program creates charts that show information about process creation, VFS IO, and
+# `cachestat` : Make charts for kernel functions related to page cache.
+# `dcstat` : Make charts for kernel functions related to directory cache.
+# `disk` : Monitor I/O latencies for disks
+# `fd` : This eBPF program creates charts that show information about file manipulation.
+# `mdflush` : Monitors flush counts for multi-devices.
+# `mount` : Monitor calls for syscalls mount and umount
+# `filesystem`: Monitor calls for functions used to manipulate specific filesystems
+# `hardirq` : Monitor latency of serving hardware interrupt requests (hard IRQs).
+# `oomkill` : This eBPF program creates a chart that shows which process got OOM killed and when.
+# `process` : This eBPF program creates charts that show information about process life.
+# `shm` : Monitor calls for syscalls shmget, shmat, shmdt and shmctl.
+# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
+# bandwidth consumed by each.
+# `softirq` : Monitor latency of serving software interrupt requests (soft IRQs).
+#  `sync`       : Monitor calls for syscall sync(2).
+# `swap` : Monitor calls for internal swap functions.
+# `vfs` : This eBPF program creates charts that show information about process VFS IO, VFS file manipulation and
# files removed.
-# `socket` : This eBPF program creates charts with information about `TCP` and `UDP` functions, including the
-# bandwidth consumed by each.
-# `sync` : Montitor calls for syscall sync(2).
[ebpf programs]
cachestat = no
dcstat = no
+ disk = no
+ fd = yes
+ filesystem = no
+ hardirq = yes
+ mdflush = no
+ mount = yes
+ oomkill = yes
process = yes
+ shm = yes
socket = yes
+ softirq = yes
sync = yes
+ swap = no
+ vfs = yes
network connections = no
-
diff --git a/collectors/ebpf.plugin/ebpf.d/cachestat.conf b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
index 0c4d991df..41205930a 100644
--- a/collectors/ebpf.plugin/ebpf.d/cachestat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/cachestat.conf
@@ -3,14 +3,17 @@
# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
# new charts for the return of these functions, such as errors.
#
-# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-# 'no'.
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the setting `apps` and `cgroups` to 'no'.
#
# The `pid table size` defines the maximum number of PIDs stored inside the application hash table.
-#
-[global]
- ebpf load mode = entry
- apps = yes
- update every = 2
- pid table size = 32768
+#
+# Uncomment lines to define specific options for thread.
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
+# pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.d/dcstat.conf b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
index 2607b98fd..a65e0acbc 100644
--- a/collectors/ebpf.plugin/ebpf.d/dcstat.conf
+++ b/collectors/ebpf.plugin/ebpf.d/dcstat.conf
@@ -3,11 +3,15 @@
# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
# new charts for the return of these functions, such as errors.
#
-# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-# 'no'.
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the setting `apps` and `cgroups` to 'no'.
#
-[global]
- ebpf load mode = entry
- apps = yes
- update every = 2
+# Uncomment lines to define specific options for thread.
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
+# pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.d/disk.conf b/collectors/ebpf.plugin/ebpf.d/disk.conf
new file mode 100644
index 000000000..4adf88e74
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/disk.conf
@@ -0,0 +1,9 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+#[global]
+# ebpf load mode = entry
+# update every = 10
+
diff --git a/collectors/ebpf.plugin/ebpf.d/fd.conf b/collectors/ebpf.plugin/ebpf.d/fd.conf
new file mode 100644
index 000000000..f6edd3d93
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/fd.conf
@@ -0,0 +1,19 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the setting `apps` and `cgroups` to 'no'.
+#
+# The `pid table size` defines the maximum number of PIDs stored inside the hash table.
+#
+# Uncomment lines to define specific options for thread.
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
+# pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.d/filesystem.conf b/collectors/ebpf.plugin/ebpf.d/filesystem.conf
new file mode 100644
index 000000000..c5eb01e54
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/filesystem.conf
@@ -0,0 +1,20 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
+# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
+# 'no'.
+#
+#[global]
+# ebpf load mode = entry
+# update every = 10
+
+# All filesystems are named as 'NAMEdist' where NAME is the filesystem name while 'dist' is a reference for distribution.
+[filesystem]
+ btrfsdist = yes
+ ext4dist = yes
+ nfsdist = yes
+ xfsdist = yes
+ zfsdist = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/hardirq.conf b/collectors/ebpf.plugin/ebpf.d/hardirq.conf
new file mode 100644
index 000000000..f2bae1d57
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/hardirq.conf
@@ -0,0 +1,8 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+#[global]
+# ebpf load mode = entry
+# update every = 10
diff --git a/collectors/ebpf.plugin/ebpf.d/mdflush.conf b/collectors/ebpf.plugin/ebpf.d/mdflush.conf
new file mode 100644
index 000000000..e65e8672c
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/mdflush.conf
@@ -0,0 +1,7 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#[global]
+# ebpf load mode = entry
+# update every = 1
diff --git a/collectors/ebpf.plugin/ebpf.d/mount.conf b/collectors/ebpf.plugin/ebpf.d/mount.conf
new file mode 100644
index 000000000..9d3174755
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/mount.conf
@@ -0,0 +1,8 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+#[global]
+# ebpf load mode = entry
+# update every = 1
diff --git a/collectors/ebpf.plugin/ebpf.d/network.conf b/collectors/ebpf.plugin/ebpf.d/network.conf
index 6bbd49a49..e692622a9 100644
--- a/collectors/ebpf.plugin/ebpf.d/network.conf
+++ b/collectors/ebpf.plugin/ebpf.d/network.conf
@@ -3,9 +3,10 @@
# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
# new charts for the return of these functions, such as errors.
#
-# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-# 'no'.
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the setting `apps` and `cgroups` to 'no'.
#
# The following options change the hash table size:
# `bandwidth table size`: Maximum number of connections monitored
@@ -14,9 +15,10 @@
# `udp connection table size`: Maximum number of UDP connections monitored
#
[global]
- ebpf load mode = entry
- apps = yes
- update every = 1
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
bandwidth table size = 16384
ipv4 connection table size = 16384
ipv6 connection table size = 16384
diff --git a/collectors/ebpf.plugin/ebpf.d/oomkill.conf b/collectors/ebpf.plugin/ebpf.d/oomkill.conf
new file mode 100644
index 000000000..e65e8672c
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/oomkill.conf
@@ -0,0 +1,7 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#[global]
+# ebpf load mode = entry
+# update every = 1
diff --git a/collectors/ebpf.plugin/ebpf.d/process.conf b/collectors/ebpf.plugin/ebpf.d/process.conf
index 511da95ad..f6edd3d93 100644
--- a/collectors/ebpf.plugin/ebpf.d/process.conf
+++ b/collectors/ebpf.plugin/ebpf.d/process.conf
@@ -3,14 +3,17 @@
# `return : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
# new charts for the return of these functions, such as errors.
#
-# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-# 'no'.
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the settings `apps` and `cgroups` to 'no'.
#
# The `pid table size` defines the maximum number of PIDs stored inside the hash table.
-#
-[global]
- ebpf load mode = entry
- apps = yes
- update every = 1
- pid table size = 32768
+#
+# Uncomment lines to define specific options for this thread.
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
+# pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.d/shm.conf b/collectors/ebpf.plugin/ebpf.d/shm.conf
new file mode 100644
index 000000000..c0a10c98e
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/shm.conf
@@ -0,0 +1,24 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the settings `apps` and `cgroups` to 'no'.
+#
+# Uncomment lines to define specific options for this thread.
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
+# pid table size = 32768
+
+# List of monitored syscalls
+[syscalls]
+ shmget = yes
+ shmat = yes
+ shmdt = yes
+ shmctl = yes
diff --git a/collectors/ebpf.plugin/ebpf.d/softirq.conf b/collectors/ebpf.plugin/ebpf.d/softirq.conf
new file mode 100644
index 000000000..f2bae1d57
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/softirq.conf
@@ -0,0 +1,8 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+#[global]
+# ebpf load mode = entry
+# update every = 10
diff --git a/collectors/ebpf.plugin/ebpf.d/swap.conf b/collectors/ebpf.plugin/ebpf.d/swap.conf
new file mode 100644
index 000000000..a65e0acbc
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/swap.conf
@@ -0,0 +1,17 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the settings `apps` and `cgroups` to 'no'.
+#
+# Uncomment lines to define specific options for this thread.
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
+# pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.d/sync.conf b/collectors/ebpf.plugin/ebpf.d/sync.conf
index de28f3394..03c469f68 100644
--- a/collectors/ebpf.plugin/ebpf.d/sync.conf
+++ b/collectors/ebpf.plugin/ebpf.d/sync.conf
@@ -3,15 +3,17 @@
# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
# new charts for the return of these functions, such as errors.
#
-# The eBPF collector also creates charts for each running application through an integration with the `apps plugin`.
-# If you want to disable the integration with `apps.plugin` along with the above charts, change the setting `apps` to
-# 'no'.
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the settings `apps` and `cgroups` to 'no'.
#
#
-[global]
- ebpf load mode = entry
- apps = yes
- update every = 2
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
# List of monitored syscalls
[syscalls]
diff --git a/collectors/ebpf.plugin/ebpf.d/vfs.conf b/collectors/ebpf.plugin/ebpf.d/vfs.conf
new file mode 100644
index 000000000..a65e0acbc
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf.d/vfs.conf
@@ -0,0 +1,17 @@
+# The `ebpf load mode` option accepts the following values :
+# `entry` : The eBPF collector only monitors calls for the functions, and does not show charts related to errors.
+# `return` : In the `return` mode, the eBPF collector monitors the same kernel functions as `entry`, but also creates
+# new charts for the return of these functions, such as errors.
+#
+# The eBPF collector also creates charts for each running application through an integration with the `apps.plugin`
+# or `cgroups.plugin`.
+# If you want to disable the integration with `apps.plugin` or `cgroups.plugin` along with the above charts, change
+# the settings `apps` and `cgroups` to 'no'.
+#
+# Uncomment lines to define specific options for this thread.
+#[global]
+# ebpf load mode = entry
+# apps = yes
+# cgroups = no
+# update every = 10
+# pid table size = 32768
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index 841701e20..a59bad031 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -30,6 +30,7 @@
#include "daemon/main.h"
#include "ebpf_apps.h"
+#include "ebpf_cgroup.h"
#define NETDATA_EBPF_OLD_CONFIG_FILE "ebpf.conf"
#define NETDATA_EBPF_CONFIG_FILE "ebpf.d.conf"
@@ -73,14 +74,37 @@ typedef struct netdata_error_report {
} netdata_error_report_t;
extern ebpf_module_t ebpf_modules[];
-enum ebpf_module_indexes {
+enum ebpf_main_index {
EBPF_MODULE_PROCESS_IDX,
EBPF_MODULE_SOCKET_IDX,
EBPF_MODULE_CACHESTAT_IDX,
EBPF_MODULE_SYNC_IDX,
- EBPF_MODULE_DCSTAT_IDX
+ EBPF_MODULE_DCSTAT_IDX,
+ EBPF_MODULE_SWAP_IDX,
+ EBPF_MODULE_VFS_IDX,
+ EBPF_MODULE_FILESYSTEM_IDX,
+ EBPF_MODULE_DISK_IDX,
+ EBPF_MODULE_MOUNT_IDX,
+ EBPF_MODULE_FD_IDX,
+ EBPF_MODULE_HARDIRQ_IDX,
+ EBPF_MODULE_SOFTIRQ_IDX,
+ EBPF_MODULE_OOMKILL_IDX,
+ EBPF_MODULE_SHM_IDX,
+ EBPF_MODULE_MDFLUSH_IDX,
+ /* THREADS MUST BE INCLUDED BEFORE THIS COMMENT */
+ EBPF_OPTION_ALL_CHARTS,
+ EBPF_OPTION_VERSION,
+ EBPF_OPTION_HELP,
+ EBPF_OPTION_GLOBAL_CHART,
+ EBPF_OPTION_RETURN_MODE
};
+typedef struct ebpf_tracepoint {
+ bool enabled;
+ char *class;
+ char *event;
+} ebpf_tracepoint_t;
+
// Copied from musl header
#ifndef offsetof
#if __GNUC__ > 3
@@ -92,10 +116,16 @@ enum ebpf_module_indexes {
// Chart definitions
#define NETDATA_EBPF_FAMILY "ebpf"
+#define NETDATA_EBPF_IP_FAMILY "ip"
#define NETDATA_FILESYSTEM_FAMILY "filesystem"
+#define NETDATA_EBPF_MOUNT_GLOBAL_FAMILY "mount_points"
#define NETDATA_EBPF_CHART_TYPE_LINE "line"
#define NETDATA_EBPF_CHART_TYPE_STACKED "stacked"
#define NETDATA_EBPF_MEMORY_GROUP "mem"
+#define NETDATA_EBPF_SYSTEM_GROUP "system"
+#define NETDATA_SYSTEM_SWAP_SUBMENU "swap"
+#define NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU "swap (eBPF)"
+#define NETDATA_SYSTEM_IPC_SHM_SUBMENU "ipc shared memory"
// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -111,6 +141,8 @@ enum ebpf_module_indexes {
#define EBPF_SYS_CLONE_IDX 11
#define EBPF_MAX_MAPS 32
+#define EBPF_DEFAULT_UPDATE_EVERY 10
+
enum ebpf_algorithms_list {
NETDATA_EBPF_ABSOLUTE_IDX,
NETDATA_EBPF_INCREMENTAL_IDX
@@ -125,6 +157,7 @@ extern pthread_mutex_t lock;
extern int close_ebpf_plugin;
extern int ebpf_nprocs;
extern int running_on_kernel;
+extern int isrh;
extern char *ebpf_plugin_dir;
extern char kernel_string[64];
@@ -146,7 +179,9 @@ extern void ebpf_write_chart_cmd(char *type,
char *family,
char *charttype,
char *context,
- int order);
+ int order,
+ int update_every,
+ char *module);
extern void ebpf_write_global_dimension(char *name, char *id, char *algorithm);
@@ -162,7 +197,9 @@ extern void ebpf_create_chart(char *type,
int order,
void (*ncd)(void *, int),
void *move,
- int end);
+ int end,
+ int update_every,
+ char *module);
extern void write_begin_chart(char *family, char *name);
@@ -175,8 +212,6 @@ extern void write_err_chart(char *name, char *family, netdata_publish_syscall_t
extern void write_io_chart(char *chart, char *family, char *dwrite, long long vwrite,
char *dread, long long vread);
-extern void fill_ebpf_data(ebpf_data_t *ef);
-
extern void ebpf_create_charts_on_apps(char *name,
char *title,
char *units,
@@ -184,12 +219,18 @@ extern void ebpf_create_charts_on_apps(char *name,
char *charttype,
int order,
char *algorithm,
- struct target *root);
+ struct target *root,
+ int update_every,
+ char *module);
extern void write_end_chart();
extern void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps);
+extern int ebpf_enable_tracepoint(ebpf_tracepoint_t *tp);
+extern int ebpf_disable_tracepoint(ebpf_tracepoint_t *tp);
+extern uint32_t ebpf_enable_tracepoints(ebpf_tracepoint_t *tps);
+
#define EBPF_PROGRAMS_SECTION "ebpf programs"
#define EBPF_COMMON_DIMENSION_PERCENTAGE "%"
@@ -199,16 +240,21 @@ extern void ebpf_cleanup_publish_syscall(netdata_publish_syscall_t *nps);
#define EBPF_COMMON_DIMENSION_DIFFERENCE "difference"
#define EBPF_COMMON_DIMENSION_PACKETS "packets"
#define EBPF_COMMON_DIMENSION_FILES "files"
+#define EBPF_COMMON_DIMENSION_MILLISECONDS "milliseconds"
+#define EBPF_COMMON_DIMENSION_KILLS "kills"
// Common variables
extern int debug_enabled;
extern struct pid_stat *root_of_pids;
+extern ebpf_cgroup_target_t *ebpf_cgroup_pids;
extern char *ebpf_algorithms[];
extern struct config collector_config;
-extern struct pid_stat *root_of_pids;
extern ebpf_process_stat_t *global_process_stat;
+extern netdata_ebpf_cgroup_shm_t shm_ebpf_cgroup;
+extern int shm_fd_ebpf_cgroup;
+extern sem_t *shm_sem_ebpf_cgroup;
+extern pthread_mutex_t mutex_cgroup_shm;
extern size_t all_pids_count;
-extern int update_every;
extern uint32_t finalized_threads;
// Socket functions and variables
@@ -219,6 +265,9 @@ extern void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *root
extern void ebpf_one_dimension_write_charts(char *family, char *chart, char *dim, long long v1);
extern collected_number get_value_from_structure(char *basis, size_t offset);
extern void ebpf_update_pid_table(ebpf_local_maps_t *pid, ebpf_module_t *em);
+extern void ebpf_write_chart_obsolete(char *type, char *id, char *title, char *units, char *family,
+ char *charttype, char *context, int order, int update_every);
+extern void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist, char **dimensions, uint32_t end);
#define EBPF_MAX_SYNCHRONIZATION_TIME 300
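
The tracepoint helpers declared above (`ebpf_tracepoint_t`, `ebpf_enable_tracepoint()`, `ebpf_disable_tracepoint()`) hint at toggling kernel tracepoints at runtime. Below is a hedged, standalone sketch of one plausible mechanism: writing "1"/"0" to an event's tracefs `enable` file. The tracefs path, the `oom/mark_victim` event, and the `example_*` names are illustrative assumptions, not code from this patch.

    /* Hedged sketch: toggle a tracepoint through its tracefs "enable" file.
     * The struct mirrors the ebpf_tracepoint_t fields added above. */
    #include <stdbool.h>
    #include <stdio.h>

    typedef struct example_tracepoint {
        bool enabled;
        char *class;
        char *event;
    } example_tracepoint_t;

    static int example_toggle_tracepoint(example_tracepoint_t *tp, int on)
    {
        char path[256];
        snprintf(path, sizeof(path),
                 "/sys/kernel/debug/tracing/events/%s/%s/enable", tp->class, tp->event);

        FILE *fp = fopen(path, "w");
        if (!fp)
            return -1;                     /* tracefs not mounted or no permission */

        fputs(on ? "1" : "0", fp);
        fclose(fp);
        tp->enabled = on;
        return 0;
    }

    int main(void)
    {
        example_tracepoint_t oom = { .enabled = false, .class = "oom", .event = "mark_victim" };
        if (example_toggle_tracepoint(&oom, 1) != 0)
            perror("enable tracepoint");
        return 0;
    }
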
diff --git a/collectors/ebpf.plugin/ebpf_apps.c b/collectors/ebpf.plugin/ebpf_apps.c
index 6459bad0d..015d1bf21 100644
--- a/collectors/ebpf.plugin/ebpf_apps.c
+++ b/collectors/ebpf.plugin/ebpf_apps.c
@@ -116,9 +116,9 @@ int am_i_running_as_root()
/**
* Reset the target values
*
- * @param root the pointer to the chain that will be reseted.
+ * @param root the pointer to the chain that will be reset.
*
- * @return it returns the number of structures that was reseted.
+ * @return it returns the number of structures that was reset.
*/
size_t zero_all_targets(struct target *root)
{
@@ -910,6 +910,33 @@ static inline void del_pid_entry(pid_t pid)
}
/**
+ * Get command string associated with a PID.
+ * This can only safely be used when holding the `collect_data_mutex` lock.
+ *
+ * @param pid the pid to search the data.
+ * @param n the maximum amount of bytes to copy into dest.
+ * if this is greater than the size of the command, it is clipped.
+ * @param dest the target memory buffer to write the command into.
+ * @return -1 if the PID hasn't been scraped yet, 0 otherwise.
+ */
+int get_pid_comm(pid_t pid, size_t n, char *dest)
+{
+ struct pid_stat *stat;
+
+ stat = all_pids[pid];
+ if (unlikely(stat == NULL)) {
+ return -1;
+ }
+
+ if (unlikely(n > sizeof(stat->comm))) {
+ n = sizeof(stat->comm);
+ }
+
+ strncpyz(dest, stat->comm, n);
+ return 0;
+}
+
+/**
* Cleanup variable from other threads
*
* @param pid current pid.
@@ -922,7 +949,7 @@ void cleanup_variables_from_other_threads(uint32_t pid)
socket_bandwidth_curr[pid] = NULL;
}
- // Clean cachestat strcture
+ // Clean cachestat structure
if (cachestat_pid) {
freez(cachestat_pid[pid]);
cachestat_pid[pid] = NULL;
@@ -933,6 +960,30 @@ void cleanup_variables_from_other_threads(uint32_t pid)
freez(dcstat_pid[pid]);
dcstat_pid[pid] = NULL;
}
+
+ // Clean swap structure
+ if (swap_pid) {
+ freez(swap_pid[pid]);
+ swap_pid[pid] = NULL;
+ }
+
+ // Clean vfs structure
+ if (vfs_pid) {
+ freez(vfs_pid[pid]);
+ vfs_pid[pid] = NULL;
+ }
+
+ // Clean fd structure
+ if (fd_pid) {
+ freez(fd_pid[pid]);
+ fd_pid[pid] = NULL;
+ }
+
+ // Clean shm structure
+ if (shm_pid) {
+ freez(shm_pid[pid]);
+ shm_pid[pid] = NULL;
+ }
}
/**
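
The `get_pid_comm()` docstring added above describes a clip-and-copy contract: copy at most `n` bytes of the stored command name and always terminate the destination. A minimal self-contained sketch of that behaviour follows; `COMM_SIZE` and `copy_comm` are assumptions standing in for `sizeof(stat->comm)` and Netdata's `strncpyz()`.

    /* Minimal sketch of the clipping behaviour documented for get_pid_comm(). */
    #include <stdio.h>
    #include <string.h>

    #define COMM_SIZE 16                   /* assumed stored command-name length */

    static void copy_comm(char *dest, const char *comm, size_t n)
    {
        if (n > COMM_SIZE)
            n = COMM_SIZE;                 /* clip, as the docstring describes */

        strncpy(dest, comm, n);
        dest[n] = '\0';                    /* dest must hold n + 1 bytes */
    }

    int main(void)
    {
        char out[COMM_SIZE + 1];
        copy_comm(out, "a-very-long-command-name", sizeof(out) - 1);
        printf("%s\n", out);               /* prints the clipped name */
        return 0;
    }
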
diff --git a/collectors/ebpf.plugin/ebpf_apps.h b/collectors/ebpf.plugin/ebpf_apps.h
index edcdef605..0c72b8782 100644
--- a/collectors/ebpf.plugin/ebpf_apps.h
+++ b/collectors/ebpf.plugin/ebpf_apps.h
@@ -11,17 +11,28 @@
#include "libnetdata/ebpf/ebpf.h"
#define NETDATA_APPS_FAMILY "apps"
-#define NETDATA_APPS_FILE_GROUP "file (eBPF)"
-#define NETDATA_APPS_VFS_GROUP "vfs (eBPF)"
+#define NETDATA_APPS_FILE_GROUP "file_access"
+#define NETDATA_APPS_FILE_CGROUP_GROUP "file_access (eBPF)"
#define NETDATA_APPS_PROCESS_GROUP "process (eBPF)"
-#define NETDATA_APPS_NET_GROUP "net (eBPF)"
-#define NETDATA_APPS_CACHESTAT_GROUP "page cache (eBPF)"
-#define NETDATA_APPS_DCSTAT_GROUP "directory cache (eBPF)"
+#define NETDATA_APPS_NET_GROUP "net"
+#define NETDATA_APPS_IPC_SHM_GROUP "ipc shm (eBPF)"
#include "ebpf_process.h"
#include "ebpf_dcstat.h"
+#include "ebpf_disk.h"
+#include "ebpf_fd.h"
+#include "ebpf_filesystem.h"
+#include "ebpf_hardirq.h"
#include "ebpf_cachestat.h"
+#include "ebpf_mdflush.h"
+#include "ebpf_mount.h"
+#include "ebpf_oomkill.h"
+#include "ebpf_shm.h"
+#include "ebpf_socket.h"
+#include "ebpf_softirq.h"
#include "ebpf_sync.h"
+#include "ebpf_swap.h"
+#include "ebpf_vfs.h"
#define MAX_COMPARE_NAME 100
#define MAX_NAME 100
@@ -113,6 +124,10 @@ struct target {
// Changes made to simplify integration between apps and eBPF.
netdata_publish_cachestat_t cachestat;
netdata_publish_dcstat_t dcstat;
+ netdata_publish_swap_t swap;
+ netdata_publish_vfs_t vfs;
+ netdata_fd_stat_t fd;
+ netdata_publish_shm_t shm;
/* These variables are not necessary for eBPF collector
kernel_uint_t minflt;
@@ -341,34 +356,13 @@ typedef struct ebpf_process_stat {
uint32_t pid;
//Counter
- uint32_t open_call;
- uint32_t write_call;
- uint32_t writev_call;
- uint32_t read_call;
- uint32_t readv_call;
- uint32_t unlink_call;
uint32_t exit_call;
uint32_t release_call;
- uint32_t fork_call;
- uint32_t clone_call;
- uint32_t close_call;
-
- //Accumulator
- uint64_t write_bytes;
- uint64_t writev_bytes;
- uint64_t readv_bytes;
- uint64_t read_bytes;
+ uint32_t create_process;
+ uint32_t create_thread;
//Counter
- uint32_t open_err;
- uint32_t write_err;
- uint32_t writev_err;
- uint32_t read_err;
- uint32_t readv_err;
- uint32_t unlink_err;
- uint32_t fork_err;
- uint32_t clone_err;
- uint32_t close_err;
+ uint32_t task_err;
uint8_t removeme;
} ebpf_process_stat_t;
@@ -425,6 +419,8 @@ extern void cleanup_exited_pids();
extern int ebpf_read_hash_table(void *ep, int fd, uint32_t pid);
+extern int get_pid_comm(pid_t pid, size_t n, char *dest);
+
extern size_t read_processes_statistic_using_pid_on_target(ebpf_process_stat_t **ep,
int fd,
struct pid_on_target *pids);
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index cdeac6951..7ba8c01ae 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -3,7 +3,6 @@
#include "ebpf.h"
#include "ebpf_cachestat.h"
-static ebpf_data_t cachestat_data;
netdata_publish_cachestat_t **cachestat_pid;
static struct bpf_link **probe_links = NULL;
@@ -16,7 +15,8 @@ static netdata_publish_syscall_t cachestat_counter_publish_aggregated[NETDATA_CA
netdata_cachestat_pid_t *cachestat_vector = NULL;
-static netdata_idx_t *cachestat_hash_values = NULL;
+static netdata_idx_t cachestat_hash_values[NETDATA_CACHESTAT_END];
+static netdata_idx_t *cachestat_values = NULL;
static int read_thread_closed = 1;
@@ -24,11 +24,20 @@ struct netdata_static_thread cachestat_threads = {"CACHESTAT KERNEL",
NULL, NULL, 1, NULL,
NULL, NULL};
-static ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0},
- {.name = NULL, .internal_input = 0, .user_input = 0}};
-
-static int *map_fd = NULL;
+static ebpf_local_maps_t cachestat_maps[] = {{.name = "cstat_global", .internal_input = NETDATA_CACHESTAT_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "cstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "cstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
struct config cachestat_config = { .first_section = NULL,
.last_section = NULL,
@@ -78,15 +87,17 @@ static void ebpf_cachestat_cleanup(void *ptr)
ebpf_cleanup_publish_syscall(cachestat_counter_publish_aggregated);
freez(cachestat_vector);
- freez(cachestat_hash_values);
-
- struct bpf_program *prog;
- size_t i = 0 ;
- bpf_object__for_each_program(prog, objects) {
- bpf_link__destroy(probe_links[i]);
- i++;
+ freez(cachestat_values);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
}
- bpf_object__close(objects);
}
/*****************************************************************
@@ -100,7 +111,7 @@ static void ebpf_cachestat_cleanup(void *ptr)
*
* Update publish values before to write dimension.
*
- * @param out strcuture that will receive data.
+ * @param out structure that will receive data.
* @param mpa calls for mark_page_accessed during the last second.
* @param mbd calls for mark_buffer_dirty during the last second.
* @param apcl calls for add_to_page_cache_lru during the last second.
@@ -247,7 +258,7 @@ static void read_apps_table()
netdata_cachestat_pid_t *cv = cachestat_vector;
uint32_t key;
struct pid_stat *pids = root_of_pids;
- int fd = map_fd[NETDATA_CACHESTAT_PID_STATS];
+ int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
size_t length = sizeof(netdata_cachestat_pid_t)*ebpf_nprocs;
while (pids) {
key = pids->pid;
@@ -269,6 +280,43 @@ static void read_apps_table()
}
/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the values collected per PID.
+ */
+static void ebpf_update_cachestat_cgroup()
+{
+ netdata_cachestat_pid_t *cv = cachestat_vector;
+ int fd = cachestat_maps[NETDATA_CACHESTAT_PID_STATS].map_fd;
+ size_t length = sizeof(netdata_cachestat_pid_t) * ebpf_nprocs;
+
+ ebpf_cgroup_target_t *ect;
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ netdata_cachestat_pid_t *out = &pids->cachestat;
+ if (likely(cachestat_pid) && cachestat_pid[pid]) {
+ netdata_publish_cachestat_t *in = cachestat_pid[pid];
+
+ memcpy(out, &in->current, sizeof(netdata_cachestat_pid_t));
+ } else {
+ memset(cv, 0, length);
+ if (bpf_map_lookup_elem(fd, &pid, cv)) {
+ continue;
+ }
+
+ cachestat_apps_accumulator(cv);
+
+ memcpy(out, cv, sizeof(netdata_cachestat_pid_t));
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
* Create apps charts
*
* Call ebpf_create_chart to create the charts on apps submenu.
@@ -277,43 +325,42 @@ static void read_apps_table()
*/
void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- UNUSED(em);
struct target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_RATIO_CHART,
"The ratio is calculated dividing the Hit pages per total cache accesses without counting dirties.",
EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_APPS_CACHESTAT_GROUP,
+ NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
20090,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
ebpf_create_charts_on_apps(NETDATA_CACHESTAT_DIRTY_CHART,
"Number of pages marked as dirty. When a page is called dirty, this means that the data stored inside the page needs to be written to devices.",
EBPF_CACHESTAT_DIMENSION_PAGE,
- NETDATA_APPS_CACHESTAT_GROUP,
+ NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
20091,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
ebpf_create_charts_on_apps(NETDATA_CACHESTAT_HIT_CHART,
"Number of cache access without counting dirty pages and page additions.",
EBPF_CACHESTAT_DIMENSION_HITS,
- NETDATA_APPS_CACHESTAT_GROUP,
+ NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
20092,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
ebpf_create_charts_on_apps(NETDATA_CACHESTAT_MISSES_CHART,
"Page caches added without counting dirty pages",
EBPF_CACHESTAT_DIMENSION_MISSES,
- NETDATA_APPS_CACHESTAT_GROUP,
+ NETDATA_CACHESTAT_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
20093,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
}
/*****************************************************************
@@ -331,12 +378,18 @@ static void read_global_table()
{
uint32_t idx;
netdata_idx_t *val = cachestat_hash_values;
- netdata_idx_t stored;
- int fd = map_fd[NETDATA_CACHESTAT_GLOBAL_STATS];
+ netdata_idx_t *stored = cachestat_values;
+ int fd = cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd;
for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, &stored)) {
- val[idx] = stored;
+ if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+ int i;
+ int end = ebpf_nprocs;
+ netdata_idx_t total = 0;
+ for (i = 0; i < end; i++)
+ total += stored[i];
+
+ val[idx] = total;
}
}
}
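
The rewritten `read_global_table()` above switches from a single stored value to a per-CPU lookup whose slots are summed into one total. A small standalone sketch of that aggregation step, with a plain array standing in for the per-CPU eBPF map and made-up values:

    /* Sketch: sum the per-CPU slots of a map lookup into one counter, as the
     * new read_global_table() loop does; bpf_map_lookup_elem() is not used. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t example_idx_t;

    static example_idx_t sum_per_cpu(const example_idx_t *stored, int nprocs)
    {
        example_idx_t total = 0;
        for (int i = 0; i < nprocs; i++)
            total += stored[i];
        return total;
    }

    int main(void)
    {
        example_idx_t per_cpu[4] = { 10, 7, 0, 3 };    /* pretend 4 CPUs reported counters */
        printf("total calls: %llu\n", (unsigned long long)sum_per_cpu(per_cpu, 4));
        return 0;
    }
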
@@ -360,7 +413,7 @@ void *ebpf_cachestat_read_hash(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
- usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_time;
+ usec_t step = NETDATA_LATENCY_CACHESTAT_SLEEP_MS * em->update_every;
while (!close_ebpf_plugin) {
usec_t dt = heartbeat_next(&hb, step);
(void)dt;
@@ -428,7 +481,7 @@ void ebpf_cachestat_sum_pids(netdata_publish_cachestat_t *publish, struct pid_on
}
/**
- * Send data to Netdata calling auxiliar functions.
+ * Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
*/
@@ -446,7 +499,7 @@ void ebpf_cache_send_apps_data(struct target *root)
uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
- w->cachestat.dirty = current->mark_buffer_dirty;
+ w->cachestat.dirty = mbd;
uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;
@@ -487,6 +540,297 @@ void ebpf_cache_send_apps_data(struct target *root)
}
/**
+ * Cachestat sum PIDs
+ *
+ * Sum values for all PIDs associated with a group
+ *
+ * @param publish output structure.
+ * @param root structure with listed PIDs
+ */
+void ebpf_cachestat_sum_cgroup_pids(netdata_publish_cachestat_t *publish, struct pid_on_target2 *root)
+{
+ memcpy(&publish->prev, &publish->current,sizeof(publish->current));
+ memset(&publish->current, 0, sizeof(publish->current));
+
+ netdata_cachestat_pid_t *dst = &publish->current;
+ while (root) {
+ netdata_cachestat_pid_t *src = &root->cachestat;
+
+ dst->account_page_dirtied += src->account_page_dirtied;
+ dst->add_to_page_cache_lru += src->add_to_page_cache_lru;
+ dst->mark_buffer_dirty += src->mark_buffer_dirty;
+ dst->mark_page_accessed += src->mark_page_accessed;
+
+ root = root->next;
+ }
+}
+
+/**
+ * Calc chart values
+ *
+ * Do necessary math to plot charts.
+ */
+void ebpf_cachestat_calc_chart_values()
+{
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_cachestat_sum_cgroup_pids(&ect->publish_cachestat, ect->pids);
+
+ netdata_cachestat_pid_t *current = &ect->publish_cachestat.current;
+ netdata_cachestat_pid_t *prev = &ect->publish_cachestat.prev;
+
+ uint64_t mpa = current->mark_page_accessed - prev->mark_page_accessed;
+ uint64_t mbd = current->mark_buffer_dirty - prev->mark_buffer_dirty;
+ ect->publish_cachestat.dirty = mbd;
+ uint64_t apcl = current->add_to_page_cache_lru - prev->add_to_page_cache_lru;
+ uint64_t apd = current->account_page_dirtied - prev->account_page_dirtied;
+
+ cachestat_update_publish(&ect->publish_cachestat, mpa, mbd, apcl, apd);
+ }
+}
+
+/**
+ * Create Systemd cachestat Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_cachestat_charts(int update_every)
+{
+ ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_RATIO_CHART,
+                                  "Hit is calculated using total cache added without dirties per total added because of read misses.",
+ EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, 21100,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_DIRTY_CHART,
+ "Number of dirty pages added to the page cache.",
+ EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, 21101,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_HIT_CHART, "Hits are function calls that Netdata counts.",
+ EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, 21102,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_CACHESTAT_MISSES_CHART, "Misses are function calls that Netdata counts.",
+ EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, 21103,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT, NETDATA_EBPF_MODULE_NAME_CACHESTAT,
+ update_every);
+}
+
+/**
+ * Send Cache Stat charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @return It returns the status of chart creation: when a specific dimension must be removed, zero is returned,
+ *         otherwise it returns 1 to avoid chart recreation
+ */
+static int ebpf_send_systemd_cachestat_charts()
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_RATIO_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_cachestat.ratio);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_DIRTY_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_cachestat.dirty);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_HIT_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_cachestat.hit);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_CACHESTAT_MISSES_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_cachestat.miss);
+ }
+ }
+ write_end_chart();
+
+ return ret;
+}
+
+/**
+ * Send specific Cache Stat data
+ *
+ * Send collected data to Netdata.
+ */
+static void ebpf_send_specific_cachestat_data(char *type, netdata_publish_cachestat_t *npc)
+{
+ write_begin_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART);
+ write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_RATIO].name, (long long)npc->ratio);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_CACHESTAT_DIRTY_CHART);
+ write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY].name, (long long)npc->dirty);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_CACHESTAT_HIT_CHART);
+ write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT].name, (long long)npc->hit);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_CACHESTAT_MISSES_CHART);
+ write_chart_dimension(cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS].name, (long long)npc->miss);
+ write_end_chart();
+}
+
+/**
+ * Create specific cache Stat charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_specific_cachestat_charts(char *type, int update_every)
+{
+ ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_RATIO_CHART,
+                      "Hit is calculated using total cache added without dirties per total added because of read misses.",
+ EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200,
+ ebpf_create_global_dimension,
+ cachestat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
+
+ ebpf_create_chart(type, NETDATA_CACHESTAT_DIRTY_CHART,
+ "Number of dirty pages added to the page cache.",
+ EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201,
+ ebpf_create_global_dimension,
+ &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
+
+ ebpf_create_chart(type, NETDATA_CACHESTAT_HIT_CHART,
+ "Hits are function calls that Netdata counts.",
+ EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202,
+ ebpf_create_global_dimension,
+ &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
+
+ ebpf_create_chart(type, NETDATA_CACHESTAT_MISSES_CHART,
+ "Misses are function calls that Netdata counts.",
+ EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_CGROUP_SUBMENU,
+ NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5203,
+ ebpf_create_global_dimension,
+ &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
+}
+
+/**
+ * Obsolete specific cache stat charts
+ *
+ * Obsolete charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_specific_cachestat_charts(char *type, int update_every)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_RATIO_CHART,
+                              "Hit is calculated using total cache added without dirties per total added because of read misses.",
+ EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5200, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_DIRTY_CHART,
+ "Number of dirty pages added to the page cache.",
+ EBPF_CACHESTAT_DIMENSION_PAGE, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5201, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_HIT_CHART,
+ "Hits are function calls that Netdata counts.",
+ EBPF_CACHESTAT_DIMENSION_HITS, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5202, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_CACHESTAT_MISSES_CHART,
+ "Misses are function calls that Netdata counts.",
+ EBPF_CACHESTAT_DIMENSION_MISSES, NETDATA_CACHESTAT_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5203, update_every);
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+*/
+void ebpf_cachestat_send_cgroup_data(int update_every)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ ebpf_cachestat_calc_chart_values();
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+ if (has_systemd) {
+ static int systemd_charts = 0;
+ if (!systemd_charts) {
+ ebpf_create_systemd_cachestat_charts(update_every);
+ systemd_charts = 1;
+ }
+
+ systemd_charts = ebpf_send_systemd_cachestat_charts();
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART) && ect->updated) {
+ ebpf_create_specific_cachestat_charts(ect->name, update_every);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART) {
+ if (ect->updated) {
+ ebpf_send_specific_cachestat_data(ect->name, &ect->publish_cachestat);
+ } else {
+ ebpf_obsolete_specific_cachestat_charts(ect->name, update_every);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
* Main loop for this collector.
*/
static void cachestat_collector(ebpf_module_t *em)
@@ -494,29 +838,40 @@ static void cachestat_collector(ebpf_module_t *em)
cachestat_threads.thread = mallocz(sizeof(netdata_thread_t));
cachestat_threads.start_routine = ebpf_cachestat_read_hash;
- map_fd = cachestat_data.map_fd;
-
netdata_thread_create(cachestat_threads.thread, cachestat_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
ebpf_cachestat_read_hash, em);
netdata_publish_cachestat_t publish;
memset(&publish, 0, sizeof(publish));
int apps = em->apps_charts;
+ int cgroups = em->cgroup_charts;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!close_ebpf_plugin) {
pthread_mutex_lock(&collect_data_mutex);
pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
- if (apps)
- read_apps_table();
+ if (++counter == update_every) {
+ counter = 0;
+ if (apps)
+ read_apps_table();
- pthread_mutex_lock(&lock);
+ if (cgroups)
+ ebpf_update_cachestat_cgroup();
- cachestat_send_global(&publish);
+ pthread_mutex_lock(&lock);
- if (apps)
- ebpf_cache_send_apps_data(apps_groups_root_target);
+ cachestat_send_global(&publish);
+
+ if (apps)
+ ebpf_cache_send_apps_data(apps_groups_root_target);
+
+ if (cgroups)
+ ebpf_cachestat_send_cgroup_data(update_every);
+
+ pthread_mutex_unlock(&lock);
+ }
- pthread_mutex_unlock(&lock);
pthread_mutex_unlock(&collect_data_mutex);
}
}
@@ -531,8 +886,10 @@ static void cachestat_collector(ebpf_module_t *em)
* Create global charts
*
* Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to `struct ebpf_module`
*/
-static void ebpf_create_memory_charts()
+static void ebpf_create_memory_charts(ebpf_module_t *em)
{
ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_RATIO_CHART,
"Hit is calculating using total cache added without dirties per total added because of red misses.",
@@ -541,7 +898,7 @@ static void ebpf_create_memory_charts()
NETDATA_EBPF_CHART_TYPE_LINE,
21100,
ebpf_create_global_dimension,
- cachestat_counter_publish_aggregated, 1);
+ cachestat_counter_publish_aggregated, 1, em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_DIRTY_CHART,
"Number of dirty pages added to the page cache.",
@@ -550,7 +907,8 @@ static void ebpf_create_memory_charts()
NETDATA_EBPF_CHART_TYPE_LINE,
21101,
ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1);
+ &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_DIRTY], 1,
+ em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_HIT_CHART,
"Hits are function calls that Netdata counts.",
@@ -559,7 +917,8 @@ static void ebpf_create_memory_charts()
NETDATA_EBPF_CHART_TYPE_LINE,
21102,
ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1);
+ &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_HIT], 1,
+ em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_CACHESTAT_MISSES_CHART,
"Misses are function calls that Netdata counts.",
@@ -568,7 +927,8 @@ static void ebpf_create_memory_charts()
NETDATA_EBPF_CHART_TYPE_LINE,
21103,
ebpf_create_global_dimension,
- &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1);
+ &cachestat_counter_publish_aggregated[NETDATA_CACHESTAT_IDX_MISS], 1,
+ em->update_every, NETDATA_EBPF_MODULE_NAME_CACHESTAT);
fflush(stdout);
}
@@ -579,17 +939,20 @@ static void ebpf_create_memory_charts()
* We are not testing the return, because callocz does this and shutdown the software
* case it was not possible to allocate.
*
- * @param length is the length for the vectors used inside the collector.
+ * @param apps is apps enabled?
*/
-static void ebpf_cachestat_allocate_global_vectors(size_t length)
+static void ebpf_cachestat_allocate_global_vectors(int apps)
{
- cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *));
+ if (apps)
+ cachestat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_cachestat_t *));
+
cachestat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_cachestat_pid_t));
- cachestat_hash_values = callocz(length, sizeof(netdata_idx_t));
+ cachestat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
- memset(cachestat_counter_aggregated_data, 0, length * sizeof(netdata_syscall_stat_t));
- memset(cachestat_counter_publish_aggregated, 0, length * sizeof(netdata_publish_syscall_t));
+ memset(cachestat_hash_values, 0, NETDATA_CACHESTAT_END * sizeof(netdata_idx_t));
+ memset(cachestat_counter_aggregated_data, 0, NETDATA_CACHESTAT_END * sizeof(netdata_syscall_stat_t));
+ memset(cachestat_counter_publish_aggregated, 0, NETDATA_CACHESTAT_END * sizeof(netdata_publish_syscall_t));
}
/*****************************************************************
@@ -613,22 +976,16 @@ void *ebpf_cachestat_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = cachestat_maps;
- fill_ebpf_data(&cachestat_data);
- ebpf_update_module(em, &cachestat_config, NETDATA_CACHESTAT_CONFIG_FILE);
- ebpf_update_pid_table(&cachestat_maps[0], em);
+ ebpf_update_pid_table(&cachestat_maps[NETDATA_CACHESTAT_PID_STATS], em);
if (!em->enabled)
goto endcachestat;
pthread_mutex_lock(&lock);
- ebpf_cachestat_allocate_global_vectors(NETDATA_CACHESTAT_END);
- if (ebpf_update_kernel(&cachestat_data)) {
- pthread_mutex_unlock(&lock);
- goto endcachestat;
- }
+ ebpf_cachestat_allocate_global_vectors(em->apps_charts);
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, cachestat_data.map_fd);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
if (!probe_links) {
pthread_mutex_unlock(&lock);
goto endcachestat;
@@ -642,7 +999,7 @@ void *ebpf_cachestat_thread(void *ptr)
cachestat_counter_dimension_name, cachestat_counter_dimension_name,
algorithms, NETDATA_CACHESTAT_END);
- ebpf_create_memory_charts();
+ ebpf_create_memory_charts(em);
pthread_mutex_unlock(&lock);
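
The cgroup code added to this file (for example `ebpf_cachestat_sum_cgroup_pids()` and `ebpf_cachestat_calc_chart_values()`) keeps a `prev`/`current` pair of snapshots and charts the per-interval difference. Below is a hedged, reduced sketch of that pattern; a single counter stands in for the full `netdata_cachestat_pid_t`, and the names are local to the example.

    /* Sketch of the prev/current bookkeeping: save the last snapshot, rebuild
     * the current one, and derive the interval delta. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct snapshot { uint64_t mark_buffer_dirty; };
    struct publish  { struct snapshot current, prev; uint64_t dirty; };

    static void update_publish(struct publish *p, uint64_t new_mbd)
    {
        memcpy(&p->prev, &p->current, sizeof(p->current));   /* keep last interval */
        memset(&p->current, 0, sizeof(p->current));
        p->current.mark_buffer_dirty = new_mbd;              /* rebuilt from the PID data */

        p->dirty = p->current.mark_buffer_dirty - p->prev.mark_buffer_dirty;
    }

    int main(void)
    {
        struct publish p = { 0 };
        update_publish(&p, 120);
        update_publish(&p, 150);
        printf("dirty pages this interval: %llu\n", (unsigned long long)p.dirty);
        return 0;
    }
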
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.h b/collectors/ebpf.plugin/ebpf_cachestat.h
index 694933e0c..7904c8113 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.h
+++ b/collectors/ebpf.plugin/ebpf_cachestat.h
@@ -3,13 +3,17 @@
#ifndef NETDATA_EBPF_CACHESTAT_H
#define NETDATA_EBPF_CACHESTAT_H 1
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_CACHESTAT "cachestat"
+
// charts
#define NETDATA_CACHESTAT_HIT_RATIO_CHART "cachestat_ratio"
#define NETDATA_CACHESTAT_DIRTY_CHART "cachestat_dirties"
#define NETDATA_CACHESTAT_HIT_CHART "cachestat_hits"
#define NETDATA_CACHESTAT_MISSES_CHART "cachestat_misses"
-#define NETDATA_CACHESTAT_SUBMENU "page cache (eBPF)"
+#define NETDATA_CACHESTAT_SUBMENU "page_cache"
+#define NETDATA_CACHESTAT_CGROUP_SUBMENU "page cache (eBPF)"
#define EBPF_CACHESTAT_DIMENSION_PAGE "pages/s"
#define EBPF_CACHESTAT_DIMENSION_HITS "hits/s"
@@ -20,6 +24,17 @@
// configuration file
#define NETDATA_CACHESTAT_CONFIG_FILE "cachestat.conf"
+// Contexts
+#define NETDATA_CGROUP_CACHESTAT_HIT_RATIO_CONTEXT "cgroup.cachestat_ratio"
+#define NETDATA_CGROUP_CACHESTAT_MODIFIED_CACHE_CONTEXT "cgroup.cachestat_dirties"
+#define NETDATA_CGROUP_CACHESTAT_HIT_FILES_CONTEXT "cgroup.cachestat_hits"
+#define NETDATA_CGROUP_CACHESTAT_MISS_FILES_CONTEXT "cgroup.cachestat_misses"
+
+#define NETDATA_SYSTEMD_CACHESTAT_HIT_RATIO_CONTEXT "services.cachestat_ratio"
+#define NETDATA_SYSTEMD_CACHESTAT_MODIFIED_CACHE_CONTEXT "services.cachestat_dirties"
+#define NETDATA_SYSTEMD_CACHESTAT_HIT_FILE_CONTEXT "services.cachestat_hits"
+#define NETDATA_SYSTEMD_CACHESTAT_MISS_FILES_CONTEXT "services.cachestat_misses"
+
// variables
enum cachestat_counters {
NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU,
@@ -62,4 +77,6 @@ typedef struct netdata_publish_cachestat {
extern void *ebpf_cachestat_thread(void *ptr);
extern void clean_cachestat_pid_structures();
+extern struct config cachestat_config;
+
#endif // NETDATA_EBPF_CACHESTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.c b/collectors/ebpf.plugin/ebpf_cgroup.c
new file mode 100644
index 000000000..ecdc46c0b
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_cgroup.c
@@ -0,0 +1,348 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_cgroup.h"
+
+ebpf_cgroup_target_t *ebpf_cgroup_pids = NULL;
+
+// --------------------------------------------------------------------------------------------------------------------
+// Map shared memory
+
+/**
+ * Map Shared Memory locally
+ *
+ * Map the shared memory for current process
+ *
+ * @param fd file descriptor returned after shm_open was called.
+ * @param length length of the shared memory
+ *
+ * @return It returns a pointer to the region mapped.
+ */
+static inline void *ebpf_cgroup_map_shm_locally(int fd, size_t length)
+{
+ void *value;
+
+ value = mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ if (!value) {
+ error("Cannot map shared memory used between eBPF and cgroup, integration between processes won't happen");
+ close(shm_fd_ebpf_cgroup);
+ shm_fd_ebpf_cgroup = -1;
+ shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
+ }
+
+ return value;
+}
+
+/**
+ * Map cgroup shared memory
+ *
+ * Map cgroup shared memory from cgroup to plugin
+ */
+void ebpf_map_cgroup_shared_memory()
+{
+ static int limit_try = 0;
+ static time_t next_try = 0;
+
+ if (shm_ebpf_cgroup.header || limit_try > NETDATA_EBPF_CGROUP_MAX_TRIES)
+ return;
+
+ time_t curr_time = time(NULL);
+ if (curr_time < next_try)
+ return;
+
+ limit_try++;
+ next_try = curr_time + NETDATA_EBPF_CGROUP_NEXT_TRY_SEC;
+
+ shm_fd_ebpf_cgroup = shm_open(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME, O_RDWR, 0660);
+ if (shm_fd_ebpf_cgroup < 0) {
+ if (limit_try == NETDATA_EBPF_CGROUP_MAX_TRIES)
+ error("Shared memory was not initialized, integration between processes won't happen.");
+
+ return;
+ }
+
+ // Map only header
+ shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *) ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup,
+ sizeof(netdata_ebpf_cgroup_shm_header_t));
+ if (!shm_ebpf_cgroup.header) {
+ limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+ return;
+ }
+
+ size_t length = shm_ebpf_cgroup.header->body_length;
+
+ munmap(shm_ebpf_cgroup.header, sizeof(netdata_ebpf_cgroup_shm_header_t));
+
+ shm_ebpf_cgroup.header = (netdata_ebpf_cgroup_shm_header_t *)ebpf_cgroup_map_shm_locally(shm_fd_ebpf_cgroup, length);
+ if (!shm_ebpf_cgroup.header) {
+ limit_try = NETDATA_EBPF_CGROUP_MAX_TRIES + 1;
+ return;
+ }
+ shm_ebpf_cgroup.body = (netdata_ebpf_cgroup_shm_body_t *) ((char *)shm_ebpf_cgroup.header +
+ sizeof(netdata_ebpf_cgroup_shm_header_t));
+
+ shm_sem_ebpf_cgroup = sem_open(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME, O_CREAT, 0660, 1);
+
+ if (shm_sem_ebpf_cgroup == SEM_FAILED) {
+ error("Cannot create semaphore, integration between eBPF and cgroup won't happen");
+ munmap(shm_ebpf_cgroup.header, length);
+ shm_ebpf_cgroup.header = NULL;
+ close(shm_fd_ebpf_cgroup);
+ shm_fd_ebpf_cgroup = -1;
+ shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Close and Cleanup
+
+/**
+ * Close shared memory
+ */
+void ebpf_close_cgroup_shm()
+{
+ if (shm_sem_ebpf_cgroup != SEM_FAILED) {
+ sem_close(shm_sem_ebpf_cgroup);
+ sem_unlink(NETDATA_NAMED_SEMAPHORE_EBPF_CGROUP_NAME);
+ shm_sem_ebpf_cgroup = SEM_FAILED;
+ }
+
+ if (shm_fd_ebpf_cgroup > 0) {
+ close(shm_fd_ebpf_cgroup);
+ shm_unlink(NETDATA_SHARED_MEMORY_EBPF_CGROUP_NAME);
+ shm_fd_ebpf_cgroup = -1;
+ }
+}
+
+/**
+ * Clean Specific cgroup pid
+ *
+ * Clean all PIDs associated with cgroup.
+ *
+ * @param pt structure pid on target that will have its PIDs removed
+ */
+static inline void ebpf_clean_specific_cgroup_pids(struct pid_on_target2 *pt)
+{
+ while (pt) {
+ struct pid_on_target2 *next_pid = pt->next;
+
+ freez(pt);
+ pt = next_pid;
+ }
+}
+
+/**
+ * Clean up the linked list
+ */
+void ebpf_clean_cgroup_pids()
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ ebpf_cgroup_target_t *ect = ebpf_cgroup_pids;
+ while (ect) {
+ ebpf_cgroup_target_t *next_cgroup = ect->next;
+
+ ebpf_clean_specific_cgroup_pids(ect->pids);
+ freez(ect);
+
+ ect = next_cgroup;
+ }
+ ebpf_cgroup_pids = NULL;
+}
+
+/**
+ * Remove Cgroup Targets From the Update List
+ *
+ * Remove targets that were not updated and relink the list.
+ */
+static void ebpf_remove_cgroup_target_update_list()
+{
+ ebpf_cgroup_target_t *next, *ect = ebpf_cgroup_pids;
+ ebpf_cgroup_target_t *prev = ebpf_cgroup_pids;
+ while (ect) {
+ next = ect->next;
+ if (!ect->updated) {
+ if (ect == ebpf_cgroup_pids) {
+ ebpf_cgroup_pids = next;
+ prev = next;
+ } else {
+ prev->next = next;
+ }
+
+ ebpf_clean_specific_cgroup_pids(ect->pids);
+ freez(ect);
+ } else {
+ prev = ect;
+ }
+
+ ect = next;
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Fill variables
+
+/**
+ * Set Target Data
+ *
+ * Set local variable values according to shared memory information.
+ *
+ * @param out local output variable.
+ * @param ptr input from shared memory.
+ */
+static inline void ebpf_cgroup_set_target_data(ebpf_cgroup_target_t *out, netdata_ebpf_cgroup_shm_body_t *ptr)
+{
+ out->hash = ptr->hash;
+ snprintfz(out->name, 255, "%s", ptr->name);
+ out->systemd = ptr->options & CGROUP_OPTIONS_SYSTEM_SLICE_SERVICE;
+ out->updated = 1;
+}
+
+/**
+ * Find or create
+ *
+ * Find the structure inside the link list or allocate and link when it is not present.
+ *
+ * @param ptr Input from shared memory.
+ *
+ * @return It returns a pointer for the structure associated with the input.
+ */
+static ebpf_cgroup_target_t * ebpf_cgroup_find_or_create(netdata_ebpf_cgroup_shm_body_t *ptr)
+{
+ ebpf_cgroup_target_t *ect, *prev;
+ for (ect = ebpf_cgroup_pids, prev = ebpf_cgroup_pids; ect; prev = ect, ect = ect->next) {
+ if (ect->hash == ptr->hash && !strcmp(ect->name, ptr->name)) {
+ ect->updated = 1;
+ return ect;
+ }
+ }
+
+ ebpf_cgroup_target_t *new_ect = callocz(1, sizeof(ebpf_cgroup_target_t));
+
+ ebpf_cgroup_set_target_data(new_ect, ptr);
+ if (!ebpf_cgroup_pids) {
+ ebpf_cgroup_pids = new_ect;
+ } else {
+ prev->next = new_ect;
+ }
+
+ return new_ect;
+}
+
+/**
+ * Update pid link list
+ *
+ * Update PIDs list associated with specific cgroup.
+ *
+ * @param ect cgroup structure where pids will be stored
+ * @param path file with PIDs associated with the cgroup.
+ */
+static void ebpf_update_pid_link_list(ebpf_cgroup_target_t *ect, char *path)
+{
+ procfile *ff = procfile_open(path, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (!ff)
+ return;
+
+ ff = procfile_readall(ff);
+ if (!ff)
+ return;
+
+ size_t lines = procfile_lines(ff), l;
+ for (l = 0; l < lines ;l++) {
+ int pid = (int)str2l(procfile_lineword(ff, l, 0));
+ if (pid) {
+ struct pid_on_target2 *pt, *prev;
+ for (pt = ect->pids, prev = ect->pids; pt; prev = pt, pt = pt->next) {
+ if (pt->pid == pid)
+ break;
+ }
+
+ if (!pt) {
+ struct pid_on_target2 *w = callocz(1, sizeof(struct pid_on_target2));
+ w->pid = pid;
+ if (!ect->pids)
+ ect->pids = w;
+ else
+ prev->next = w;
+ }
+ }
+ }
+
+ procfile_close(ff);
+}
+
+/**
+ * Reset updated variable
+ *
+ * Reset the `updated` flag. If this flag is not set again, the structure will be removed from the linked list.
+ */
+ void ebpf_reset_updated_var()
+ {
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ ect->updated = 0;
+ }
+ }
+
+/**
+ * Parse cgroup shared memory
+ *
+ * This function is responsible for copying the necessary data from shared memory to local memory.
+ */
+void ebpf_parse_cgroup_shm_data()
+{
+ if (shm_ebpf_cgroup.header) {
+ sem_wait(shm_sem_ebpf_cgroup);
+ int i, end = shm_ebpf_cgroup.header->cgroup_root_count;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+
+ ebpf_remove_cgroup_target_update_list();
+
+ ebpf_reset_updated_var();
+
+ for (i = 0; i < end; i++) {
+ netdata_ebpf_cgroup_shm_body_t *ptr = &shm_ebpf_cgroup.body[i];
+ if (ptr->enabled) {
+ ebpf_cgroup_target_t *ect = ebpf_cgroup_find_or_create(ptr);
+ ebpf_update_pid_link_list(ect, ptr->path);
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+
+ sem_post(shm_sem_ebpf_cgroup);
+ }
+}
+
+// --------------------------------------------------------------------------------------------------------------------
+// Create charts
+
+/**
+ * Create charts on systemd submenu
+ *
+ * @param id the chart id
+ * @param title the chart title.
+ * @param units the value displayed on vertical axis.
+ * @param family Submenu that the chart will be attached to on the dashboard.
+ * @param charttype chart type
+ * @param order the chart order
+ * @param algorithm the algorithm used by dimension
+ * @param context add context for chart
+ * @param module chart module name, this is the eBPF thread.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
+ char *algorithm, char *context, char *module, int update_every)
+{
+ ebpf_cgroup_target_t *w;
+ ebpf_write_chart_cmd(NETDATA_SERVICE_FAMILY, id, title, units, family, charttype, context,
+ order, update_every, module);
+
+ for (w = ebpf_cgroup_pids; w; w = w->next) {
+ if (unlikely(w->systemd) && unlikely(w->updated))
+ fprintf(stdout, "DIMENSION %s '' %s 1 1\n", w->name, algorithm);
+ }
+}
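
For readers unfamiliar with the two-step mapping performed by `ebpf_map_cgroup_shared_memory()` above (map only the header, read the advertised length, remap the full region, then guard access with a named semaphore), here is a hedged standalone sketch using the same POSIX calls. The shared-memory and semaphore names and the header layout are illustrative assumptions, not the plugin's real identifiers, and another process is assumed to have created the segment already.

    /* Sketch: header-first mapping of a POSIX shared-memory segment plus a
     * named semaphore for serialization. Link with -lrt -pthread on older glibc. */
    #include <fcntl.h>
    #include <semaphore.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct example_shm_header { unsigned body_length; };

    int main(void)
    {
        int fd = shm_open("/example_ebpf_cgroup", O_RDWR, 0660);
        if (fd < 0) { perror("shm_open"); return 1; }

        /* First pass: header only, to learn the full region size. */
        struct example_shm_header *hdr =
            mmap(NULL, sizeof(*hdr), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (hdr == MAP_FAILED) { perror("mmap header"); close(fd); return 1; }

        size_t full = hdr->body_length;
        munmap(hdr, sizeof(*hdr));

        /* Second pass: remap header plus body with the advertised length. */
        void *region = mmap(NULL, full, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (region == MAP_FAILED) { perror("mmap full"); close(fd); return 1; }

        /* A named semaphore serializes the reader and the writer. */
        sem_t *sem = sem_open("/example_ebpf_cgroup_sem", O_CREAT, 0660, 1);
        if (sem != SEM_FAILED) {
            sem_wait(sem);
            /* ... read the mapped region here ... */
            sem_post(sem);
            sem_close(sem);
        }

        munmap(region, full);
        close(fd);
        return 0;
    }
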
diff --git a/collectors/ebpf.plugin/ebpf_cgroup.h b/collectors/ebpf.plugin/ebpf_cgroup.h
new file mode 100644
index 000000000..03969194a
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_cgroup.h
@@ -0,0 +1,70 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_CGROUP_H
+#define NETDATA_EBPF_CGROUP_H 1
+
+#define NETDATA_EBPF_CGROUP_MAX_TRIES 3
+#define NETDATA_EBPF_CGROUP_NEXT_TRY_SEC 30
+
+#include "ebpf.h"
+#include "ebpf_apps.h"
+
+#define NETDATA_SERVICE_FAMILY "services"
+
+struct pid_on_target2 {
+ int32_t pid;
+ int updated;
+
+ netdata_publish_swap_t swap;
+ netdata_fd_stat_t fd;
+ netdata_publish_vfs_t vfs;
+ ebpf_process_stat_t ps;
+ netdata_dcstat_pid_t dc;
+ netdata_publish_shm_t shm;
+ ebpf_bandwidth_t socket;
+ netdata_cachestat_pid_t cachestat;
+
+ struct pid_on_target2 *next;
+};
+
+enum ebpf_cgroup_flags {
+ NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART = 1,
+ NETDATA_EBPF_CGROUP_HAS_SWAP_CHART = 1<<2,
+ NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART = 1<<3,
+ NETDATA_EBPF_CGROUP_HAS_FD_CHART = 1<<4,
+ NETDATA_EBPF_CGROUP_HAS_VFS_CHART = 1<<5,
+ NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART = 1<<6,
+ NETDATA_EBPF_CGROUP_HAS_CACHESTAT_CHART = 1<<7,
+ NETDATA_EBPF_CGROUP_HAS_DC_CHART = 1<<8,
+ NETDATA_EBPF_CGROUP_HAS_SHM_CHART = 1<<9
+};
+
+typedef struct ebpf_cgroup_target {
+ char name[256]; // title
+ uint32_t hash;
+ uint32_t flags;
+ uint32_t systemd;
+ uint32_t updated;
+
+ netdata_publish_swap_t publish_systemd_swap;
+ netdata_fd_stat_t publish_systemd_fd;
+ netdata_publish_vfs_t publish_systemd_vfs;
+ ebpf_process_stat_t publish_systemd_ps;
+ netdata_publish_dcstat_t publish_dc;
+ int oomkill;
+ netdata_publish_shm_t publish_shm;
+ ebpf_socket_publish_apps_t publish_socket;
+ netdata_publish_cachestat_t publish_cachestat;
+
+ struct pid_on_target2 *pids;
+ struct ebpf_cgroup_target *next;
+} ebpf_cgroup_target_t;
+
+extern void ebpf_map_cgroup_shared_memory();
+extern void ebpf_parse_cgroup_shm_data();
+extern void ebpf_close_cgroup_shm();
+extern void ebpf_clean_cgroup_pids();
+extern void ebpf_create_charts_on_systemd(char *id, char *title, char *units, char *family, char *charttype, int order,
+ char *algorithm, char *context, char *module, int update_every);
+
+#endif /* NETDATA_EBPF_CGROUP_H */
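
The `enum ebpf_cgroup_flags` above defines per-chart bits that the collectors OR into `ebpf_cgroup_target_t.flags` (see, for instance, the cachestat cgroup code earlier in this patch). A small self-contained sketch of that set/test/clear pattern; the enum values are copied locally so the example compiles on its own.

    /* Sketch: combine chart-state bit flags on a 32-bit field, mirroring how
     * NETDATA_EBPF_CGROUP_HAS_*_CHART is used: set on chart creation, tested
     * before sending data, cleared when the chart is obsoleted. */
    #include <stdint.h>
    #include <stdio.h>

    enum example_flags {
        EXAMPLE_HAS_SWAP_CHART      = 1 << 2,
        EXAMPLE_HAS_CACHESTAT_CHART = 1 << 7,
    };

    int main(void)
    {
        uint32_t flags = 0;

        flags |= EXAMPLE_HAS_CACHESTAT_CHART;            /* chart was created */

        if (flags & EXAMPLE_HAS_CACHESTAT_CHART)
            printf("send cachestat data for this cgroup\n");

        flags &= ~EXAMPLE_HAS_CACHESTAT_CHART;           /* chart obsoleted */

        if (!(flags & EXAMPLE_HAS_SWAP_CHART))
            printf("swap chart not created yet\n");

        return 0;
    }
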
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index 01fd97972..7ae821889 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -7,16 +7,14 @@ static char *dcstat_counter_dimension_name[NETDATA_DCSTAT_IDX_END] = { "ratio",
static netdata_syscall_stat_t dcstat_counter_aggregated_data[NETDATA_DCSTAT_IDX_END];
static netdata_publish_syscall_t dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_END];
-static ebpf_data_t dcstat_data;
-
netdata_dcstat_pid_t *dcstat_vector = NULL;
netdata_publish_dcstat_t **dcstat_pid = NULL;
static struct bpf_link **probe_links = NULL;
static struct bpf_object *objects = NULL;
-static int *map_fd = NULL;
static netdata_idx_t dcstat_hash_values[NETDATA_DCSTAT_IDX_END];
+static netdata_idx_t *dcstat_values = NULL;
static int read_thread_closed = 1;
@@ -30,9 +28,20 @@ struct netdata_static_thread dcstat_threads = {"DCSTAT KERNEL",
NULL, NULL, 1, NULL,
NULL, NULL};
-static ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0},
- {.name = NULL, .internal_input = 0, .user_input = 0}};
+static ebpf_local_maps_t dcstat_maps[] = {{.name = "dcstat_global", .internal_input = NETDATA_DIRECTORY_CACHE_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "dcstat_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "dcstat_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_lookup_fast",
.function_to_attach = "lookup_fast",
@@ -51,7 +60,7 @@ static ebpf_specify_name_t dc_optional_name[] = { {.program_name = "netdata_look
*
* Update publish values before to write dimension.
*
- * @param out strcuture that will receive data.
+ * @param out structure that will receive data.
* @param cache_access number of access to directory cache.
* @param not_found number of files not found on the file system
*/
@@ -117,18 +126,21 @@ static void ebpf_dcstat_cleanup(void *ptr)
}
freez(dcstat_vector);
+ freez(dcstat_values);
ebpf_cleanup_publish_syscall(dcstat_counter_publish_aggregated);
ebpf_dcstat_clean_names();
- struct bpf_program *prog;
- size_t i = 0 ;
- bpf_object__for_each_program(prog, objects) {
- bpf_link__destroy(probe_links[i]);
- i++;
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
}
- bpf_object__close(objects);
}
/*****************************************************************
@@ -146,43 +158,42 @@ static void ebpf_dcstat_cleanup(void *ptr)
*/
void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- UNUSED(em);
struct target *root = ptr;
ebpf_create_charts_on_apps(NETDATA_DC_HIT_CHART,
"Percentage of files listed inside directory cache",
EBPF_COMMON_DIMENSION_PERCENTAGE,
- NETDATA_APPS_DCSTAT_GROUP,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
20100,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
ebpf_create_charts_on_apps(NETDATA_DC_REFERENCE_CHART,
"Count file access.",
EBPF_COMMON_DIMENSION_FILES,
- NETDATA_APPS_DCSTAT_GROUP,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
20101,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
ebpf_create_charts_on_apps(NETDATA_DC_REQUEST_NOT_CACHE_CHART,
"Access to files that were not present inside directory cache.",
EBPF_COMMON_DIMENSION_FILES,
- NETDATA_APPS_DCSTAT_GROUP,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
20102,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
ebpf_create_charts_on_apps(NETDATA_DC_REQUEST_NOT_FOUND_CHART,
"Number of requests for files that were not found on filesystem.",
EBPF_COMMON_DIMENSION_FILES,
- NETDATA_APPS_DCSTAT_GROUP,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
NETDATA_EBPF_CHART_TYPE_STACKED,
20103,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
}
/*****************************************************************
@@ -252,7 +263,7 @@ static void read_apps_table()
netdata_dcstat_pid_t *cv = dcstat_vector;
uint32_t key;
struct pid_stat *pids = root_of_pids;
- int fd = map_fd[NETDATA_DCSTAT_PID_STATS];
+ int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs;
while (pids) {
key = pids->pid;
@@ -274,6 +285,43 @@ static void read_apps_table()
}
/**
+ * Update cgroup
+ *
+ * Update cgroup data copying values from the per-PID tables.
+ */
+static void ebpf_update_dc_cgroup()
+{
+ netdata_dcstat_pid_t *cv = dcstat_vector;
+ int fd = dcstat_maps[NETDATA_DCSTAT_PID_STATS].map_fd;
+ size_t length = sizeof(netdata_dcstat_pid_t)*ebpf_nprocs;
+
+ ebpf_cgroup_target_t *ect;
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ netdata_dcstat_pid_t *out = &pids->dc;
+ if (likely(dcstat_pid) && dcstat_pid[pid]) {
+ netdata_publish_dcstat_t *in = dcstat_pid[pid];
+
+ memcpy(out, &in->curr, sizeof(netdata_dcstat_pid_t));
+ } else {
+ memset(cv, 0, length);
+ if (bpf_map_lookup_elem(fd, &pid, cv)) {
+ continue;
+ }
+
+ dcstat_apps_accumulator(cv);
+
+ memcpy(out, cv, sizeof(netdata_dcstat_pid_t));
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
* Read global table
*
* Read the table with number of calls for all functions
@@ -282,12 +330,18 @@ static void read_global_table()
{
uint32_t idx;
netdata_idx_t *val = dcstat_hash_values;
- netdata_idx_t stored;
- int fd = map_fd[NETDATA_DCSTAT_GLOBAL_STATS];
+ netdata_idx_t *stored = dcstat_values;
+ int fd = dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd;
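+    // The lookup fills one value per processor; sum them into a single total before publishing.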
for (idx = NETDATA_KEY_DC_REFERENCE; idx < NETDATA_DIRECTORY_CACHE_END; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, &stored)) {
- val[idx] = stored;
+ if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+ int i;
+ int end = ebpf_nprocs;
+ netdata_idx_t total = 0;
+ for (i = 0; i < end; i++)
+ total += stored[i];
+
+ val[idx] = total;
}
}
}
@@ -311,7 +365,7 @@ void *ebpf_dcstat_read_hash(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
- usec_t step = NETDATA_LATENCY_DCSTAT_SLEEP_MS * em->update_time;
+ usec_t step = NETDATA_LATENCY_DCSTAT_SLEEP_MS * em->update_every;
while (!close_ebpf_plugin) {
usec_t dt = heartbeat_next(&hb, step);
(void)dt;
@@ -350,7 +404,7 @@ void ebpf_dcstat_sum_pids(netdata_publish_dcstat_t *publish, struct pid_on_targe
}
/**
- * Send data to Netdata calling auxiliar functions.
+ * Send data to Netdata calling auxiliary functions.
*
* @param root the target list.
*/
@@ -457,6 +511,324 @@ static void dcstat_send_global(netdata_publish_dcstat_t *publish)
}
/**
+ * Create specific directory cache charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_specific_dc_charts(char *type, int update_every)
+{
+ ebpf_create_chart(type, NETDATA_DC_HIT_CHART, "Percentage of files listed inside directory cache.",
+ EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5700,
+ ebpf_create_global_dimension,
+ dcstat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
+
+ ebpf_create_chart(type, NETDATA_DC_REFERENCE_CHART, "Count file access.",
+ EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_CGROUP_DC_REFERENCE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5701,
+ ebpf_create_global_dimension,
+ &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
+
+ ebpf_create_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART,
+ "Access to files that were not present inside directory cache.",
+ EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5702,
+ ebpf_create_global_dimension,
+ &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_SLOW], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
+
+ ebpf_create_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART,
+ "Number of requests for files that were not found on filesystem.",
+ EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5703,
+ ebpf_create_global_dimension,
+ &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_MISS], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
+}
+
+/**
+ * Obsolete specific directory cache charts
+ *
+ * Obsolete charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_specific_dc_charts(char *type, int update_every)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_DC_HIT_CHART,
+ "Percentage of files listed inside directory cache.",
+ EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5700, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_DC_REFERENCE_CHART,
+ "Count file access.",
+ EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_REFERENCE_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5701, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART,
+ "Access to files that were not present inside directory cache.",
+ EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5702, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART,
+ "Number of requests for files that were not found on filesystem.",
+ EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5703, update_every);
+}
+
+/**
+ * Directory Cache sum PIDs
+ *
+ * Sum values for all PIDs associated with a group.
+ *
+ * @param publish output structure.
+ * @param root    structure with the listed PIDs.
+ */
+void ebpf_dc_sum_cgroup_pids(netdata_publish_dcstat_t *publish, struct pid_on_target2 *root)
+{
+ memset(&publish->curr, 0, sizeof(netdata_dcstat_pid_t));
+ netdata_dcstat_pid_t *dst = &publish->curr;
+ while (root) {
+ netdata_dcstat_pid_t *src = &root->dc;
+
+ dst->cache_access += src->cache_access;
+ dst->file_system += src->file_system;
+ dst->not_found += src->not_found;
+
+ root = root->next;
+ }
+}
+
+/**
+ * Calc chart values
+ *
+ * Do necessary math to plot charts.
+ */
+void ebpf_dc_calc_chart_values()
+{
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_dc_sum_cgroup_pids(&ect->publish_dc, ect->pids);
+ uint64_t cache = ect->publish_dc.curr.cache_access;
+ uint64_t not_found = ect->publish_dc.curr.not_found;
+
+ dcstat_update_publish(&ect->publish_dc, cache, not_found);
+
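+        // Publish the delta of cache accesses since the previous iteration, then remember the current counter.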
+ ect->publish_dc.cache_access = (long long)ect->publish_dc.curr.cache_access -
+ (long long)ect->publish_dc.prev.cache_access;
+ ect->publish_dc.prev.cache_access = ect->publish_dc.curr.cache_access;
+
+ if (ect->publish_dc.curr.not_found < ect->publish_dc.prev.not_found) {
+ ect->publish_dc.prev.not_found = 0;
+ }
+ }
+}
+
+/**
+ * Create Systemd directory cache Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_dc_charts(int update_every)
+{
+ ebpf_create_charts_on_systemd(NETDATA_DC_HIT_CHART,
+ "Percentage of files listed inside directory cache.",
+ EBPF_COMMON_DIMENSION_PERCENTAGE,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ 21200,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_DC_REFERENCE_CHART,
+ "Count file access.",
+ EBPF_COMMON_DIMENSION_FILES,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ 21201,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_CACHE_CHART,
+ "Access to files that were not present inside directory cache.",
+ EBPF_COMMON_DIMENSION_FILES,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ 21202,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_DC_REQUEST_NOT_FOUND_CHART,
+ "Number of requests for files that were not found on filesystem.",
+ EBPF_COMMON_DIMENSION_FILES,
+ NETDATA_DIRECTORY_CACHE_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ 21202,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT, NETDATA_EBPF_MODULE_NAME_DCSTAT,
+ update_every);
+}
+
+/**
+ * Send Directory Cache charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @return It returns the chart-creation status: zero when a specific dimension must be removed,
+ *         otherwise 1 to avoid chart recreation.
+ */
+static int ebpf_send_systemd_dc_charts()
+{
+ int ret = 1;
+ collected_number value;
+ ebpf_cgroup_target_t *ect;
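+    // ret drops to zero when a listed cgroup is not an updated systemd service, telling the caller to recreate these charts.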
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_HIT_CHART);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long) ect->publish_dc.ratio);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REFERENCE_CHART);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long) ect->publish_dc.cache_access);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_CACHE_CHART);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ value = (collected_number) (!ect->publish_dc.cache_access) ? 0 :
+ (long long )ect->publish_dc.curr.file_system - (long long)ect->publish_dc.prev.file_system;
+ ect->publish_dc.prev.file_system = ect->publish_dc.curr.file_system;
+
+ write_chart_dimension(ect->name, (long long) value);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_DC_REQUEST_NOT_FOUND_CHART);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ value = (collected_number) (!ect->publish_dc.cache_access) ? 0 :
+ (long long)ect->publish_dc.curr.not_found - (long long)ect->publish_dc.prev.not_found;
+
+ ect->publish_dc.prev.not_found = ect->publish_dc.curr.not_found;
+
+ write_chart_dimension(ect->name, (long long) value);
+ }
+ }
+ write_end_chart();
+
+ return ret;
+}
+
+/**
+ * Send specific Directory Cache data
+ *
+ * Send collected data to Netdata.
+ *
+ * @param type chart type (the cgroup name).
+ * @param pdc  structure with the values to publish.
+ */
+static void ebpf_send_specific_dc_data(char *type, netdata_publish_dcstat_t *pdc)
+{
+ collected_number value;
+ write_begin_chart(type, NETDATA_DC_HIT_CHART);
+ write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_RATIO].name,
+ (long long) pdc->ratio);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_DC_REFERENCE_CHART);
+ write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE].name,
+ (long long) pdc->cache_access);
+ write_end_chart();
+
+ value = (collected_number) (!pdc->cache_access) ? 0 :
+ (long long )pdc->curr.file_system - (long long)pdc->prev.file_system;
+ pdc->prev.file_system = pdc->curr.file_system;
+
+ write_begin_chart(type, NETDATA_DC_REQUEST_NOT_CACHE_CHART);
+ write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_SLOW].name, (long long) value);
+ write_end_chart();
+
+ value = (collected_number) (!pdc->cache_access) ? 0 :
+ (long long)pdc->curr.not_found - (long long)pdc->prev.not_found;
+ pdc->prev.not_found = pdc->curr.not_found;
+
+ write_begin_chart(type, NETDATA_DC_REQUEST_NOT_FOUND_CHART);
+ write_chart_dimension(dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_MISS].name, (long long) value);
+ write_end_chart();
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+*/
+void ebpf_dc_send_cgroup_data(int update_every)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ ebpf_dc_calc_chart_values();
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+ if (has_systemd) {
+ static int systemd_charts = 0;
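+        // Create the systemd charts once; recreate them whenever the send routine reports a missing dimension.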
+ if (!systemd_charts) {
+ ebpf_create_systemd_dc_charts(update_every);
+ systemd_charts = 1;
+ }
+
+ systemd_charts = ebpf_send_systemd_dc_charts();
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_DC_CHART) && ect->updated) {
+ ebpf_create_specific_dc_charts(ect->name, update_every);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_DC_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_DC_CHART) {
+ if (ect->updated) {
+ ebpf_send_specific_dc_data(ect->name, &ect->publish_dc);
+ } else {
+ ebpf_obsolete_specific_dc_charts(ect->name, update_every);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_DC_CHART;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
* Main loop for this collector.
*/
static void dcstat_collector(ebpf_module_t *em)
@@ -464,29 +836,40 @@ static void dcstat_collector(ebpf_module_t *em)
dcstat_threads.thread = mallocz(sizeof(netdata_thread_t));
dcstat_threads.start_routine = ebpf_dcstat_read_hash;
- map_fd = dcstat_data.map_fd;
-
netdata_thread_create(dcstat_threads.thread, dcstat_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
ebpf_dcstat_read_hash, em);
netdata_publish_dcstat_t publish;
memset(&publish, 0, sizeof(publish));
int apps = em->apps_charts;
+ int cgroups = em->cgroup_charts;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
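+    // Start one step short of update_every so data is sent on the first loop iteration.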
while (!close_ebpf_plugin) {
pthread_mutex_lock(&collect_data_mutex);
pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
- if (apps)
- read_apps_table();
+ if (++counter == update_every) {
+ counter = 0;
+ if (apps)
+ read_apps_table();
- pthread_mutex_lock(&lock);
+ if (cgroups)
+ ebpf_update_dc_cgroup();
- dcstat_send_global(&publish);
+ pthread_mutex_lock(&lock);
- if (apps)
- ebpf_dcache_send_apps_data(apps_groups_root_target);
+ dcstat_send_global(&publish);
+
+ if (apps)
+ ebpf_dcache_send_apps_data(apps_groups_root_target);
+
+ if (cgroups)
+ ebpf_dc_send_cgroup_data(update_every);
+
+ pthread_mutex_unlock(&lock);
+ }
- pthread_mutex_unlock(&lock);
pthread_mutex_unlock(&collect_data_mutex);
}
}
@@ -501,26 +884,29 @@ static void dcstat_collector(ebpf_module_t *em)
* Create filesystem charts
*
* Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-static void ebpf_create_filesystem_charts()
+static void ebpf_create_filesystem_charts(int update_every)
{
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_DC_HIT_CHART,
"Percentage of files listed inside directory cache",
- EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_FILESYSTEM_SUBMENU,
+ EBPF_COMMON_DIMENSION_PERCENTAGE, NETDATA_DIRECTORY_CACHE_SUBMENU,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21200,
ebpf_create_global_dimension,
- dcstat_counter_publish_aggregated, 1);
+ dcstat_counter_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, NETDATA_DC_REFERENCE_CHART,
"Variables used to calculate hit ratio.",
- EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_FILESYSTEM_SUBMENU,
+ EBPF_COMMON_DIMENSION_FILES, NETDATA_DIRECTORY_CACHE_SUBMENU,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21201,
ebpf_create_global_dimension,
- &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 3);
+ &dcstat_counter_publish_aggregated[NETDATA_DCSTAT_IDX_REFERENCE], 3,
+ update_every, NETDATA_EBPF_MODULE_NAME_DCSTAT);
fflush(stdout);
}
@@ -531,15 +917,18 @@ static void ebpf_create_filesystem_charts()
* We are not testing the return, because callocz does this and shutdown the software
* case it was not possible to allocate.
*
- * @param length is the length for the vectors used inside the collector.
+ * @param apps is apps enabled?
*/
-static void ebpf_dcstat_allocate_global_vectors(size_t length)
+static void ebpf_dcstat_allocate_global_vectors(int apps)
{
- dcstat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_dcstat_t *));
+ if (apps)
+ dcstat_pid = callocz((size_t)pid_max, sizeof(netdata_publish_dcstat_t *));
+
dcstat_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_dcstat_pid_t));
+ dcstat_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
- memset(dcstat_counter_aggregated_data, 0, length*sizeof(netdata_syscall_stat_t));
- memset(dcstat_counter_publish_aggregated, 0, length*sizeof(netdata_publish_syscall_t));
+ memset(dcstat_counter_aggregated_data, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_syscall_stat_t));
+ memset(dcstat_counter_publish_aggregated, 0, NETDATA_DCSTAT_IDX_END * sizeof(netdata_publish_syscall_t));
}
/*****************************************************************
@@ -563,21 +952,19 @@ void *ebpf_dcstat_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = dcstat_maps;
- fill_ebpf_data(&dcstat_data);
- ebpf_update_module(em, &dcstat_config, NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE);
- ebpf_update_pid_table(&dcstat_maps[0], em);
+ ebpf_update_pid_table(&dcstat_maps[NETDATA_DCSTAT_PID_STATS], em);
ebpf_update_names(dc_optional_name, em);
if (!em->enabled)
goto enddcstat;
- ebpf_dcstat_allocate_global_vectors(NETDATA_DCSTAT_IDX_END);
+ ebpf_dcstat_allocate_global_vectors(em->apps_charts);
pthread_mutex_lock(&lock);
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, dcstat_data.map_fd);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
if (!probe_links) {
pthread_mutex_unlock(&lock);
goto enddcstat;
@@ -592,7 +979,7 @@ void *ebpf_dcstat_thread(void *ptr)
dcstat_counter_dimension_name, dcstat_counter_dimension_name,
algorithms, NETDATA_DCSTAT_IDX_END);
- ebpf_create_filesystem_charts();
+ ebpf_create_filesystem_charts(em->update_every);
pthread_mutex_unlock(&lock);
dcstat_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.h b/collectors/ebpf.plugin/ebpf_dcstat.h
index ad4bd1992..c5e6e2bcf 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.h
+++ b/collectors/ebpf.plugin/ebpf_dcstat.h
@@ -3,6 +3,8 @@
#ifndef NETDATA_EBPF_DCSTAT_H
#define NETDATA_EBPF_DCSTAT_H 1
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_DCSTAT "dcstat"
// charts
#define NETDATA_DC_HIT_CHART "dc_hit_ratio"
@@ -11,11 +13,21 @@
#define NETDATA_DC_REQUEST_NOT_FOUND_CHART "dc_not_found"
#define NETDATA_DIRECTORY_CACHE_SUBMENU "directory cache (eBPF)"
-#define NETDATA_DIRECTORY_FILESYSTEM_SUBMENU "Directory Cache (eBPF)"
// configuration file
#define NETDATA_DIRECTORY_DCSTAT_CONFIG_FILE "dcstat.conf"
+// Contexts
+#define NETDATA_CGROUP_DC_HIT_RATIO_CONTEXT "cgroup.dc_ratio"
+#define NETDATA_CGROUP_DC_REFERENCE_CONTEXT "cgroup.dc_reference"
+#define NETDATA_CGROUP_DC_NOT_CACHE_CONTEXT "cgroup.dc_not_cache"
+#define NETDATA_CGROUP_DC_NOT_FOUND_CONTEXT "cgroup.dc_not_found"
+
+#define NETDATA_SYSTEMD_DC_HIT_RATIO_CONTEXT "services.dc_ratio"
+#define NETDATA_SYSTEMD_DC_REFERENCE_CONTEXT "services.dc_reference"
+#define NETDATA_SYSTEMD_DC_NOT_CACHE_CONTEXT "services.dc_not_cache"
+#define NETDATA_SYSTEMD_DC_NOT_FOUND_CONTEXT "services.dc_not_found"
+
#define NETDATA_LATENCY_DCSTAT_SLEEP_MS 700000ULL
enum directory_cache_indexes {
@@ -60,5 +72,6 @@ typedef struct netdata_publish_dcstat {
extern void *ebpf_dcstat_thread(void *ptr);
extern void ebpf_dcstat_create_apps_charts(struct ebpf_module *em, void *ptr);
extern void clean_dcstat_pid_structures();
+extern struct config dcstat_config;
#endif // NETDATA_EBPF_DCSTAT_H
diff --git a/collectors/ebpf.plugin/ebpf_disk.c b/collectors/ebpf.plugin/ebpf_disk.c
new file mode 100644
index 000000000..6e139ec9f
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_disk.c
@@ -0,0 +1,842 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+#include <stdlib.h>
+
+#include "ebpf.h"
+#include "ebpf_disk.h"
+
+struct config disk_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static ebpf_local_maps_t disk_maps[] = {{.name = "tbl_disk_iocall", .internal_input = NETDATA_DISK_HISTOGRAM_LENGTH,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+static avl_tree_lock disk_tree;
+netdata_ebpf_disks_t *disk_list = NULL;
+
+char *tracepoint_block_type = { "block"} ;
+char *tracepoint_block_issue = { "block_rq_issue" };
+char *tracepoint_block_rq_complete = { "block_rq_complete" };
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+static int was_block_issue_enabled = 0;
+static int was_block_rq_complete_enabled = 0;
+
+static char **dimensions = NULL;
+static netdata_syscall_stat_t disk_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS];
+static netdata_publish_syscall_t disk_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS];
+
+static int read_thread_closed = 1;
+
+static netdata_idx_t *disk_hash_values = NULL;
+static struct netdata_static_thread disk_threads = {"DISK KERNEL",
+ NULL, NULL, 1, NULL,
+ NULL, NULL };
+
+ebpf_publish_disk_t *plot_disks = NULL;
+pthread_mutex_t plot_mutex;
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO MANIPULATE HARD DISKS
+ *
+ *****************************************************************/
+
+/**
+ * Parse start
+ *
+ * Parse start address of disk
+ *
+ * @param w structure where data is stored
+ * @param filename variable used to store value
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+static inline int ebpf_disk_parse_start(netdata_ebpf_disks_t *w, char *filename)
+{
+ char content[FILENAME_MAX + 1];
+ int fd = open(filename, O_RDONLY, 0);
+ if (fd < 0) {
+ return -1;
+ }
+
+ ssize_t file_length = read(fd, content, 4095);
+ if (file_length > 0) {
+ if (file_length > FILENAME_MAX)
+ file_length = FILENAME_MAX;
+
+ content[file_length] = '\0';
+ w->start = strtoul(content, NULL, 10);
+ }
+ close(fd);
+
+ return 0;
+}
+
+/**
+ * Parse uevent
+ *
+ * Parse uevent file
+ *
+ * @param w structure where data is stored
+ * @param filename path of the file to read
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+static inline int ebpf_parse_uevent(netdata_ebpf_disks_t *w, char *filename)
+{
+ char content[FILENAME_MAX + 1];
+ int fd = open(filename, O_RDONLY, 0);
+ if (fd < 0) {
+ return -1;
+ }
+
+ ssize_t file_length = read(fd, content, FILENAME_MAX);
+ if (file_length > 0) {
+ if (file_length > FILENAME_MAX)
+ file_length = FILENAME_MAX;
+
+ content[file_length] = '\0';
+
+ char *s = strstr(content, "PARTNAME=EFI");
+ if (s) {
+ w->main->boot_partition = w;
+ w->flags |= NETDATA_DISK_HAS_EFI;
+ w->boot_chart = strdupz("disk_bootsector");
+ }
+ }
+ close(fd);
+
+ return 0;
+}
+
+/**
+ * Parse Size
+ *
+ * @param w structure where data is stored
+ * @param filename path of the file to read
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+static inline int ebpf_parse_size(netdata_ebpf_disks_t *w, char *filename)
+{
+ char content[FILENAME_MAX + 1];
+ int fd = open(filename, O_RDONLY, 0);
+ if (fd < 0) {
+ return -1;
+ }
+
+ ssize_t file_length = read(fd, content, FILENAME_MAX);
+ if (file_length > 0) {
+ if (file_length > FILENAME_MAX)
+ file_length = FILENAME_MAX;
+
+ content[file_length] = '\0';
+ w->end = w->start + strtoul(content, NULL, 10) -1;
+ }
+ close(fd);
+
+ return 0;
+}
+
+/**
+ * Read Disk information
+ *
+ * Read disk information from /sys/block
+ *
+ * @param w structure where data is stored
+ * @param name disk name
+ */
+static void ebpf_read_disk_info(netdata_ebpf_disks_t *w, char *name)
+{
+ static netdata_ebpf_disks_t *main_disk = NULL;
+ static uint32_t key = 0;
+ char *path = { "/sys/block" };
+ char disk[NETDATA_DISK_NAME_LEN + 1];
+ char filename[FILENAME_MAX + 1];
+ snprintfz(disk, NETDATA_DISK_NAME_LEN, "%s", name);
+ size_t length = strlen(disk);
+ if (!length) {
+ return;
+ }
+
+ length--;
+ size_t curr = length;
+ while (isdigit((int)disk[length])) {
+ disk[length--] = '\0';
+ }
+
+    // We are looking for partition information; whole devices are only recorded as the main disk and skipped.
+ if (curr == length) {
+ main_disk = w;
+ key = MKDEV(w->major, w->minor);
+ w->bootsector_key = key;
+ return;
+ }
+ w->bootsector_key = key;
+ w->main = main_disk;
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/%s/uevent", path, disk, name);
+ if (ebpf_parse_uevent(w, filename))
+ return;
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/%s/start", path, disk, name);
+ if (ebpf_disk_parse_start(w, filename))
+ return;
+
+ snprintfz(filename, FILENAME_MAX, "%s/%s/%s/size", path, disk, name);
+ ebpf_parse_size(w, filename);
+}
+
+/**
+ * New encode dev
+ *
+ * New encode algorithm extracted from https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L39
+ *
+ * @param major device major number
+ * @param minor device minor number
+ *
+ * @return It returns the encoded device number.
+ */
+static inline uint32_t netdata_new_encode_dev(uint32_t major, uint32_t minor) {
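+    // Example: major 8, minor 0 (/dev/sda) encodes to 0x800; minors above 255 spill into bits 20 and up.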
+ return (minor & 0xff) | (major << 8) | ((minor & ~0xff) << 12);
+}
+
+/**
+ * Compare disks
+ *
+ * Compare major and minor values to add disks to tree.
+ *
+ * @param a pointer to netdata_ebpf_disks
+ * @param b pointer to netdata_ebpf_disks
+ *
+ * @return It returns 0 when the values are equal, 1 when a is bigger than b, and -1 when a is smaller than b.
+*/
+static int ebpf_compare_disks(void *a, void *b)
+{
+ netdata_ebpf_disks_t *ptr1 = a;
+ netdata_ebpf_disks_t *ptr2 = b;
+
+ if (ptr1->dev > ptr2->dev)
+ return 1;
+ if (ptr1->dev < ptr2->dev)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Update disk table
+ *
+ * Update the linked list and AVL tree when a new disk appears.
+ *
+ * @param name disk name
+ * @param major major disk identifier
+ * @param minor minor disk identifier
+ * @param current_time current timestamp
+ */
+static void update_disk_table(char *name, int major, int minor, time_t current_time)
+{
+ netdata_ebpf_disks_t find;
+ netdata_ebpf_disks_t *w;
+ size_t length;
+
+ uint32_t dev = netdata_new_encode_dev(major, minor);
+ find.dev = dev;
+ netdata_ebpf_disks_t *ret = (netdata_ebpf_disks_t *) avl_search_lock(&disk_tree, (avl_t *)&find);
+ if (ret) { // Disk is already present
+ ret->flags |= NETDATA_DISK_IS_HERE;
+ ret->last_update = current_time;
+ return;
+ }
+
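+    // Not present in the AVL tree yet: append the disk to the linked list (unless the device is already listed) and insert it below.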
+ netdata_ebpf_disks_t *update_next = disk_list;
+ if (likely(disk_list)) {
+ netdata_ebpf_disks_t *move = disk_list;
+ while (move) {
+ if (dev == move->dev)
+ return;
+
+ update_next = move;
+ move = move->next;
+ }
+
+ w = callocz(1, sizeof(netdata_ebpf_disks_t));
+ length = strlen(name);
+ if (length >= NETDATA_DISK_NAME_LEN)
+ length = NETDATA_DISK_NAME_LEN;
+
+ memcpy(w->family, name, length);
+ w->family[length] = '\0';
+ w->major = major;
+ w->minor = minor;
+ w->dev = netdata_new_encode_dev(major, minor);
+ update_next->next = w;
+ } else {
+ disk_list = callocz(1, sizeof(netdata_ebpf_disks_t));
+ length = strlen(name);
+ if (length >= NETDATA_DISK_NAME_LEN)
+ length = NETDATA_DISK_NAME_LEN;
+
+ memcpy(disk_list->family, name, length);
+ disk_list->family[length] = '\0';
+ disk_list->major = major;
+ disk_list->minor = minor;
+ disk_list->dev = netdata_new_encode_dev(major, minor);
+
+ w = disk_list;
+ }
+
+ ebpf_read_disk_info(w, name);
+
+ netdata_ebpf_disks_t *check;
+ check = (netdata_ebpf_disks_t *) avl_insert_lock(&disk_tree, (avl_t *)w);
+ if (check != w)
+ error("Internal error, cannot insert the AVL tree.");
+
+#ifdef NETDATA_INTERNAL_CHECKS
+ info("The Latency is monitoring the hard disk %s (Major = %d, Minor = %d, Device = %u)", name, major, minor,w->dev);
+#endif
+
+ w->flags |= NETDATA_DISK_IS_HERE;
+}
+
+/**
+ * Read Local Disks
+ *
+ * Parse /proc/partitions to get block disks used to measure latency.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+static int read_local_disks()
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s%s", netdata_configured_host_prefix, NETDATA_EBPF_PROC_PARTITIONS);
+ procfile *ff = procfile_open(filename, " \t:", PROCFILE_FLAG_DEFAULT);
+ if (!ff)
+ return -1;
+
+ ff = procfile_readall(ff);
+ if (!ff)
+ return -1;
+
+ size_t lines = procfile_lines(ff), l;
+ time_t current_time = now_realtime_sec();
+ for(l = 2; l < lines ;l++) {
+ size_t words = procfile_linewords(ff, l);
+ // This is header or end of file
+ if (unlikely(words < 4))
+ continue;
+
+ int major = (int)strtol(procfile_lineword(ff, l, 0), NULL, 10);
+        // The main goal of this thread is to measure block devices, so any block device with a major number
+        // of 7 or lower according to /proc/devices is not "important" and is skipped.
+ if (major > 7) {
+ int minor = (int)strtol(procfile_lineword(ff, l, 1), NULL, 10);
+ update_disk_table(procfile_lineword(ff, l, 3), major, minor, current_time);
+ }
+ }
+
+ procfile_close(ff);
+
+ return 0;
+}
+
+/**
+ * Update disks
+ *
+ * @param em main thread structure
+ */
+void ebpf_update_disks(ebpf_module_t *em)
+{
+ static time_t update_every = 0;
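+    // Despite its name, this static holds the next timestamp at which the disk list will be re-read.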
+ time_t curr = now_realtime_sec();
+ if (curr < update_every)
+ return;
+
+ update_every = curr + 5 * em->update_every;
+
+ (void)read_local_disks();
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Disk disable tracepoints
+ *
+ * Disable tracepoints when the plugin was responsible to enable it.
+ */
+static void ebpf_disk_disable_tracepoints()
+{
+ char *default_message = { "Cannot disable the tracepoint" };
+ if (!was_block_issue_enabled) {
+ if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_issue))
+ error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_issue);
+ }
+
+ if (!was_block_rq_complete_enabled) {
+ if (ebpf_disable_tracing_values(tracepoint_block_type, tracepoint_block_rq_complete))
+ error("%s %s/%s.", default_message, tracepoint_block_type, tracepoint_block_rq_complete);
+ }
+}
+
+/**
+ * Cleanup plot disks
+ *
+ * Clean disk list
+ */
+static void ebpf_cleanup_plot_disks()
+{
+ ebpf_publish_disk_t *move = plot_disks, *next;
+ while (move) {
+ next = move->next;
+
+ freez(move);
+
+ move = next;
+ }
+}
+
+/**
+ * Cleanup Disk List
+ */
+static void ebpf_cleanup_disk_list()
+{
+ netdata_ebpf_disks_t *move = disk_list;
+ while (move) {
+ netdata_ebpf_disks_t *next = move->next;
+
+ freez(move->histogram.name);
+ freez(move->boot_chart);
+ freez(move);
+
+ move = next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_disk_cleanup(void *ptr)
+{
+ ebpf_disk_disable_tracepoints();
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ if (dimensions)
+ ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS);
+
+ freez(disk_hash_values);
+ freez(disk_threads.thread);
+ pthread_mutex_destroy(&plot_mutex);
+
+ ebpf_cleanup_plot_disks();
+ ebpf_cleanup_disk_list();
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ *
+ * MAIN LOOP
+ *
+ *****************************************************************/
+
+/**
+ * Fill Plot list
+ *
+ * @param ptr a pointer to the current disk
+ */
+static void ebpf_fill_plot_disks(netdata_ebpf_disks_t *ptr)
+{
+ pthread_mutex_lock(&plot_mutex);
+ ebpf_publish_disk_t *w;
+ if (likely(plot_disks)) {
+ ebpf_publish_disk_t *move = plot_disks, *store = plot_disks;
+ while (move) {
+ if (move->plot == ptr) {
+ pthread_mutex_unlock(&plot_mutex);
+ return;
+ }
+
+ store = move;
+ move = move->next;
+ }
+
+ w = callocz(1, sizeof(ebpf_publish_disk_t));
+ w->plot = ptr;
+ store->next = w;
+ } else {
+ plot_disks = callocz(1, sizeof(ebpf_publish_disk_t));
+ plot_disks->plot = ptr;
+ }
+ pthread_mutex_unlock(&plot_mutex);
+
+ ptr->flags |= NETDATA_DISK_ADDED_TO_PLOT_LIST;
+}
+
+/**
+ * Read hard disk table
+ *
+ * Read the hash table with the latency histogram of each monitored disk.
+ *
+ * @param table file descriptor for the table
+ */
+static void read_hard_disk_tables(int table)
+{
+ netdata_idx_t *values = disk_hash_values;
+ block_key_t key = {};
+ block_key_t next_key = {};
+
+ netdata_ebpf_disks_t *ret = NULL;
+
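+    // Walk every key in the kernel table; each key identifies a (histogram bin, device) pair.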
+ while (bpf_map_get_next_key(table, &key, &next_key) == 0) {
+ int test = bpf_map_lookup_elem(table, &key, values);
+ if (test < 0) {
+ key = next_key;
+ continue;
+ }
+
+ netdata_ebpf_disks_t find;
+ find.dev = key.dev;
+
+ if (likely(ret)) {
+ if (find.dev != ret->dev)
+ ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find);
+ } else
+ ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find);
+
+ // Disk was inserted after we parse /proc/partitions
+ if (!ret) {
+ if (read_local_disks()) {
+ key = next_key;
+ continue;
+ }
+
+ ret = (netdata_ebpf_disks_t *)avl_search_lock(&disk_tree, (avl_t *)&find);
+ if (!ret) {
+                // We should never reach this point, but we keep the check to stay on the safe side.
+ key = next_key;
+ continue;
+ }
+ }
+
+ uint64_t total = 0;
+ int i;
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ for (i = 0; i < end; i++) {
+ total += values[i];
+ }
+
+ ret->histogram.histogram[key.bin] = total;
+
+ if (!(ret->flags & NETDATA_DISK_ADDED_TO_PLOT_LIST))
+ ebpf_fill_plot_disks(ret);
+
+ key = next_key;
+ }
+}
+
+/**
+ * Disk read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary, because we cannot block the whole plugin while reading data on a very busy system.
+ *
+ * @param ptr It is a NULL value for this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_disk_read_hash(void *ptr)
+{
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ usec_t step = NETDATA_LATENCY_DISK_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_hard_disk_tables(disk_maps[NETDATA_DISK_READ].map_fd);
+ }
+
+ return NULL;
+}
+
+/**
+ * Obsolete Hard Disk charts
+ *
+ * Make Hard disk charts and fill chart name
+ *
+ * @param w the structure with necessary information to create the chart
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_hd_charts(netdata_ebpf_disks_t *w, int update_every)
+{
+ ebpf_write_chart_obsolete(w->histogram.name, w->family, w->histogram.title, EBPF_COMMON_DIMENSION_CALL,
+ w->family, NETDATA_EBPF_CHART_TYPE_STACKED, "disk.latency_io",
+ w->histogram.order, update_every);
+
+ w->flags = 0;
+}
+
+/**
+ * Create Hard Disk charts
+ *
+ * Create hard disk charts and fill the chart name.
+ *
+ * @param w the structure with necessary information to create the chart
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_hd_charts(netdata_ebpf_disks_t *w, int update_every)
+{
+ int order = NETDATA_CHART_PRIO_DISK_LATENCY;
+ char *family = w->family;
+
+ w->histogram.name = strdupz("disk_latency_io");
+ w->histogram.title = NULL;
+ w->histogram.order = order;
+
+ ebpf_create_chart(w->histogram.name, family, "Disk latency", EBPF_COMMON_DIMENSION_CALL,
+ family, "disk.latency_io", NETDATA_EBPF_CHART_TYPE_STACKED, order,
+ ebpf_create_global_dimension, disk_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
+ update_every, NETDATA_EBPF_MODULE_NAME_DISK);
+ order++;
+
+ w->flags |= NETDATA_DISK_CHART_CREATED;
+}
+
+/**
+ * Remove pointer from plot
+ *
+ * Remove pointer from plot list when the disk is not present.
+ */
+static void ebpf_remove_pointer_from_plot_disk(ebpf_module_t *em)
+{
+ time_t current_time = now_realtime_sec();
+ time_t limit = 10 * em->update_every;
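+    // Disks missing from /proc/partitions for more than ten collection intervals have their charts flagged obsolete and are dropped.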
+ pthread_mutex_lock(&plot_mutex);
+ ebpf_publish_disk_t *move = plot_disks, *prev = plot_disks;
+ int update_every = em->update_every;
+ while (move) {
+ netdata_ebpf_disks_t *ned = move->plot;
+ uint32_t flags = ned->flags;
+
+ if (!(flags & NETDATA_DISK_IS_HERE) && ((current_time - ned->last_update) > limit)) {
+ ebpf_obsolete_hd_charts(ned, update_every);
+ avl_t *ret = (avl_t *)avl_remove_lock(&disk_tree, (avl_t *)ned);
+ UNUSED(ret);
+            if (move == plot_disks) {
+                plot_disks = move->next;
+                prev = plot_disks;
+                freez(move);
+                move = plot_disks;
+                continue;
+ } else {
+ prev->next = move->next;
+ ebpf_publish_disk_t *clean = move;
+ move = move->next;
+ freez(clean);
+ continue;
+ }
+ }
+
+ prev = move;
+ move = move->next;
+ }
+ pthread_mutex_unlock(&plot_mutex);
+}
+
+/**
+ * Send Hard disk data
+ *
+ * Send hard disk information to Netdata.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_latency_send_hd_data(int update_every)
+{
+ pthread_mutex_lock(&plot_mutex);
+ if (!plot_disks) {
+ pthread_mutex_unlock(&plot_mutex);
+ return;
+ }
+
+ ebpf_publish_disk_t *move = plot_disks;
+ while (move) {
+ netdata_ebpf_disks_t *ned = move->plot;
+ uint32_t flags = ned->flags;
+ if (!(flags & NETDATA_DISK_CHART_CREATED)) {
+ ebpf_create_hd_charts(ned, update_every);
+ }
+
+ if ((flags & NETDATA_DISK_CHART_CREATED)) {
+ write_histogram_chart(ned->histogram.name, ned->family,
+ ned->histogram.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
+ }
+
+ ned->flags &= ~NETDATA_DISK_IS_HERE;
+
+ move = move->next;
+ }
+ pthread_mutex_unlock(&plot_mutex);
+}
+
+/**
+ * Main loop for this collector.
+ */
+static void disk_collector(ebpf_module_t *em)
+{
+ disk_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+ disk_threads.thread = mallocz(sizeof(netdata_thread_t));
+ disk_threads.start_routine = ebpf_disk_read_hash;
+
+ netdata_thread_create(disk_threads.thread, disk_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_disk_read_hash, em);
+
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ read_thread_closed = 0;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
+ ebpf_remove_pointer_from_plot_disk(em);
+ ebpf_latency_send_hd_data(update_every);
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+
+ ebpf_update_disks(em);
+ }
+ read_thread_closed = 1;
+}
+
+/*****************************************************************
+ *
+ * EBPF DISK THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Enable tracepoints
+ *
+ * Enable necessary tracepoints for thread.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+static int ebpf_disk_enable_tracepoints()
+{
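+    // Remember whether each tracepoint was already enabled, so cleanup only disables what this plugin turned on.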
+ int test = ebpf_is_tracepoint_enabled(tracepoint_block_type, tracepoint_block_issue);
+ if (test == -1)
+ return -1;
+ else if (!test) {
+ if (ebpf_enable_tracing_values(tracepoint_block_type, tracepoint_block_issue))
+ return -1;
+ }
+ was_block_issue_enabled = test;
+
+ test = ebpf_is_tracepoint_enabled(tracepoint_block_type, tracepoint_block_rq_complete);
+ if (test == -1)
+ return -1;
+ else if (!test) {
+ if (ebpf_enable_tracing_values(tracepoint_block_type, tracepoint_block_rq_complete))
+ return -1;
+ }
+ was_block_rq_complete_enabled = test;
+
+ return 0;
+}
+
+/**
+ * Disk thread
+ *
+ * Thread used to generate disk charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_disk_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_disk_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = disk_maps;
+
+ if (!em->enabled)
+ goto enddisk;
+
+ if (ebpf_disk_enable_tracepoints()) {
+ em->enabled = CONFIG_BOOLEAN_NO;
+ goto enddisk;
+ }
+
+ avl_init_lock(&disk_tree, ebpf_compare_disks);
+ if (read_local_disks()) {
+ em->enabled = CONFIG_BOOLEAN_NO;
+ goto enddisk;
+ }
+
+ if (pthread_mutex_init(&plot_mutex, NULL)) {
+ error("Cannot initialize local mutex");
+ goto enddisk;
+ }
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto enddisk;
+ }
+
+ int algorithms[NETDATA_EBPF_HIST_MAX_BINS];
+ ebpf_fill_algorithms(algorithms, NETDATA_EBPF_HIST_MAX_BINS, NETDATA_EBPF_INCREMENTAL_IDX);
+ dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS);
+
+ ebpf_global_labels(disk_aggregated_data, disk_publish_aggregated, dimensions, dimensions, algorithms,
+ NETDATA_EBPF_HIST_MAX_BINS);
+
+ disk_collector(em);
+
+enddisk:
+ netdata_thread_cleanup_pop(1);
+
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_disk.h b/collectors/ebpf.plugin/ebpf_disk.h
new file mode 100644
index 000000000..8e58174b9
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_disk.h
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_DISK_H
+#define NETDATA_EBPF_DISK_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_DISK "disk"
+
+#include "libnetdata/avl/avl.h"
+#include "libnetdata/ebpf/ebpf.h"
+
+#define NETDATA_EBPF_PROC_PARTITIONS "/proc/partitions"
+
+#define NETDATA_LATENCY_DISK_SLEEP_MS 650000ULL
+
+// Process configuration name
+#define NETDATA_DISK_CONFIG_FILE "disk.conf"
+
+// Encoding macro extracted from: https://elixir.bootlin.com/linux/v5.10.8/source/include/linux/kdev_t.h#L7
+#define MINORBITS 20
+#define MKDEV(ma,mi) (((ma) << MINORBITS) | (mi))
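+// Example: MKDEV(8, 0) == (8 << 20) | 0 == 0x800000 for the first SCSI disk (major 8, minor 0).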
+
+enum netdata_latency_disks_flags {
+ NETDATA_DISK_ADDED_TO_PLOT_LIST = 1,
+ NETDATA_DISK_CHART_CREATED = 2,
+ NETDATA_DISK_IS_HERE = 4,
+ NETDATA_DISK_HAS_EFI = 8
+};
+
+/*
+ * The definition (DISK_NAME_LEN) has been a stable value since kernel 3.0,
+ * so it is defined internally here to avoid including linux/genhd.h.
+ */
+#define NETDATA_DISK_NAME_LEN 32
+typedef struct netdata_ebpf_disks {
+ // Search
+ avl_t avl;
+ uint32_t dev;
+ uint32_t major;
+ uint32_t minor;
+ uint32_t bootsector_key;
+ uint64_t start; // start sector
+ uint64_t end; // end sector
+
+ // Print information
+ char family[NETDATA_DISK_NAME_LEN + 1];
+ char *boot_chart;
+
+ netdata_ebpf_histogram_t histogram;
+
+ uint32_t flags;
+ time_t last_update;
+
+ struct netdata_ebpf_disks *main;
+ struct netdata_ebpf_disks *boot_partition;
+ struct netdata_ebpf_disks *next;
+} netdata_ebpf_disks_t;
+
+enum ebpf_disk_tables {
+ NETDATA_DISK_READ
+};
+
+typedef struct block_key {
+ uint32_t bin;
+ uint32_t dev;
+} block_key_t;
+
+typedef struct netdata_ebpf_publish_disk {
+ netdata_ebpf_disks_t *plot;
+ struct netdata_ebpf_publish_disk *next;
+} ebpf_publish_disk_t;
+
+extern struct config disk_config;
+
+extern void *ebpf_disk_thread(void *ptr);
+
+#endif /* NETDATA_EBPF_DISK_H */
+
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
new file mode 100644
index 000000000..6eecf5847
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -0,0 +1,865 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_fd.h"
+
+static char *fd_dimension_names[NETDATA_FD_SYSCALL_END] = { "open", "close" };
+static char *fd_id_names[NETDATA_FD_SYSCALL_END] = { "do_sys_open", "__close_fd" };
+
+static netdata_syscall_stat_t fd_aggregated_data[NETDATA_FD_SYSCALL_END];
+static netdata_publish_syscall_t fd_publish_aggregated[NETDATA_FD_SYSCALL_END];
+
+static ebpf_local_maps_t fd_maps[] = {{.name = "tbl_fd_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_fd_global", .internal_input = NETDATA_KEY_END_VECTOR,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "fd_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+
+
+struct config fd_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+struct netdata_static_thread fd_thread = {"FD KERNEL", NULL, NULL, 1, NULL,
+ NULL, NULL};
+static int read_thread_closed = 1;
+static netdata_idx_t fd_hash_values[NETDATA_FD_COUNTER];
+static netdata_idx_t *fd_values = NULL;
+
+netdata_fd_stat_t *fd_vector = NULL;
+netdata_fd_stat_t **fd_pid = NULL;
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean PID structures
+ *
+ * Clean the allocated structures.
+ */
+void clean_fd_pid_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(fd_pid[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_fd_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ ebpf_cleanup_publish_syscall(fd_publish_aggregated);
+ freez(fd_thread.thread);
+ freez(fd_values);
+ freez(fd_vector);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ *
+ * MAIN LOOP
+ *
+ *****************************************************************/
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_fd_send_data(ebpf_module_t *em)
+{
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].ncall = fd_hash_values[NETDATA_KEY_CALLS_DO_SYS_OPEN];
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].nerr = fd_hash_values[NETDATA_KEY_ERROR_DO_SYS_OPEN];
+
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].ncall = fd_hash_values[NETDATA_KEY_CALLS_CLOSE_FD];
+ fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].nerr = fd_hash_values[NETDATA_KEY_ERROR_CLOSE_FD];
+
+ write_count_chart(NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_FILESYSTEM_FAMILY, fd_publish_aggregated,
+ NETDATA_FD_SYSCALL_END);
+
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY,
+ fd_publish_aggregated, NETDATA_FD_SYSCALL_END);
+ }
+}
+
+/**
+ * Read global counter
+ *
+ * Read the table with number of calls for all functions
+ */
+static void read_global_table()
+{
+ uint32_t idx;
+ netdata_idx_t *val = fd_hash_values;
+ netdata_idx_t *stored = fd_values;
+ int fd = fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd;
+
+ for (idx = NETDATA_KEY_CALLS_DO_SYS_OPEN; idx < NETDATA_FD_COUNTER; idx++) {
+ if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+ int i;
+ int end = ebpf_nprocs;
+ netdata_idx_t total = 0;
+ for (i = 0; i < end; i++)
+ total += stored[i];
+
+ val[idx] = total;
+ }
+ }
+}
+
+/**
+ * File descriptor read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary, because we cannot block the whole plugin while reading the data.
+ *
+ * @param ptr It is a NULL value for this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_fd_read_hash(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ usec_t step = NETDATA_FD_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_global_table();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from kernel and store in the first address.
+ *
+ * @param out the vector with read values.
+ */
+static void fd_apps_accumulator(netdata_fd_stat_t *out)
+{
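+    // Per-CPU accumulation is only performed on kernels >= 4.15; otherwise only the first slot is read.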
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ netdata_fd_stat_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ netdata_fd_stat_t *w = &out[i];
+ total->open_call += w->open_call;
+ total->close_call += w->close_call;
+ total->open_err += w->open_err;
+ total->close_err += w->close_err;
+ }
+}
+
+/**
+ * Fill PID
+ *
+ * Fill PID structures
+ *
+ * @param current_pid pid for which we are collecting data
+ * @param publish     values read from the hash tables.
+ */
+static void fd_fill_pid(uint32_t current_pid, netdata_fd_stat_t *publish)
+{
+ netdata_fd_stat_t *curr = fd_pid[current_pid];
+ if (!curr) {
+ curr = callocz(1, sizeof(netdata_fd_stat_t));
+ fd_pid[current_pid] = curr;
+ }
+
+ memcpy(curr, &publish[0], sizeof(netdata_fd_stat_t));
+}
+
+/**
+ * Read APPS table
+ *
+ * Read the apps table and store data inside the structure.
+ */
+static void read_apps_table()
+{
+ netdata_fd_stat_t *fv = fd_vector;
+ uint32_t key;
+ struct pid_stat *pids = root_of_pids;
+ int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
+ size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs;
+ while (pids) {
+ key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, fv)) {
+ pids = pids->next;
+ continue;
+ }
+
+ fd_apps_accumulator(fv);
+
+ fd_fill_pid(key, fv);
+
+        // We are cleaning to avoid passing data read from one process to another.
+ memset(fv, 0, length);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Update cgroup
+ *
+ * Update cgroup data copying values from the per-PID tables.
+ */
+static void ebpf_update_fd_cgroup()
+{
+ ebpf_cgroup_target_t *ect ;
+ netdata_fd_stat_t *fv = fd_vector;
+ int fd = fd_maps[NETDATA_FD_PID_STATS].map_fd;
+ size_t length = sizeof(netdata_fd_stat_t) * ebpf_nprocs;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ netdata_fd_stat_t *out = &pids->fd;
+ if (likely(fd_pid) && fd_pid[pid]) {
+ netdata_fd_stat_t *in = fd_pid[pid];
+
+ memcpy(out, in, sizeof(netdata_fd_stat_t));
+ } else {
+ memset(fv, 0, length);
+ if (!bpf_map_lookup_elem(fd, &pid, fv)) {
+ fd_apps_accumulator(fv);
+
+ memcpy(out, fv, sizeof(netdata_fd_stat_t));
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param fd the output
+ * @param root list of pids
+ */
+static void ebpf_fd_sum_pids(netdata_fd_stat_t *fd, struct pid_on_target *root)
+{
+ uint32_t open_call = 0;
+ uint32_t close_call = 0;
+ uint32_t open_err = 0;
+ uint32_t close_err = 0;
+
+ while (root) {
+ int32_t pid = root->pid;
+ netdata_fd_stat_t *w = fd_pid[pid];
+ if (w) {
+ open_call += w->open_call;
+ close_call += w->close_call;
+ open_err += w->open_err;
+ close_err += w->close_err;
+ }
+
+ root = root->next;
+ }
+
+    // These conditions were added because we are using an incremental algorithm.
+ fd->open_call = (open_call >= fd->open_call) ? open_call : fd->open_call;
+ fd->close_call = (close_call >= fd->close_call) ? close_call : fd->close_call;
+ fd->open_err = (open_err >= fd->open_err) ? open_err : fd->open_err;
+ fd->close_err = (close_err >= fd->close_err) ? close_err : fd->close_err;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ * @param root the target list.
+*/
+void ebpf_fd_send_apps_data(ebpf_module_t *em, struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ ebpf_fd_sum_pids(&w->fd, w->root_pid);
+ }
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.open_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.open_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.close_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->fd.close_err);
+ }
+ }
+ write_end_chart();
+ }
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param fd structure used to store data
+ * @param pids input data
+ */
+static void ebpf_fd_sum_cgroup_pids(netdata_fd_stat_t *fd, struct pid_on_target2 *pids)
+{
+ netdata_fd_stat_t accumulator;
+ memset(&accumulator, 0, sizeof(accumulator));
+
+ while (pids) {
+ netdata_fd_stat_t *w = &pids->fd;
+
+ accumulator.open_err += w->open_err;
+ accumulator.open_call += w->open_call;
+ accumulator.close_call += w->close_call;
+ accumulator.close_err += w->close_err;
+
+ pids = pids->next;
+ }
+
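+    // Keep the values monotonic: the charts use the incremental algorithm, so dimensions must never decrease.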
+ fd->open_call = (accumulator.open_call >= fd->open_call) ? accumulator.open_call : fd->open_call;
+ fd->open_err = (accumulator.open_err >= fd->open_err) ? accumulator.open_err : fd->open_err;
+ fd->close_call = (accumulator.close_call >= fd->close_call) ? accumulator.close_call : fd->close_call;
+ fd->close_err = (accumulator.close_err >= fd->close_err) ? accumulator.close_err : fd->close_err;
+}
+
+/**
+ * Create specific file descriptor charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param em the main thread structure.
+ */
+static void ebpf_create_specific_fd_charts(char *type, ebpf_module_t *em)
+{
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_CGROUP_FD_OPEN_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400,
+ ebpf_create_global_dimension,
+ &fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401,
+ ebpf_create_global_dimension,
+ &fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN],
+ 1, em->update_every,
+ NETDATA_EBPF_MODULE_NAME_SWAP);
+ }
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_CGROUP_FD_CLOSE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402,
+ ebpf_create_global_dimension,
+ &fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403,
+ ebpf_create_global_dimension,
+ &fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE],
+ 1, em->update_every,
+ NETDATA_EBPF_MODULE_NAME_SWAP);
+ }
+}
+
+/**
+ * Obsolete specific file descriptor charts
+ *
+ * Obsolete charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param em the main thread structure.
+ */
+static void ebpf_obsolete_specific_fd_charts(char *type, ebpf_module_t *em)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5400, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5401, em->update_every);
+ }
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5402, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5403, em->update_every);
+ }
+}
+
+/**
+ * Send specific file descriptor data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type   chart type
+ * @param values structure with values that will be sent to netdata
+ * @param em     the structure with thread information
+ */
+static void ebpf_send_specific_fd_data(char *type, netdata_fd_stat_t *values, ebpf_module_t *em)
+{
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN);
+ write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
+ write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_OPEN].name, (long long)values->open_err);
+ write_end_chart();
+ }
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
+ write_chart_dimension(fd_publish_aggregated[NETDATA_FD_SYSCALL_CLOSE].name, (long long)values->close_err);
+ write_end_chart();
+ }
+}
+
+/**
+ * Create systemd file descriptor charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param em the main collector structure
+ **/
+static void ebpf_create_systemd_fd_charts(ebpf_module_t *em)
+{
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN, "Number of open files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20061,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR, "Fails to open files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20062,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ }
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSED, "Files closed",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20063,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR, "Fails to close files",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_FILE_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20064,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ }
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @param em the main collector structure
+ *
+ * @return It returns 0 when a specific dimension must be removed, forcing the charts to be recreated, and 1
+ *         otherwise, so the charts are not recreated.
+ */
+static int ebpf_send_systemd_fd_charts(ebpf_module_t *em)
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_fd.open_call);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_fd.open_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_fd.close_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_fd.close_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ return ret;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the main collector structure
+*/
+static void ebpf_fd_send_cgroup_data(ebpf_module_t *em)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_fd_sum_cgroup_pids(&ect->publish_systemd_fd, ect->pids);
+ }
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+ if (has_systemd) {
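+        // Systemd charts are recreated whenever the send function returns 0 (a service dimension went away).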
+ static int systemd_charts = 0;
+ if (!systemd_charts) {
+ ebpf_create_systemd_fd_charts(em);
+ systemd_charts = 1;
+ }
+
+ systemd_charts = ebpf_send_systemd_fd_charts(em);
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_FD_CHART) && ect->updated) {
+ ebpf_create_specific_fd_charts(ect->name, em);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_FD_CHART;
+ }
+
+        if (ect->flags & NETDATA_EBPF_CGROUP_HAS_FD_CHART) {
+ if (ect->updated) {
+ ebpf_send_specific_fd_data(ect->name, &ect->publish_systemd_fd, em);
+ } else {
+ ebpf_obsolete_specific_fd_charts(ect->name, em);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_FD_CHART;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+* Main loop for this collector.
+*/
+static void fd_collector(ebpf_module_t *em)
+{
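+    // Spawn the reader thread that polls the kernel hash tables independently from the chart loop below.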
+ fd_thread.thread = mallocz(sizeof(netdata_thread_t));
+ fd_thread.start_routine = ebpf_fd_read_hash;
+
+ netdata_thread_create(fd_thread.thread, fd_thread.name, NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_fd_read_hash, em);
+
+ int apps = em->apps_charts;
+ int cgroups = em->cgroup_charts;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
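+    // Each wakeup comes from the global collector signal (collect_data_cond_var); data is sent only once every `update_every` wakeups.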
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ if (apps)
+ read_apps_table();
+
+ if (cgroups)
+ ebpf_update_fd_cgroup();
+
+ pthread_mutex_lock(&lock);
+
+ ebpf_fd_send_data(em);
+
+ if (apps)
+ ebpf_fd_send_apps_data(em, apps_groups_root_target);
+
+ if (cgroups)
+ ebpf_fd_send_cgroup_data(em);
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ *
+ * CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em  a pointer to the structure with the default values.
+ * @param ptr a pointer to the target list (struct target *).
+ */
+void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+ struct target *root = ptr;
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN,
+ "Number of open files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20061,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
+ "Fails to open files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20062,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED,
+ "Files closed",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20063,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
+ "Fails to close files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_FILE_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20064,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ }
+}
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_fd_global_charts(ebpf_module_t *em)
+{
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_FILE_OPEN_CLOSE_COUNT,
+ "Open and close calls",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_FILE_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_EBPF_FD_CHARTS,
+ ebpf_create_global_dimension,
+ fd_publish_aggregated,
+ NETDATA_FD_SYSCALL_END,
+ em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_FILE_OPEN_ERR_COUNT,
+ "Open fails",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_FILE_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_EBPF_FD_CHARTS + 1,
+ ebpf_create_global_dimension,
+ fd_publish_aggregated,
+ NETDATA_FD_SYSCALL_END,
+ em->update_every, NETDATA_EBPF_MODULE_NAME_FD);
+ }
+}
+
+/*****************************************************************
+ *
+ * MAIN THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate vectors used with this thread.
+ *
+ * The return value is not tested, because callocz already aborts the plugin
+ * when it cannot allocate memory.
+ *
+ * @param apps is apps enabled?
+ */
+static void ebpf_fd_allocate_global_vectors(int apps)
+{
+ if (apps)
+ fd_pid = callocz((size_t)pid_max, sizeof(netdata_fd_stat_t *));
+
+ fd_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_fd_stat_t));
+
+ fd_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
+}
+
+/**
+ * File descriptor thread
+ *
+ * Thread used to collect file descriptor statistics.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_fd_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_fd_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = fd_maps;
+
+ if (!em->enabled)
+ goto endfd;
+
+ ebpf_fd_allocate_global_vectors(em->apps_charts);
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endfd;
+ }
+
+ int algorithms[NETDATA_FD_SYSCALL_END] = {
+ NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX
+ };
+
+ ebpf_global_labels(fd_aggregated_data, fd_publish_aggregated, fd_dimension_names, fd_id_names,
+ algorithms, NETDATA_FD_SYSCALL_END);
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_fd_global_charts(em);
+ pthread_mutex_unlock(&lock);
+
+ fd_collector(em);
+
+endfd:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_fd.h b/collectors/ebpf.plugin/ebpf_fd.h
new file mode 100644
index 000000000..851e040e5
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_fd.h
@@ -0,0 +1,85 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_FD_H
+#define NETDATA_EBPF_FD_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_FD "filedescriptor"
+
+#define NETDATA_FD_SLEEP_MS 850000ULL
+
+// Menu group
+#define NETDATA_FILE_GROUP "file_access"
+
+// Global chart name
+#define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
+#define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
+
+// Charts created on Apps submenu
+#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open"
+#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed"
+#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error"
+#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error"
+
+// Process configuration name
+#define NETDATA_FD_CONFIG_FILE "fd.conf"
+
+// Contexts
+#define NETDATA_CGROUP_FD_OPEN_CONTEXT "cgroup.fd_open"
+#define NETDATA_CGROUP_FD_OPEN_ERR_CONTEXT "cgroup.fd_open_error"
+#define NETDATA_CGROUP_FD_CLOSE_CONTEXT "cgroup.fd_close"
+#define NETDATA_CGROUP_FD_CLOSE_ERR_CONTEXT "cgroup.fd_close_error"
+
+#define NETDATA_SYSTEMD_FD_OPEN_CONTEXT "services.fd_open"
+#define NETDATA_SYSTEMD_FD_OPEN_ERR_CONTEXT "services.fd_open_error"
+#define NETDATA_SYSTEMD_FD_CLOSE_CONTEXT "services.fd_close"
+#define NETDATA_SYSTEMD_FD_CLOSE_ERR_CONTEXT "services.fd_close_error"
+
+typedef struct netdata_fd_stat {
+ uint64_t pid_tgid; // Unique identifier
+ uint32_t pid; // Process ID
+
+ uint32_t open_call; // Open syscalls (open and openat)
+ uint32_t close_call; // Close syscall (close)
+
+ // Errors
+ uint32_t open_err;
+ uint32_t close_err;
+} netdata_fd_stat_t;
+
+enum fd_tables {
+ NETDATA_FD_PID_STATS,
+ NETDATA_FD_GLOBAL_STATS,
+
+ // Keep this as last and don't skip numbers as it is used as element counter
+ NETDATA_FD_CONTROLLER
+};
+
+enum fd_counters {
+ NETDATA_KEY_CALLS_DO_SYS_OPEN,
+ NETDATA_KEY_ERROR_DO_SYS_OPEN,
+
+ NETDATA_KEY_CALLS_CLOSE_FD,
+ NETDATA_KEY_ERROR_CLOSE_FD,
+
+ // Keep this as last and don't skip numbers as it is used as element counter
+ NETDATA_FD_COUNTER
+};
+
+enum fd_syscalls {
+ NETDATA_FD_SYSCALL_OPEN,
+ NETDATA_FD_SYSCALL_CLOSE,
+
+    // Do not insert anything after this value
+ NETDATA_FD_SYSCALL_END
+};
+
+
+extern void *ebpf_fd_thread(void *ptr);
+extern void ebpf_fd_create_apps_charts(struct ebpf_module *em, void *ptr);
+extern struct config fd_config;
+extern netdata_fd_stat_t **fd_pid;
+extern void clean_fd_pid_structures();
+
+#endif /* NETDATA_EBPF_FD_H */
+
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.c b/collectors/ebpf.plugin/ebpf_filesystem.c
new file mode 100644
index 000000000..ad2c9eff0
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_filesystem.c
@@ -0,0 +1,661 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf_filesystem.h"
+
+struct config fs_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static ebpf_local_maps_t fs_maps[] = {{.name = "tbl_ext4", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_xfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_nfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_zfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_btrfs", .internal_input = NETDATA_KEY_CALLS_SYNC,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_ext_addr", .internal_input = 1,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+
+ebpf_filesystem_partitions_t localfs[] =
+ {{.filesystem = "ext4",
+ .optional_filesystem = NULL,
+ .family = "ext4",
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = NULL, .addr = 0}},
+ {.filesystem = "xfs",
+ .optional_filesystem = NULL,
+ .family = "xfs",
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = NULL, .addr = 0}},
+ {.filesystem = "nfs",
+ .optional_filesystem = "nfs4",
+ .family = "nfs",
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_ATTR_CHARTS,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = NULL, .addr = 0}},
+ {.filesystem = "zfs",
+ .optional_filesystem = NULL,
+ .family = "zfs",
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = NULL, .addr = 0}},
+ {.filesystem = "btrfs",
+ .optional_filesystem = NULL,
+ .family = "btrfs",
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = "btrfs_file_operations", .addr = 0}},
+ {.filesystem = NULL,
+ .optional_filesystem = NULL,
+ .family = NULL,
+ .objects = NULL,
+ .probe_links = NULL,
+ .flags = NETDATA_FILESYSTEM_FLAG_NO_PARTITION,
+ .enabled = CONFIG_BOOLEAN_YES,
+ .addresses = {.function = NULL, .addr = 0}}};
+
+struct netdata_static_thread filesystem_threads = {"EBPF FS READ",
+ NULL, NULL, 1, NULL,
+ NULL, NULL };
+
+static int read_thread_closed = 1;
+static netdata_syscall_stat_t filesystem_aggregated_data[NETDATA_EBPF_HIST_MAX_BINS];
+static netdata_publish_syscall_t filesystem_publish_aggregated[NETDATA_EBPF_HIST_MAX_BINS];
+
+char **dimensions = NULL;
+static netdata_idx_t *filesystem_hash_values = NULL;
+
+/*****************************************************************
+ *
+ * COMMON FUNCTIONS
+ *
+ *****************************************************************/
+
+/**
+ * Obsolete filesystem charts
+ *
+ * Mark the latency charts as obsolete when a partition is no longer monitored.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_fs_charts(int update_every)
+{
+ int i;
+ uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED | NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ uint32_t flags = efp->flags;
+ if ((flags & test) == test) {
+ flags &= ~NETDATA_FILESYSTEM_FLAG_CHART_CREATED;
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
+ efp->hread.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hread.order, update_every);
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
+ efp->hwrite.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hwrite.order, update_every);
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name, efp->hopen.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hopen.order, update_every);
+
+ ebpf_write_chart_obsolete(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, efp->hadditional.title,
+ EBPF_COMMON_DIMENSION_CALL, efp->family_name,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, efp->hadditional.order,
+ update_every);
+ }
+ efp->flags = flags;
+ }
+}
+
+/**
+ * Create Filesystem chart
+ *
+ * Create latency charts
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_fs_charts(int update_every)
+{
+ static int order = NETDATA_CHART_PRIO_EBPF_FILESYSTEM_CHARTS;
+ char chart_name[64], title[256], family[64];
+ int i;
+ uint32_t test = NETDATA_FILESYSTEM_FLAG_CHART_CREATED|NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ uint32_t flags = efp->flags;
+ if (flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION && !(flags & test)) {
+ snprintfz(title, 255, "%s latency for each read request.", efp->filesystem);
+ snprintfz(family, 63, "%s_latency", efp->family);
+ snprintfz(chart_name, 63, "%s_read_latency", efp->filesystem);
+ efp->hread.name = strdupz(chart_name);
+ efp->hread.title = strdupz(title);
+ efp->hread.order = order;
+ efp->family_name = strdupz(family);
+
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
+ title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
+ update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
+ order++;
+
+ snprintfz(title, 255, "%s latency for each write request.", efp->filesystem);
+ snprintfz(chart_name, 63, "%s_write_latency", efp->filesystem);
+ efp->hwrite.name = strdupz(chart_name);
+ efp->hwrite.title = strdupz(title);
+ efp->hwrite.order = order;
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
+ title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
+ update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
+ order++;
+
+ snprintfz(title, 255, "%s latency for each open request.", efp->filesystem);
+ snprintfz(chart_name, 63, "%s_open_latency", efp->filesystem);
+ efp->hopen.name = strdupz(chart_name);
+ efp->hopen.title = strdupz(title);
+ efp->hopen.order = order;
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
+ title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
+ update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
+ order++;
+
+ char *type = (efp->flags & NETDATA_FILESYSTEM_ATTR_CHARTS) ? "attribute" : "sync";
+ snprintfz(title, 255, "%s latency for each %s request.", efp->filesystem, type);
+ snprintfz(chart_name, 63, "%s_%s_latency", efp->filesystem, type);
+ efp->hadditional.name = strdupz(chart_name);
+ efp->hadditional.title = strdupz(title);
+ efp->hadditional.order = order;
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name, title,
+ EBPF_COMMON_DIMENSION_CALL, family,
+ NULL, NETDATA_EBPF_CHART_TYPE_STACKED, order, ebpf_create_global_dimension,
+ filesystem_publish_aggregated, NETDATA_EBPF_HIST_MAX_BINS,
+ update_every, NETDATA_EBPF_MODULE_NAME_FILESYSTEM);
+ order++;
+ efp->flags |= NETDATA_FILESYSTEM_FLAG_CHART_CREATED;
+ }
+ }
+}
+
+/**
+ * Initialize eBPF data
+ *
+ * @param em main thread structure.
+ *
+ * @return it returns 0 on success and -1 otherwise.
+ */
+int ebpf_filesystem_initialize_ebpf_data(ebpf_module_t *em)
+{
+ int i;
+ const char *saved_name = em->thread_name;
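+    // The thread name is swapped temporarily so ebpf_load_program() loads the eBPF object that matches each filesystem.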
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if (!efp->probe_links && efp->flags & NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM) {
+ em->thread_name = efp->filesystem;
+ efp->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &efp->objects);
+ if (!efp->probe_links) {
+ em->thread_name = saved_name;
+ return -1;
+ }
+ efp->flags |= NETDATA_FILESYSTEM_FLAG_HAS_PARTITION;
+
+            // Needed for filesystems like btrfs
+ if ((efp->flags & NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE) && (efp->addresses.function)) {
+ ebpf_load_addresses(&efp->addresses, fs_maps[i + 1].map_fd);
+ }
+ }
+ efp->flags &= ~NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
+ }
+ em->thread_name = saved_name;
+
+ if (!dimensions) {
+ dimensions = ebpf_fill_histogram_dimension(NETDATA_EBPF_HIST_MAX_BINS);
+
+ memset(filesystem_aggregated_data, 0 , NETDATA_EBPF_HIST_MAX_BINS * sizeof(netdata_syscall_stat_t));
+ memset(filesystem_publish_aggregated, 0 , NETDATA_EBPF_HIST_MAX_BINS * sizeof(netdata_publish_syscall_t));
+
+ filesystem_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+ }
+
+ return 0;
+}
+
+/**
+ * Read Local partitions
+ *
+ * @return the total of partitions that will be monitored
+ */
+static int ebpf_read_local_partitions()
+{
+ char filename[FILENAME_MAX + 1];
+ snprintfz(filename, FILENAME_MAX, "%s/proc/self/mountinfo", netdata_configured_host_prefix);
+ procfile *ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) {
+ snprintfz(filename, FILENAME_MAX, "%s/proc/1/mountinfo", netdata_configured_host_prefix);
+ ff = procfile_open(filename, " \t", PROCFILE_FLAG_DEFAULT);
+ if(unlikely(!ff)) return 0;
+ }
+
+ ff = procfile_readall(ff);
+ if(unlikely(!ff))
+ return 0;
+
+ int count = 0;
+ unsigned long l, i, lines = procfile_lines(ff);
+ for (i = 0; localfs[i].filesystem; i++) {
+ localfs[i].flags |= NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ }
+
+ for(l = 0; l < lines ; l++) {
+        // In the "normal" situation the filesystem type is at column 7.
+        // When the `shared` option is added to the mount information, it moves to column 8.
+        // Finally, when systemd starts Netdata, it can appear at column 9.
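+        // Counting from the end avoids that ambiguity: the filesystem type is always the third-to-last word on the line.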
+ unsigned long index = procfile_linewords(ff, l) - 3;
+
+ char *fs = procfile_lineword(ff, l, index);
+
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *w = &localfs[i];
+ if (w->enabled && (!strcmp(fs, w->filesystem) ||
+ (w->optional_filesystem && !strcmp(fs, w->optional_filesystem)))) {
+ localfs[i].flags |= NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM;
+ localfs[i].flags &= ~NETDATA_FILESYSTEM_REMOVE_CHARTS;
+ count++;
+ break;
+ }
+ }
+ }
+ procfile_close(ff);
+
+ return count;
+}
+
+/**
+ * Update partition
+ *
+ * Update the partition structures before plotting.
+ *
+ * @param em main thread structure
+ *
+ * @return 0 on success and -1 otherwise.
+ */
+static int ebpf_update_partitions(ebpf_module_t *em)
+{
+ static time_t update_every = 0;
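+    // Despite its name, this static stores the timestamp of the next refresh; mountinfo is re-read at most once every `5 * update_every` seconds.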
+ time_t curr = now_realtime_sec();
+ if (curr < update_every)
+ return 0;
+
+ update_every = curr + 5 * em->update_every;
+ if (!ebpf_read_local_partitions()) {
+ em->optional = -1;
+ return -1;
+ }
+
+ if (ebpf_filesystem_initialize_ebpf_data(em)) {
+ return -1;
+ }
+
+ return 0;
+}
+
+/*****************************************************************
+ *
+ * CLEANUP FUNCTIONS
+ *
+ *****************************************************************/
+
+/*
+ * Cleanup eBPF data
+ */
+void ebpf_filesystem_cleanup_ebpf_data()
+{
+ int i;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if (efp->probe_links) {
+ freez(efp->family_name);
+
+ freez(efp->hread.name);
+ freez(efp->hread.title);
+
+ freez(efp->hwrite.name);
+ freez(efp->hwrite.title);
+
+ freez(efp->hopen.name);
+ freez(efp->hopen.title);
+
+ freez(efp->hadditional.name);
+ freez(efp->hadditional.title);
+
+ struct bpf_link **probe_links = efp->probe_links;
+            size_t j = 0;
+ struct bpf_program *prog;
+ bpf_object__for_each_program(prog, efp->objects) {
+ bpf_link__destroy(probe_links[j]);
+ j++;
+ }
+ bpf_object__close(efp->objects);
+ }
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_filesystem_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2*USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(filesystem_threads.thread);
+ ebpf_cleanup_publish_syscall(filesystem_publish_aggregated);
+
+ ebpf_filesystem_cleanup_ebpf_data();
+ if (dimensions)
+ ebpf_histogram_dimension_cleanup(dimensions, NETDATA_EBPF_HIST_MAX_BINS);
+ freez(filesystem_hash_values);
+}
+
+/*****************************************************************
+ *
+ * MAIN THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Select hist
+ *
+ * Select a histogram to store data.
+ *
+ * @param efp pointer to the structure with the histograms.
+ * @param idx output: the bin index inside the selected histogram.
+ * @param id  the raw key read from the hash table.
+ *
+ * @return It returns a pointer for the histogram
+ */
+static inline netdata_ebpf_histogram_t *select_hist(ebpf_filesystem_partitions_t *efp, uint32_t *idx, uint32_t id)
+{
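+    // The kernel table packs four histograms back to back: keys in [0, READ) are reads, [READ, WRITE) writes,
+    // [WRITE, OPEN) opens, and [OPEN, SYNC) the additional (sync or attribute) requests.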
+ if (id < NETDATA_KEY_CALLS_READ) {
+ *idx = id;
+ return &efp->hread;
+ } else if (id < NETDATA_KEY_CALLS_WRITE) {
+ *idx = id - NETDATA_KEY_CALLS_READ;
+ return &efp->hwrite;
+ } else if (id < NETDATA_KEY_CALLS_OPEN) {
+ *idx = id - NETDATA_KEY_CALLS_WRITE;
+ return &efp->hopen;
+    } else if (id < NETDATA_KEY_CALLS_SYNC) {
+ *idx = id - NETDATA_KEY_CALLS_OPEN;
+ return &efp->hadditional;
+ }
+
+ return NULL;
+}
+
+/**
+ * Read filesystem table
+ *
+ * Read the table with the number of calls for all functions.
+ *
+ * @param efp the filesystem partition to update.
+ * @param fd  the file descriptor of the kernel hash table.
+ */
+static void read_filesystem_table(ebpf_filesystem_partitions_t *efp, int fd)
+{
+ netdata_idx_t *values = filesystem_hash_values;
+ uint32_t key;
+ uint32_t idx;
+ for (key = 0; key < NETDATA_KEY_CALLS_SYNC; key++) {
+ netdata_ebpf_histogram_t *w = select_hist(efp, &idx, key);
+ if (!w) {
+ continue;
+ }
+
+ int test = bpf_map_lookup_elem(fd, &key, values);
+ if (test < 0) {
+ continue;
+ }
+
+ uint64_t total = 0;
+ int i;
+ int end = ebpf_nprocs;
+ for (i = 0; i < end; i++) {
+ total += values[i];
+ }
+
+ if (idx >= NETDATA_EBPF_HIST_MAX_BINS)
+ idx = NETDATA_EBPF_HIST_MAX_BINS - 1;
+ w->histogram[idx] = total;
+ }
+}
+
+/**
+ * Read filesystem tables
+ *
+ * Read every monitored filesystem table with the number of calls for all functions.
+ */
+static void read_filesystem_tables()
+{
+ int i;
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if (efp->flags & NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) {
+ read_filesystem_table(efp, fs_maps[i].map_fd);
+ }
+ }
+}
+
+/**
+ * Filesystem read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary, because we cannot freeze the whole plugin while reading data from the kernel tables.
+ *
+ * @param ptr a pointer to `struct ebpf_module`.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_filesystem_read_hash(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ usec_t step = NETDATA_FILESYSTEM_READ_SLEEP_MS * em->update_every;
+ int update_every = em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ (void) ebpf_update_partitions(em);
+ ebpf_obsolete_fs_charts(update_every);
+
+ // No more partitions, it is not necessary to read tables
+ if (em->optional)
+ continue;
+
+ read_filesystem_tables();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Send histogram data
+ *
+ * Send the filesystem latency histograms to Netdata.
+ */
+static void ebpf_histogram_send_data()
+{
+ uint32_t i;
+ uint32_t test = NETDATA_FILESYSTEM_FLAG_HAS_PARTITION | NETDATA_FILESYSTEM_REMOVE_CHARTS;
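+    // Send data only for partitions that are mounted and not flagged for chart removal.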
+ for (i = 0; localfs[i].filesystem; i++) {
+ ebpf_filesystem_partitions_t *efp = &localfs[i];
+ if ((efp->flags & test) == NETDATA_FILESYSTEM_FLAG_HAS_PARTITION) {
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hread.name,
+ efp->hread.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
+
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hwrite.name,
+ efp->hwrite.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
+
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hopen.name,
+ efp->hopen.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
+
+ write_histogram_chart(NETDATA_FILESYSTEM_FAMILY, efp->hadditional.name,
+ efp->hadditional.histogram, dimensions, NETDATA_EBPF_HIST_MAX_BINS);
+ }
+ }
+}
+
+/**
+ * Main loop for this collector.
+ *
+ * @param em main structure for this thread
+ */
+static void filesystem_collector(ebpf_module_t *em)
+{
+ filesystem_threads.thread = mallocz(sizeof(netdata_thread_t));
+ filesystem_threads.start_routine = ebpf_filesystem_read_hash;
+
+ netdata_thread_create(filesystem_threads.thread, filesystem_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE, ebpf_filesystem_read_hash, em);
+
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ while (!close_ebpf_plugin || em->optional) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
+
+ ebpf_create_fs_charts(update_every);
+ ebpf_histogram_send_data();
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ *
+ * ENTRY THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Update Filesystem
+ *
+ * Update file system structure using values read from configuration file.
+ */
+static void ebpf_update_filesystem()
+{
+ char dist[NETDATA_FS_MAX_DIST_NAME + 1];
+ int i;
+ for (i = 0; localfs[i].filesystem; i++) {
+ snprintfz(dist, NETDATA_FS_MAX_DIST_NAME, "%sdist", localfs[i].filesystem);
+
+ localfs[i].enabled = appconfig_get_boolean(&fs_config, NETDATA_FILESYSTEM_CONFIG_NAME, dist,
+ CONFIG_BOOLEAN_YES);
+ }
+}
+
+/**
+ * Filesystem thread
+ *
+ * Thread used to generate filesystem charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_filesystem_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_filesystem_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = fs_maps;
+ ebpf_update_filesystem();
+
+ if (!em->enabled)
+ goto endfilesystem;
+
+    // Initialize optional as zero, to identify when there are no partitions to monitor
+ em->optional = 0;
+
+ if (ebpf_update_partitions(em)) {
+ if (em->optional)
+ info("Netdata cannot monitor the filesystems used on this host.");
+
+ em->enabled = 0;
+ goto endfilesystem;
+ }
+
+ int algorithms[NETDATA_EBPF_HIST_MAX_BINS];
+ ebpf_fill_algorithms(algorithms, NETDATA_EBPF_HIST_MAX_BINS, NETDATA_EBPF_INCREMENTAL_IDX);
+ ebpf_global_labels(filesystem_aggregated_data, filesystem_publish_aggregated, dimensions, dimensions,
+ algorithms, NETDATA_EBPF_HIST_MAX_BINS);
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_fs_charts(em->update_every);
+ pthread_mutex_unlock(&lock);
+
+ filesystem_collector(em);
+
+endfilesystem:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_filesystem.h b/collectors/ebpf.plugin/ebpf_filesystem.h
new file mode 100644
index 000000000..295eec205
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_filesystem.h
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_FILESYSTEM_H
+#define NETDATA_EBPF_FILESYSTEM_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_FILESYSTEM "filesystem"
+
+#include "ebpf.h"
+
+#define NETDATA_FS_MAX_DIST_NAME 64UL
+
+#define NETDATA_FILESYSTEM_CONFIG_NAME "filesystem"
+#define NETDATA_FILESYSTEM_READ_SLEEP_MS 600000ULL
+
+// Process configuration name
+#define NETDATA_FILESYSTEM_CONFIG_FILE "filesystem.conf"
+
+typedef struct netdata_fs_hist {
+ uint32_t hist_id;
+ uint32_t bin;
+} netdata_fs_hist_t;
+
+enum filesystem_limit {
+ NETDATA_KEY_CALLS_READ = 24,
+ NETDATA_KEY_CALLS_WRITE = 48,
+ NETDATA_KEY_CALLS_OPEN = 72,
+ NETDATA_KEY_CALLS_SYNC = 96
+};
+
+enum netdata_filesystem_flags {
+ NETDATA_FILESYSTEM_FLAG_NO_PARTITION = 0,
+ NETDATA_FILESYSTEM_LOAD_EBPF_PROGRAM = 1,
+ NETDATA_FILESYSTEM_FLAG_HAS_PARTITION = 2,
+ NETDATA_FILESYSTEM_FLAG_CHART_CREATED = 4,
+ NETDATA_FILESYSTEM_FILL_ADDRESS_TABLE = 8,
+ NETDATA_FILESYSTEM_REMOVE_CHARTS = 16,
+ NETDATA_FILESYSTEM_ATTR_CHARTS = 32
+};
+
+enum netdata_filesystem_table {
+ NETDATA_MAIN_FS_TABLE,
+ NETDATA_ADDR_FS_TABLE
+};
+
+typedef struct ebpf_filesystem_partitions {
+ char *filesystem;
+ char *optional_filesystem;
+ char *family;
+ char *family_name;
+ struct bpf_object *objects;
+ struct bpf_link **probe_links;
+
+ netdata_ebpf_histogram_t hread;
+ netdata_ebpf_histogram_t hwrite;
+ netdata_ebpf_histogram_t hopen;
+ netdata_ebpf_histogram_t hadditional;
+
+ uint32_t flags;
+ uint32_t enabled;
+
+ ebpf_addresses_t addresses;
+} ebpf_filesystem_partitions_t;
+
+extern void *ebpf_filesystem_thread(void *ptr);
+extern struct config fs_config;
+
+#endif /* NETDATA_EBPF_FILESYSTEM_H */
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.c b/collectors/ebpf.plugin/ebpf_hardirq.c
new file mode 100644
index 000000000..ff649e9cd
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_hardirq.c
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_hardirq.h"
+
+struct config hardirq_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+#define HARDIRQ_MAP_LATENCY 0
+#define HARDIRQ_MAP_LATENCY_STATIC 1
+static ebpf_local_maps_t hardirq_maps[] = {
+ {
+ .name = "tbl_hardirq",
+ .internal_input = NETDATA_HARDIRQ_MAX_IRQS,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ },
+ {
+ .name = "tbl_hardirq_static",
+ .internal_input = HARDIRQ_EBPF_STATIC_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ },
+ /* end */
+ {
+ .name = NULL,
+ .internal_input = 0,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ }
+};
+
+#define HARDIRQ_TP_CLASS_IRQ "irq"
+#define HARDIRQ_TP_CLASS_IRQ_VECTORS "irq_vectors"
+static ebpf_tracepoint_t hardirq_tracepoints[] = {
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ, .event = "irq_handler_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ, .event = "irq_handler_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "thermal_apic_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "thermal_apic_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "threshold_apic_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "threshold_apic_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "error_apic_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "error_apic_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "deferred_error_apic_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "deferred_error_apic_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "spurious_apic_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "spurious_apic_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_single_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "call_function_single_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "reschedule_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "reschedule_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "local_timer_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "local_timer_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "irq_work_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "irq_work_exit"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "x86_platform_ipi_entry"},
+ {.enabled = false, .class = HARDIRQ_TP_CLASS_IRQ_VECTORS, .event = "x86_platform_ipi_exit"},
+ /* end */
+ {.enabled = false, .class = NULL, .event = NULL}
+};
+
+static hardirq_static_val_t hardirq_static_vals[] = {
+ {
+ .idx = HARDIRQ_EBPF_STATIC_APIC_THERMAL,
+ .name = "apic_thermal",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_APIC_THRESHOLD,
+ .name = "apic_threshold",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_APIC_ERROR,
+ .name = "apic_error",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_APIC_DEFERRED_ERROR,
+ .name = "apic_deferred_error",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_APIC_SPURIOUS,
+ .name = "apic_spurious",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_FUNC_CALL,
+ .name = "func_call",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_FUNC_CALL_SINGLE,
+ .name = "func_call_single",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_RESCHEDULE,
+ .name = "reschedule",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_LOCAL_TIMER,
+ .name = "local_timer",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_IRQ_WORK,
+ .name = "irq_work",
+ .latency = 0
+ },
+ {
+ .idx = HARDIRQ_EBPF_STATIC_X86_PLATFORM_IPI,
+ .name = "x86_platform_ipi",
+ .latency = 0
+ },
+};
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+static int read_thread_closed = 1;
+
+// store for "published" data from the reader thread, which the collector
+// thread will write to netdata agent.
+static avl_tree_lock hardirq_pub;
+
+// tmp store for dynamic hard IRQ values we get from a per-CPU eBPF map.
+static hardirq_ebpf_val_t *hardirq_ebpf_vals = NULL;
+
+// tmp store for static hard IRQ values we get from a per-CPU eBPF map.
+static hardirq_ebpf_static_val_t *hardirq_ebpf_static_vals = NULL;
+
+static struct netdata_static_thread hardirq_threads = {"HARDIRQ KERNEL",
+ NULL, NULL, 1, NULL,
+ NULL, NULL };
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void hardirq_cleanup(void *ptr)
+{
+ for (int i = 0; hardirq_tracepoints[i].class != NULL; i++) {
+ ebpf_disable_tracepoint(&hardirq_tracepoints[i]);
+ }
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled) {
+ return;
+ }
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 1 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(hardirq_ebpf_vals);
+ freez(hardirq_ebpf_static_vals);
+ freez(hardirq_threads.thread);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+        size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ * MAIN LOOP
+ *****************************************************************/
+
+/**
+ * Compare hard IRQ values.
+ *
+ * @param a `hardirq_val_t *`.
+ * @param b `hardirq_val_t *`.
+ *
+ * @return 0 if a==b, 1 if a>b, -1 if a<b.
+*/
+static int hardirq_val_cmp(void *a, void *b)
+{
+ hardirq_val_t *ptr1 = a;
+ hardirq_val_t *ptr2 = b;
+
+ if (ptr1->irq > ptr2->irq) {
+ return 1;
+ }
+ else if (ptr1->irq < ptr2->irq) {
+ return -1;
+ }
+ else {
+ return 0;
+ }
+}
+
+static void hardirq_read_latency_map(int mapfd)
+{
+ hardirq_ebpf_key_t key = {};
+ hardirq_ebpf_key_t next_key = {};
+ hardirq_val_t search_v = {};
+ hardirq_val_t *v = NULL;
+
+ while (bpf_map_get_next_key(mapfd, &key, &next_key) == 0) {
+ // get val for this key.
+ int test = bpf_map_lookup_elem(mapfd, &key, hardirq_ebpf_vals);
+ if (unlikely(test < 0)) {
+ key = next_key;
+ continue;
+ }
+
+ // is this IRQ saved yet?
+ //
+ // if not, make a new one, mark it as unsaved for now, and continue; we
+ // will insert it at the end after all of its values are correctly set,
+ // so that we can safely publish it to the collector within a single,
+ // short locked operation.
+ //
+ // otherwise simply continue; we will only update the latency, which
+ // can be republished safely without a lock.
+ //
+ // NOTE: lock isn't strictly necessary for this initial search, as only
+ // this thread does writing, but the AVL is using a read-write lock so
+ // there is no congestion.
+ bool v_is_new = false;
+ search_v.irq = key.irq;
+ v = (hardirq_val_t *)avl_search_lock(&hardirq_pub, (avl_t *)&search_v);
+ if (unlikely(v == NULL)) {
+ // latency/name can only be added reliably at a later time.
+ // when they're added, only then will we AVL insert.
+ v = callocz(1, sizeof(hardirq_val_t));
+ v->irq = key.irq;
+ v->dim_exists = false;
+
+ v_is_new = true;
+ }
+
+ // note two things:
+ // 1. we must add up latency value for this IRQ across all CPUs.
+ // 2. the name is unfortunately *not* available on all CPU maps - only
+ // a single map contains the name, so we must find it. we only need
+ // to copy it though if the IRQ is new for us.
+ bool name_saved = false;
+ uint64_t total_latency = 0;
+ int i;
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ for (i = 0; i < end; i++) {
+ total_latency += hardirq_ebpf_vals[i].latency/1000;
+
+ // copy name for new IRQs.
+ if (v_is_new && !name_saved && hardirq_ebpf_vals[i].name[0] != '\0') {
+ strncpyz(
+ v->name,
+ hardirq_ebpf_vals[i].name,
+ NETDATA_HARDIRQ_NAME_LEN
+ );
+ name_saved = true;
+ }
+ }
+
+ // can now safely publish latency for existing IRQs.
+ v->latency = total_latency;
+
+ // can now safely publish new IRQ.
+ if (v_is_new) {
+ avl_t *check = avl_insert_lock(&hardirq_pub, (avl_t *)v);
+ if (check != (avl_t *)v) {
+ error("Internal error, cannot insert the AVL tree.");
+ }
+ }
+
+ key = next_key;
+ }
+}
+
+static void hardirq_read_latency_static_map(int mapfd)
+{
+ uint32_t i;
+ for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
+ uint32_t map_i = hardirq_static_vals[i].idx;
+ int test = bpf_map_lookup_elem(mapfd, &map_i, hardirq_ebpf_static_vals);
+ if (unlikely(test < 0)) {
+ continue;
+ }
+
+ uint64_t total_latency = 0;
+ int cpu_i;
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ for (cpu_i = 0; cpu_i < end; cpu_i++) {
+ total_latency += hardirq_ebpf_static_vals[cpu_i].latency/1000;
+ }
+
+ hardirq_static_vals[i].latency = total_latency;
+ }
+}
+
+/**
+ * Read eBPF maps for hard IRQ.
+ */
+static void *hardirq_reader(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ usec_t step = NETDATA_HARDIRQ_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ UNUSED(dt);
+
+ hardirq_read_latency_map(hardirq_maps[HARDIRQ_MAP_LATENCY].map_fd);
+ hardirq_read_latency_static_map(hardirq_maps[HARDIRQ_MAP_LATENCY_STATIC].map_fd);
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
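+/**
+ * Create the hard IRQ latency chart.
+ *
+ * Dimensions are added later: static vectors right after creation and dynamic IRQs as they are discovered.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */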
+static void hardirq_create_charts(int update_every)
+{
+ ebpf_create_chart(
+ NETDATA_EBPF_SYSTEM_GROUP,
+ "hardirq_latency",
+ "Hardware IRQ latency",
+ EBPF_COMMON_DIMENSION_MILLISECONDS,
+ "interrupts",
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ NETDATA_CHART_PRIO_HARDIRQ_LATENCY,
+ NULL, NULL, 0, update_every,
+ NETDATA_EBPF_MODULE_NAME_HARDIRQ
+ );
+
+ fflush(stdout);
+}
+
+static void hardirq_create_static_dims()
+{
+ uint32_t i;
+ for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
+ ebpf_write_global_dimension(
+ hardirq_static_vals[i].name, hardirq_static_vals[i].name,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
+ );
+ }
+}
+
+// callback for avl tree traversal on `hardirq_pub`.
+static int hardirq_write_dims(void *entry, void *data)
+{
+ UNUSED(data);
+
+ hardirq_val_t *v = entry;
+
+ // IRQs get dynamically added in, so add the dimension if we haven't yet.
+ if (!v->dim_exists) {
+ ebpf_write_global_dimension(
+ v->name, v->name,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
+ );
+ v->dim_exists = true;
+ }
+
+ write_chart_dimension(v->name, v->latency);
+
+ return 1;
+}
+
+static inline void hardirq_write_static_dims()
+{
+ uint32_t i;
+ for (i = 0; i < HARDIRQ_EBPF_STATIC_END; i++) {
+ write_chart_dimension(
+ hardirq_static_vals[i].name,
+ hardirq_static_vals[i].latency
+ );
+ }
+}
+
+/**
+* Main loop for this collector.
+*/
+static void hardirq_collector(ebpf_module_t *em)
+{
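+    // On kernels older than 4.15 only one slot is read from these tables, so a single element is enough for the buffers.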
+ hardirq_ebpf_vals = callocz(
+ (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs,
+ sizeof(hardirq_ebpf_val_t)
+ );
+ hardirq_ebpf_static_vals = callocz(
+ (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs,
+ sizeof(hardirq_ebpf_static_val_t)
+ );
+
+ avl_init_lock(&hardirq_pub, hardirq_val_cmp);
+
+ // create reader thread.
+ hardirq_threads.thread = mallocz(sizeof(netdata_thread_t));
+ hardirq_threads.start_routine = hardirq_reader;
+ netdata_thread_create(
+ hardirq_threads.thread,
+ hardirq_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE,
+ hardirq_reader,
+ em
+ );
+
+ // create chart and static dims.
+ pthread_mutex_lock(&lock);
+ hardirq_create_charts(em->update_every);
+ hardirq_create_static_dims();
+ pthread_mutex_unlock(&lock);
+
+ // loop and read from published data until ebpf plugin is closed.
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
+
+ // write dims now for all hitherto discovered IRQs.
+ write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "hardirq_latency");
+ avl_traverse_lock(&hardirq_pub, hardirq_write_dims, NULL);
+ hardirq_write_static_dims();
+ write_end_chart();
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ * EBPF HARDIRQ THREAD
+ *****************************************************************/
+
+/**
+ * Hard IRQ latency thread.
+ *
+ * @param ptr a `ebpf_module_t *`.
+ * @return always NULL.
+ */
+void *ebpf_hardirq_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(hardirq_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = hardirq_maps;
+
+ if (!em->enabled) {
+ goto endhardirq;
+ }
+
+ if (ebpf_enable_tracepoints(hardirq_tracepoints) == 0) {
+ em->enabled = CONFIG_BOOLEAN_NO;
+ goto endhardirq;
+ }
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endhardirq;
+ }
+
+ hardirq_collector(em);
+
+endhardirq:
+ netdata_thread_cleanup_pop(1);
+
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_hardirq.h b/collectors/ebpf.plugin/ebpf_hardirq.h
new file mode 100644
index 000000000..4c8a7a098
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_hardirq.h
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_HARDIRQ_H
+#define NETDATA_EBPF_HARDIRQ_H 1
+
+/*****************************************************************
+ * copied from kernel-collectors repo, with modifications needed
+ * for inclusion here.
+ *****************************************************************/
+
+#define NETDATA_HARDIRQ_NAME_LEN 32
+#define NETDATA_HARDIRQ_MAX_IRQS 1024L
+
+typedef struct hardirq_ebpf_key {
+ int irq;
+} hardirq_ebpf_key_t;
+
+typedef struct hardirq_ebpf_val {
+ uint64_t latency;
+ uint64_t ts;
+ char name[NETDATA_HARDIRQ_NAME_LEN];
+} hardirq_ebpf_val_t;
+
+enum hardirq_ebpf_static {
+ HARDIRQ_EBPF_STATIC_APIC_THERMAL,
+ HARDIRQ_EBPF_STATIC_APIC_THRESHOLD,
+ HARDIRQ_EBPF_STATIC_APIC_ERROR,
+ HARDIRQ_EBPF_STATIC_APIC_DEFERRED_ERROR,
+ HARDIRQ_EBPF_STATIC_APIC_SPURIOUS,
+ HARDIRQ_EBPF_STATIC_FUNC_CALL,
+ HARDIRQ_EBPF_STATIC_FUNC_CALL_SINGLE,
+ HARDIRQ_EBPF_STATIC_RESCHEDULE,
+ HARDIRQ_EBPF_STATIC_LOCAL_TIMER,
+ HARDIRQ_EBPF_STATIC_IRQ_WORK,
+ HARDIRQ_EBPF_STATIC_X86_PLATFORM_IPI,
+
+ HARDIRQ_EBPF_STATIC_END
+};
+
+typedef struct hardirq_ebpf_static_val {
+ uint64_t latency;
+ uint64_t ts;
+} hardirq_ebpf_static_val_t;
+
+/*****************************************************************
+ * below this is eBPF plugin-specific code.
+ *****************************************************************/
+
+#define NETDATA_EBPF_MODULE_NAME_HARDIRQ "hardirq"
+#define NETDATA_HARDIRQ_SLEEP_MS 650000ULL
+#define NETDATA_HARDIRQ_CONFIG_FILE "hardirq.conf"
+
+typedef struct hardirq_val {
+ // must be at top for simplified AVL tree usage.
+ // if it's not at the top, we need to use `containerof` for almost all ops.
+ avl_t avl;
+
+ int irq;
+ bool dim_exists; // keep this after `int irq` for alignment byte savings.
+ uint64_t latency;
+ char name[NETDATA_HARDIRQ_NAME_LEN];
+} hardirq_val_t;
+
+typedef struct hardirq_static_val {
+ enum hardirq_ebpf_static idx;
+ char *name;
+ uint64_t latency;
+} hardirq_static_val_t;
+
+extern struct config hardirq_config;
+extern void *ebpf_hardirq_thread(void *ptr);
+
+#endif /* NETDATA_EBPF_HARDIRQ_H */
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.c b/collectors/ebpf.plugin/ebpf_mdflush.c
new file mode 100644
index 000000000..e2420ec8e
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_mdflush.c
@@ -0,0 +1,312 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_mdflush.h"
+
+struct config mdflush_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+#define MDFLUSH_MAP_COUNT 0
+static ebpf_local_maps_t mdflush_maps[] = {
+ {
+ .name = "tbl_mdflush",
+ .internal_input = 1024,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ },
+ /* end */
+ {
+ .name = NULL,
+ .internal_input = 0,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ }
+};
+
+// store for "published" data from the reader thread, which the collector
+// thread will write to netdata agent.
+static avl_tree_lock mdflush_pub;
+
+// tmp store for mdflush values we get from a per-CPU eBPF map.
+static mdflush_ebpf_val_t *mdflush_ebpf_vals = NULL;
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+static int read_thread_closed = 1;
+
+static struct netdata_static_thread mdflush_threads = {"MDFLUSH KERNEL",
+ NULL, NULL, 1, NULL,
+ NULL, NULL };
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void mdflush_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled) {
+ return;
+ }
+
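+    // wait for the reader thread to observe the shutdown and finish before freeing shared buffers.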
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 1 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(mdflush_ebpf_vals);
+ freez(mdflush_threads.thread);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+        size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/**
+ * Compare mdflush values.
+ *
+ * @param a `netdata_mdflush_t *`.
+ * @param b `netdata_mdflush_t *`.
+ *
+ * @return 0 if a==b, 1 if a>b, -1 if a<b.
+ */
+static int mdflush_val_cmp(void *a, void *b)
+{
+ netdata_mdflush_t *ptr1 = a;
+ netdata_mdflush_t *ptr2 = b;
+
+ if (ptr1->unit > ptr2->unit) {
+ return 1;
+ }
+ else if (ptr1->unit < ptr2->unit) {
+ return -1;
+ }
+ else {
+ return 0;
+ }
+}
+
+static void mdflush_read_count_map()
+{
+ int mapfd = mdflush_maps[MDFLUSH_MAP_COUNT].map_fd;
+ mdflush_ebpf_key_t curr_key = (uint32_t)-1;
+ mdflush_ebpf_key_t key = (uint32_t)-1;
+ netdata_mdflush_t search_v;
+ netdata_mdflush_t *v = NULL;
+
+ while (bpf_map_get_next_key(mapfd, &curr_key, &key) == 0) {
+ curr_key = key;
+
+ // get val for this key.
+ int test = bpf_map_lookup_elem(mapfd, &key, mdflush_ebpf_vals);
+ if (unlikely(test < 0)) {
+ continue;
+ }
+
+ // is this record saved yet?
+ //
+ // if not, make a new one, mark it as unsaved for now, and continue; we
+ // will insert it at the end after all of its values are correctly set,
+ // so that we can safely publish it to the collector within a single,
+ // short locked operation.
+ //
+ // otherwise simply continue; we will only update the flush count,
+ // which can be republished safely without a lock.
+ //
+ // NOTE: lock isn't strictly necessary for this initial search, as only
+ // this thread does writing, but the AVL is using a read-write lock so
+ // there is no congestion.
+ bool v_is_new = false;
+ search_v.unit = key;
+ v = (netdata_mdflush_t *)avl_search_lock(
+ &mdflush_pub,
+ (avl_t *)&search_v
+ );
+ if (unlikely(v == NULL)) {
+            // the flush count can only be set reliably later; only once it has
+            // been set do we insert the record into the AVL tree.
+ v = callocz(1, sizeof(netdata_mdflush_t));
+ v->unit = key;
+ sprintf(v->disk_name, "md%u", key);
+ v->dim_exists = false;
+
+ v_is_new = true;
+ }
+
+ // we must add up count value for this record across all CPUs.
+ uint64_t total_cnt = 0;
+ int i;
+ int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
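+        // on kernels older than 4.15 only the first slot carries data.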
+ for (i = 0; i < end; i++) {
+ total_cnt += mdflush_ebpf_vals[i];
+ }
+
+ // can now safely publish count for existing records.
+ v->cnt = total_cnt;
+
+ // can now safely publish new record.
+ if (v_is_new) {
+ avl_t *check = avl_insert_lock(&mdflush_pub, (avl_t *)v);
+ if (check != (avl_t *)v) {
+ error("Internal error, cannot insert the AVL tree.");
+ }
+ }
+ }
+}
+
+/**
+ * Read eBPF maps for mdflush.
+ */
+static void *mdflush_reader(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ usec_t step = NETDATA_MDFLUSH_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ UNUSED(dt);
+
+ mdflush_read_count_map();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+static void mdflush_create_charts(int update_every)
+{
+ ebpf_create_chart(
+ "mdstat",
+ "mdstat_flush",
+ "MD flushes",
+ "flushes",
+ "flush (eBPF)",
+ "md.flush",
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ NETDATA_CHART_PRIO_MDSTAT_FLUSH,
+ NULL, NULL, 0, update_every,
+ NETDATA_EBPF_MODULE_NAME_MDFLUSH
+ );
+
+ fflush(stdout);
+}
+
+// callback for avl tree traversal on `mdflush_pub`.
+static int mdflush_write_dims(void *entry, void *data)
+{
+ UNUSED(data);
+
+ netdata_mdflush_t *v = entry;
+
+ // records get dynamically added in, so add the dim if we haven't yet.
+ if (!v->dim_exists) {
+ ebpf_write_global_dimension(
+ v->disk_name, v->disk_name,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
+ );
+ v->dim_exists = true;
+ }
+
+ write_chart_dimension(v->disk_name, v->cnt);
+
+ return 1;
+}
+
+/**
+ * Main loop for this collector.
+ */
+static void mdflush_collector(ebpf_module_t *em)
+{
+ mdflush_ebpf_vals = callocz(ebpf_nprocs, sizeof(mdflush_ebpf_val_t));
+
+ avl_init_lock(&mdflush_pub, mdflush_val_cmp);
+
+ // create reader thread.
+ mdflush_threads.thread = mallocz(sizeof(netdata_thread_t));
+ mdflush_threads.start_routine = mdflush_reader;
+ netdata_thread_create(
+ mdflush_threads.thread,
+ mdflush_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE,
+ mdflush_reader,
+ em
+ );
+
+ // create chart and static dims.
+ pthread_mutex_lock(&lock);
+ mdflush_create_charts(em->update_every);
+ pthread_mutex_unlock(&lock);
+
+ // loop and read from published data until ebpf plugin is closed.
+ int update_every = em->update_every;
+ int counter = update_every - 1;
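+    // start the counter at update_every - 1 so the charts are written on the first wakeup.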
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
+
+ // write dims now for all hitherto discovered devices.
+ write_begin_chart("mdstat", "mdstat_flush");
+ avl_traverse_lock(&mdflush_pub, mdflush_write_dims, NULL);
+ write_end_chart();
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/**
+ * mdflush thread.
+ *
+ * @param ptr an `ebpf_module_t *`.
+ * @return always NULL.
+ */
+void *ebpf_mdflush_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(mdflush_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = mdflush_maps;
+
+ if (!em->enabled) {
+ goto endmdflush;
+ }
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endmdflush;
+ }
+
+ mdflush_collector(em);
+
+endmdflush:
+ netdata_thread_cleanup_pop(1);
+
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_mdflush.h b/collectors/ebpf.plugin/ebpf_mdflush.h
new file mode 100644
index 000000000..59856ad67
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_mdflush.h
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_MDFLUSH_H
+#define NETDATA_EBPF_MDFLUSH_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_MDFLUSH "mdflush"
+
+#define NETDATA_MDFLUSH_SLEEP_MS 850000ULL
+
+// charts
+#define NETDATA_MDFLUSH_GLOBAL_CHART "mdflush"
+
+// configuration file
+#define NETDATA_DIRECTORY_MDFLUSH_CONFIG_FILE "mdflush.conf"
+
+// copy of mdflush types from kernel-collectors repo.
+typedef uint32_t mdflush_ebpf_key_t;
+typedef uint64_t mdflush_ebpf_val_t;
+
+typedef struct netdata_mdflush {
+ // must be at top for simplified AVL tree usage.
+ // if it's not at the top, we need to use `containerof` for almost all ops.
+ avl_t avl;
+
+ // key & name of device.
+    // the name is generated from the key, usually as `md<unit>`.
+ uint32_t unit;
+ char disk_name[32];
+
+ // have we defined the dimension for this device yet?
+ bool dim_exists;
+
+ // incremental flush count value.
+ uint64_t cnt;
+} netdata_mdflush_t;
+
+extern void *ebpf_mdflush_thread(void *ptr);
+
+extern struct config mdflush_config;
+
+#endif /* NETDATA_EBPF_MDFLUSH_H */
diff --git a/collectors/ebpf.plugin/ebpf_mount.c b/collectors/ebpf.plugin/ebpf_mount.c
new file mode 100644
index 000000000..46f323471
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_mount.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_mount.h"
+
+static ebpf_local_maps_t mount_maps[] = {{.name = "tbl_mount", .internal_input = NETDATA_MOUNT_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+
+static char *mount_dimension_name[NETDATA_EBPF_MOUNT_SYSCALL] = { "mount", "umount" };
+static netdata_syscall_stat_t mount_aggregated_data[NETDATA_EBPF_MOUNT_SYSCALL];
+static netdata_publish_syscall_t mount_publish_aggregated[NETDATA_EBPF_MOUNT_SYSCALL];
+
+struct config mount_config = { .first_section = NULL, .last_section = NULL, .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = {.avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static int read_thread_closed = 1;
+static netdata_idx_t *mount_values = NULL;
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+static netdata_idx_t mount_hash_values[NETDATA_MOUNT_END];
+
+struct netdata_static_thread mount_thread = {"MOUNT KERNEL",
+ NULL, NULL, 1, NULL,
+ NULL, NULL};
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_mount_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ freez(mount_thread.thread);
+ freez(mount_values);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+        size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ *
+ * MAIN LOOP
+ *
+ *****************************************************************/
+
+/**
+ * Read global table
+ *
+ * Read the table with number of calls for all functions
+ */
+static void read_global_table()
+{
+ uint32_t idx;
+ netdata_idx_t *val = mount_hash_values;
+ netdata_idx_t *stored = mount_values;
+ int fd = mount_maps[NETDATA_KEY_MOUNT_TABLE].map_fd;
+
+ for (idx = NETDATA_KEY_MOUNT_CALL; idx < NETDATA_MOUNT_END; idx++) {
+ if (!bpf_map_lookup_elem(fd, &idx, stored)) {
+ int i;
+ int end = ebpf_nprocs;
+ netdata_idx_t total = 0;
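+            // the value is stored per CPU, so add up the entries reported by every CPU.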
+ for (i = 0; i < end; i++)
+ total += stored[i];
+
+ val[idx] = total;
+ }
+ }
+}
+
+/**
+ * Mount read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary because we cannot block the whole plugin while reading the data.
+ *
+ * @param ptr a pointer to the `ebpf_module_t` used by this collector.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_mount_read_hash(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ usec_t step = NETDATA_LATENCY_MOUNT_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_global_table();
+ }
+ read_thread_closed = 1;
+
+ return NULL;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ */
+static void ebpf_mount_send_data()
+{
+ int i, j;
+ int end = NETDATA_EBPF_MOUNT_SYSCALL;
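+    // pair each syscall's call counter with its matching error counter from the hash table.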
+ for (i = NETDATA_KEY_MOUNT_CALL, j = NETDATA_KEY_MOUNT_ERROR; i < end; i++, j++) {
+ mount_publish_aggregated[i].ncall = mount_hash_values[i];
+ mount_publish_aggregated[i].nerr = mount_hash_values[j];
+ }
+
+ write_count_chart(NETDATA_EBPF_MOUNT_CALLS, NETDATA_EBPF_MOUNT_GLOBAL_FAMILY,
+ mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL);
+
+ write_err_chart(NETDATA_EBPF_MOUNT_ERRORS, NETDATA_EBPF_MOUNT_GLOBAL_FAMILY,
+ mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL);
+}
+
+/**
+ * Main loop for this collector.
+ */
+static void mount_collector(ebpf_module_t *em)
+{
+ mount_thread.thread = mallocz(sizeof(netdata_thread_t));
+ mount_thread.start_routine = ebpf_mount_read_hash;
+ memset(mount_hash_values, 0, sizeof(mount_hash_values));
+
+ mount_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
+
+ netdata_thread_create(mount_thread.thread, mount_thread.name, NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_mount_read_hash, em);
+
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
+
+ ebpf_mount_send_data();
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ *
+ * INITIALIZE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Create mount charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_mount_charts(int update_every)
+{
+ ebpf_create_chart(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_CALLS,
+ "Calls to mount and umount syscalls.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS,
+ ebpf_create_global_dimension,
+ mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL,
+ update_every, NETDATA_EBPF_MODULE_NAME_MOUNT);
+
+ ebpf_create_chart(NETDATA_EBPF_MOUNT_GLOBAL_FAMILY, NETDATA_EBPF_MOUNT_ERRORS,
+ "Errors to mount and umount syscalls.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_EBPF_MOUNT_FAMILY,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_EBPF_MOUNT_CHARTS + 1,
+ ebpf_create_global_dimension,
+ mount_publish_aggregated, NETDATA_EBPF_MOUNT_SYSCALL,
+ update_every, NETDATA_EBPF_MODULE_NAME_MOUNT);
+
+ fflush(stdout);
+}
+
+/*****************************************************************
+ *
+ * MAIN THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Mount thread
+ *
+ * Thread responsible for loading the eBPF program and collecting mount and umount calls.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_mount_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_mount_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = mount_maps;
+
+ if (!em->enabled)
+ goto endmount;
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endmount;
+ }
+
+ int algorithms[NETDATA_EBPF_MOUNT_SYSCALL] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX };
+
+ ebpf_global_labels(mount_aggregated_data, mount_publish_aggregated, mount_dimension_name, mount_dimension_name,
+ algorithms, NETDATA_EBPF_MOUNT_SYSCALL);
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_mount_charts(em->update_every);
+ pthread_mutex_unlock(&lock);
+
+ mount_collector(em);
+
+endmount:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_mount.h b/collectors/ebpf.plugin/ebpf_mount.h
new file mode 100644
index 000000000..700bea13b
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_mount.h
@@ -0,0 +1,36 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_MOUNT_H
+#define NETDATA_EBPF_MOUNT_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_MOUNT "mount"
+
+#define NETDATA_EBPF_MOUNT_SYSCALL 2
+
+#define NETDATA_LATENCY_MOUNT_SLEEP_MS 700000ULL
+
+#define NETDATA_EBPF_MOUNT_CALLS "call"
+#define NETDATA_EBPF_MOUNT_ERRORS "error"
+#define NETDATA_EBPF_MOUNT_FAMILY "mount (eBPF)"
+
+// Process configuration name
+#define NETDATA_MOUNT_CONFIG_FILE "mount.conf"
+
+enum mount_counters {
+ NETDATA_KEY_MOUNT_CALL,
+ NETDATA_KEY_UMOUNT_CALL,
+ NETDATA_KEY_MOUNT_ERROR,
+ NETDATA_KEY_UMOUNT_ERROR,
+
+ NETDATA_MOUNT_END
+};
+
+enum mount_tables {
+ NETDATA_KEY_MOUNT_TABLE
+};
+
+extern struct config mount_config;
+extern void *ebpf_mount_thread(void *ptr);
+
+#endif /* NETDATA_EBPF_MOUNT_H */
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
new file mode 100644
index 000000000..7f7df36f9
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -0,0 +1,400 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_oomkill.h"
+
+struct config oomkill_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+#define OOMKILL_MAP_KILLCNT 0
+static ebpf_local_maps_t oomkill_maps[] = {
+ {
+ .name = "tbl_oomkill",
+ .internal_input = NETDATA_OOMKILL_MAX_ENTRIES,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ },
+ /* end */
+ {
+ .name = NULL,
+ .internal_input = 0,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ }
+};
+
+static ebpf_tracepoint_t oomkill_tracepoints[] = {
+ {.enabled = false, .class = "oom", .event = "mark_victim"},
+ /* end */
+ {.enabled = false, .class = NULL, .event = NULL}
+};
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+static netdata_publish_syscall_t oomkill_publish_aggregated = {.name = "oomkill", .dimension = "oomkill",
+ .algorithm = "absolute",
+ .next = NULL};
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void oomkill_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled) {
+ return;
+ }
+
+ if (probe_links) {
+ struct bpf_program *prog;
+        size_t i = 0;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+static void oomkill_write_data(int32_t *keys, uint32_t total)
+{
+    // for each app, check whether it was OOM killed; record 1 if so, otherwise 0.
+ struct target *w;
+ for (w = apps_groups_root_target; w != NULL; w = w->next) {
+ if (likely(w->exposed && w->processes)) {
+ bool was_oomkilled = false;
+ struct pid_on_target *pids = w->root_pid;
+ while (pids) {
+ uint32_t j;
+ for (j = 0; j < total; j++) {
+ if (pids->pid == keys[j]) {
+ was_oomkilled = true;
+ // set to 0 so we consider it "done".
+ keys[j] = 0;
+ goto write_dim;
+ }
+ }
+ pids = pids->next;
+ }
+
+ write_dim:;
+ write_chart_dimension(w->name, was_oomkilled);
+ }
+ }
+
+    // some keys may remain for which we couldn't find a group. this can happen
+    // for various reasons, but the primary one is that the PID has not yet
+    // been picked up by the process thread while parsing the proc filesystem.
+    // since the process has been OOM killed, it will never be parsed in the
+    // future, so we have no choice but to count it under `other`.
+ uint32_t j;
+ uint32_t rem_count = 0;
+ for (j = 0; j < total; j++) {
+ int32_t key = keys[j];
+ if (key != 0) {
+ rem_count += 1;
+ }
+ }
+ if (rem_count > 0) {
+ write_chart_dimension("other", rem_count);
+ }
+}
+
+/**
+ * Create specific OOMkill charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_specific_oomkill_charts(char *type, int update_every)
+{
+ ebpf_create_chart(type, NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.",
+ EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP,
+ NULL, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5600,
+ ebpf_create_global_dimension,
+ &oomkill_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_OOMKILL);
+}
+
+/**
+ * Create Systemd OOMkill Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_oomkill_charts(int update_every)
+{
+ ebpf_create_charts_on_systemd(NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.",
+ EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, 20191,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
+ NETDATA_EBPF_MODULE_NAME_OOMKILL, update_every);
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @return It returns the chart creation status: zero is returned when a specific dimension must be removed
+ *         (so the charts are recreated), otherwise it returns 1 to avoid recreating the charts
+ */
+static int ebpf_send_systemd_oomkill_charts()
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_OOMKILL_CHART);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long) ect->oomkill);
+ ect->oomkill = 0;
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ return ret;
+}
+
+/**
+ * Send Specific OOMkill data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type chart type
+ * @param value value for oomkill
+ */
+static void ebpf_send_specific_oomkill_data(char *type, int value)
+{
+ write_begin_chart(type, NETDATA_OOMKILL_CHART);
+ write_chart_dimension(oomkill_publish_aggregated.name, (long long)value);
+ write_end_chart();
+}
+
+/**
+ * Obsolete specific OOMkill charts
+ *
+ * Mark the charts for a cgroup/application as obsolete.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_specific_oomkill_charts(char *type, int update_every)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_OOMKILL_CHART, "OOM kills. This chart is provided by eBPF plugin.",
+ EBPF_COMMON_DIMENSION_KILLS, NETDATA_EBPF_MEMORY_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5600, update_every);
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+void ebpf_oomkill_send_cgroup_data(int update_every)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+ if (has_systemd) {
+ static int systemd_charts = 0;
+ if (!systemd_charts) {
+ ebpf_create_systemd_oomkill_charts(update_every);
+ systemd_charts = 1;
+ }
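+        // ebpf_send_systemd_oomkill_charts() returns zero when a dimension must be removed,
+        // so the charts are recreated on the next iteration.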
+ systemd_charts = ebpf_send_systemd_oomkill_charts();
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART) && ect->updated) {
+ ebpf_create_specific_oomkill_charts(ect->name, update_every);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART && ect->updated) {
+ ebpf_send_specific_oomkill_data(ect->name, ect->oomkill);
+ } else {
+ ebpf_obsolete_specific_oomkill_charts(ect->name, update_every);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_OOMKILL_CHART;
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Read data
+ *
+ * Read OOMKILL events from table.
+ *
+ * @param keys vector where data will be stored
+ *
+ * @return It returns the number of read elements
+ */
+static uint32_t oomkill_read_data(int32_t *keys)
+{
+ // the first `i` entries of `keys` will contain the currently active PIDs
+ // in the eBPF map.
+ uint32_t i = 0;
+
+ uint32_t curr_key = 0;
+ uint32_t key = 0;
+ int mapfd = oomkill_maps[OOMKILL_MAP_KILLCNT].map_fd;
+ while (bpf_map_get_next_key(mapfd, &curr_key, &key) == 0) {
+ curr_key = key;
+
+ keys[i] = (int32_t)key;
+ i += 1;
+
+ // delete this key now that we've recorded its existence. there's no
+ // race here, as the same PID will only get OOM killed once.
+ int test = bpf_map_delete_elem(mapfd, &key);
+ if (unlikely(test < 0)) {
+ // since there's only 1 thread doing these deletions, it should be
+ // impossible to get this condition.
+ error("key unexpectedly not available for deletion.");
+ }
+ }
+
+ return i;
+}
+
+/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the PIDs that received an OOM kill event.
+ *
+ * @param keys vector with pids that had oomkill event
+ * @param total number of elements in keys vector.
+ */
+static void ebpf_update_oomkill_cgroup(int32_t *keys, uint32_t total)
+{
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ ect->oomkill = 0;
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ uint32_t j;
+ int32_t pid = pids->pid;
+ for (j = 0; j < total; j++) {
+ if (pid == keys[j]) {
+ ect->oomkill = 1;
+ break;
+ }
+ }
+ }
+ }
+}
+
+/**
+ * Main loop for this collector.
+ */
+static void oomkill_collector(ebpf_module_t *em)
+{
+ int cgroups = em->cgroup_charts;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ int32_t keys[NETDATA_OOMKILL_MAX_ENTRIES];
+ memset(keys, 0, sizeof(keys));
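+    // keys receives the PIDs that had an OOM kill event during the current iteration.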
+
+ // loop and read until ebpf plugin is closed.
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
+
+ uint32_t count = oomkill_read_data(keys);
+ if (cgroups && count)
+ ebpf_update_oomkill_cgroup(keys, count);
+
+ // write everything from the ebpf map.
+ if (cgroups)
+ ebpf_oomkill_send_cgroup_data(update_every);
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_OOMKILL_CHART);
+ oomkill_write_data(keys, count);
+ write_end_chart();
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+ struct target *root = ptr;
+ ebpf_create_charts_on_apps(NETDATA_OOMKILL_CHART,
+ "OOM kills",
+ EBPF_COMMON_DIMENSION_KILLS,
+ "mem",
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20020,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_OOMKILL);
+}
+
+/**
+ * OOM kill tracking thread.
+ *
+ * @param ptr an `ebpf_module_t *`.
+ * @return always NULL.
+ */
+void *ebpf_oomkill_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(oomkill_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = oomkill_maps;
+
+ if (!em->enabled) {
+ goto endoomkill;
+ }
+
+ if (ebpf_enable_tracepoints(oomkill_tracepoints) == 0) {
+ em->enabled = CONFIG_BOOLEAN_NO;
+ goto endoomkill;
+ }
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endoomkill;
+ }
+
+ oomkill_collector(em);
+
+endoomkill:
+ netdata_thread_cleanup_pop(1);
+
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.h b/collectors/ebpf.plugin/ebpf_oomkill.h
new file mode 100644
index 000000000..86f9463dd
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_oomkill.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_OOMKILL_H
+#define NETDATA_EBPF_OOMKILL_H 1
+
+/*****************************************************************
+ * copied from kernel-collectors repo, with modifications needed
+ * for inclusion here.
+ *****************************************************************/
+
+#define NETDATA_OOMKILL_MAX_ENTRIES 64
+
+typedef uint8_t oomkill_ebpf_val_t;
+
+/*****************************************************************
+ * below this is eBPF plugin-specific code.
+ *****************************************************************/
+
+#define NETDATA_EBPF_MODULE_NAME_OOMKILL "oomkill"
+#define NETDATA_OOMKILL_SLEEP_MS 650000ULL
+#define NETDATA_OOMKILL_CONFIG_FILE "oomkill.conf"
+
+#define NETDATA_OOMKILL_CHART "oomkills"
+
+extern struct config oomkill_config;
+extern void *ebpf_oomkill_thread(void *ptr);
+extern void ebpf_oomkill_create_apps_charts(struct ebpf_module *em, void *ptr);
+
+#endif /* NETDATA_EBPF_OOMKILL_H */
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 9b15c8407..a4a6709e8 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -11,29 +11,42 @@
*
*****************************************************************/
-static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "open", "close", "delete", "read", "write",
- "process", "task", "process", "thread" };
-static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_sys_open", "__close_fd", "vfs_unlink",
- "vfs_read", "vfs_write", "do_exit",
- "release_task", "_do_fork", "sys_clone" };
+static char *process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "process", "task", "process", "thread" };
+static char *process_id_names[NETDATA_KEY_PUBLISH_PROCESS_END] = { "do_exit", "release_task", "_do_fork", "sys_clone" };
static char *status[] = { "process", "zombie" };
static ebpf_local_maps_t process_maps[] = {{.name = "tbl_pid_stats", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
- .user_input = 0},
- {.name = NULL, .internal_input = 0, .user_input = 0}};
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_total_stats", .internal_input = NETDATA_KEY_END_VECTOR,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "process_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+
+char *tracepoint_sched_type = { "sched" };
+char *tracepoint_sched_process_exit = { "sched_process_exit" };
+char *tracepoint_sched_process_exec = { "sched_process_exec" };
+char *tracepoint_sched_process_fork = { "sched_process_fork" };
+static int was_sched_process_exit_enabled = 0;
+static int was_sched_process_exec_enabled = 0;
+static int was_sched_process_fork_enabled = 0;
static netdata_idx_t *process_hash_values = NULL;
static netdata_syscall_stat_t process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_END];
static netdata_publish_syscall_t process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_END];
-static ebpf_data_t process_data;
-
ebpf_process_stat_t **global_process_stats = NULL;
ebpf_process_publish_apps_t **current_apps_data = NULL;
int process_enabled = 0;
-static int *map_fd = NULL;
static struct bpf_object *objects = NULL;
static struct bpf_link **probe_links = NULL;
@@ -43,6 +56,8 @@ struct config process_config = { .first_section = NULL,
.index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
.rwlock = AVL_LOCK_INITIALIZER } };
+static struct netdata_static_thread cgroup_thread = {"EBPF CGROUP", NULL, NULL,
+ 1, NULL, NULL, NULL};
/*****************************************************************
*
* PROCESS DATA AND SEND TO NETDATA
@@ -56,40 +71,30 @@ struct config process_config = { .first_section = NULL,
* @param pvc the second output structure with correlated dimensions
* @param input the structure with the input data.
*/
-static void ebpf_update_global_publish(
- netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc, netdata_syscall_stat_t *input)
+static void ebpf_update_global_publish(netdata_publish_syscall_t *publish, netdata_publish_vfs_common_t *pvc,
+ netdata_syscall_stat_t *input)
{
netdata_publish_syscall_t *move = publish;
- int selector = NETDATA_KEY_PUBLISH_PROCESS_OPEN;
+ int selector = NETDATA_KEY_PUBLISH_PROCESS_EXIT;
while (move) {
- // Until NETDATA_KEY_PUBLISH_PROCESS_READ we are creating accumulators, so it is possible
- // to use incremental charts, but after this we will do some math with the values, so we are storing
- // absolute values
- if (selector < NETDATA_KEY_PUBLISH_PROCESS_READ) {
- move->ncall = input->call;
- move->nbyte = input->bytes;
- move->nerr = input->ecall;
- } else {
- move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
- move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
- move->nerr = (input->ecall > move->nerr) ? input->ecall - move->perr : move->perr - input->ecall;
+ move->ncall = (input->call > move->pcall) ? input->call - move->pcall : move->pcall - input->call;
+ move->nbyte = (input->bytes > move->pbyte) ? input->bytes - move->pbyte : move->pbyte - input->bytes;
+ move->nerr = (input->ecall > move->nerr) ? input->ecall - move->perr : move->perr - input->ecall;
- move->pcall = input->call;
- move->pbyte = input->bytes;
- move->perr = input->ecall;
- }
+ move->pcall = input->call;
+ move->pbyte = input->bytes;
+ move->perr = input->ecall;
input = input->next;
move = move->next;
selector++;
}
- pvc->write = -((long)publish[NETDATA_KEY_PUBLISH_PROCESS_WRITE].nbyte);
- pvc->read = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_READ].nbyte;
-
- pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall - (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
+ pvc->running = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_FORK].ncall -
+ (long)publish[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ncall;
publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall = -publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
- pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall + (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
+ pvc->zombie = (long)publish[NETDATA_KEY_PUBLISH_PROCESS_EXIT].ncall +
+ (long)publish[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].ncall;
}
/**
@@ -109,7 +114,7 @@ static void write_status_chart(char *family, netdata_publish_vfs_common_t *pvc)
}
/**
- * Send data to Netdata calling auxiliar functions.
+ * Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/
@@ -118,33 +123,16 @@ static void ebpf_process_send_data(ebpf_module_t *em)
netdata_publish_vfs_common_t pvc;
ebpf_update_global_publish(process_publish_aggregated, &pvc, process_aggregated_data);
- write_count_chart(
- NETDATA_FILE_OPEN_CLOSE_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
-
- write_count_chart(
- NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_DEL_START], 1);
-
- write_count_chart(
- NETDATA_VFS_FILE_IO_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_IN_START_BYTE], 2);
-
- write_count_chart(
- NETDATA_EXIT_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_EXIT_START], 2);
- write_count_chart(
- NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+ write_count_chart(NETDATA_EXIT_SYSCALL, NETDATA_EBPF_SYSTEM_GROUP,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT], 2);
+ write_count_chart(NETDATA_PROCESS_SYSCALL, NETDATA_EBPF_SYSTEM_GROUP,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
- write_status_chart(NETDATA_EBPF_FAMILY, &pvc);
+ write_status_chart(NETDATA_EBPF_SYSTEM_GROUP, &pvc);
if (em->mode < MODE_ENTRY) {
- write_err_chart(
- NETDATA_FILE_OPEN_ERR_COUNT, NETDATA_EBPF_FAMILY, process_publish_aggregated, 2);
- write_err_chart(
- NETDATA_VFS_FILE_ERR_COUNT, NETDATA_EBPF_FAMILY, &process_publish_aggregated[2], NETDATA_VFS_ERRORS);
- write_err_chart(
- NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_FAMILY, &process_publish_aggregated[NETDATA_PROCESS_START], 2);
+ write_err_chart(NETDATA_PROCESS_ERROR_NAME, NETDATA_EBPF_SYSTEM_GROUP,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK], 2);
}
-
- write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_EBPF_FAMILY,
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE], (long long) pvc.write,
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ], (long long)pvc.read);
}
/**
@@ -180,7 +168,7 @@ long long ebpf_process_sum_values_for_pids(struct pid_on_target *root, size_t of
void ebpf_process_remove_pids()
{
struct pid_stat *pids = root_of_pids;
- int pid_fd = map_fd[0];
+ int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
while (pids) {
uint32_t pid = pids->pid;
ebpf_process_stat_t *w = global_process_stats[pid];
@@ -197,163 +185,65 @@ void ebpf_process_remove_pids()
}
/**
- * Send data to Netdata calling auxiliar functions.
+ * Send data to Netdata calling auxiliary functions.
*
- * @param em the structure with thread information
* @param root the target list.
*/
-void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
+void ebpf_process_send_apps_data(struct target *root, ebpf_module_t *em)
{
struct target *w;
collected_number value;
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_open));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- if (em->mode < MODE_ENTRY) {
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_sys_open));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
- }
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSED);
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_close_fd));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, create_process));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
- if (em->mode < MODE_ENTRY) {
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_close_fd));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
- }
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_vfs_unlink));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, create_thread));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, call_write));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
+ call_do_exit));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
- if (em->mode < MODE_ENTRY) {
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_write));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
- }
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_read));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
+ call_release_task));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
if (em->mode < MODE_ENTRY) {
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR);
for (w = root; w; w = w->next) {
if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, ecall_read));
+ value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
+ task_err));
write_chart_dimension(w->name, value);
}
}
write_end_chart();
}
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_written));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(
- w->root_pid, offsetof(ebpf_process_publish_apps_t, bytes_read));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_do_fork));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value =
- ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t, call_sys_clone));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
- write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE);
- for (w = root; w; w = w->next) {
- if (unlikely(w->exposed && w->processes)) {
- value = ebpf_process_sum_values_for_pids(w->root_pid, offsetof(ebpf_process_publish_apps_t,
- call_release_task));
- write_chart_dimension(w->name, value);
- }
- }
- write_end_chart();
-
ebpf_process_remove_pids();
}
@@ -369,14 +259,15 @@ void ebpf_process_send_apps_data(ebpf_module_t *em, struct target *root)
static void read_hash_global_tables()
{
uint64_t idx;
- netdata_idx_t res[NETDATA_GLOBAL_VECTOR];
+ netdata_idx_t res[NETDATA_KEY_END_VECTOR];
netdata_idx_t *val = process_hash_values;
- for (idx = 0; idx < NETDATA_GLOBAL_VECTOR; idx++) {
- if (!bpf_map_lookup_elem(map_fd[1], &idx, val)) {
+ int fd = process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd;
+ for (idx = 0; idx < NETDATA_KEY_END_VECTOR; idx++) {
+ if (!bpf_map_lookup_elem(fd, &idx, val)) {
uint64_t total = 0;
int i;
- int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ int end = ebpf_nprocs;
for (i = 0; i < end; i++)
total += val[i];
@@ -386,28 +277,13 @@ static void read_hash_global_tables()
}
}
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].call = res[NETDATA_KEY_CALLS_DO_SYS_OPEN];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].call = res[NETDATA_KEY_CALLS_CLOSE_FD];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].call = res[NETDATA_KEY_CALLS_VFS_UNLINK];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].call = res[NETDATA_KEY_CALLS_VFS_READ] + res[NETDATA_KEY_CALLS_VFS_READV];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].call = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].call = res[NETDATA_KEY_CALLS_DO_FORK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].call = res[NETDATA_KEY_CALLS_SYS_CLONE];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_OPEN].ecall = res[NETDATA_KEY_ERROR_DO_SYS_OPEN];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLOSE].ecall = res[NETDATA_KEY_ERROR_CLOSE_FD];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_UNLINK].ecall = res[NETDATA_KEY_ERROR_VFS_UNLINK];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].ecall = res[NETDATA_KEY_ERROR_VFS_READ] + res[NETDATA_KEY_ERROR_VFS_READV];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].ecall = res[NETDATA_KEY_ERROR_VFS_WRITE] + res[NETDATA_KEY_ERROR_VFS_WRITEV];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_FORK].ecall = res[NETDATA_KEY_ERROR_DO_FORK];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_CLONE].ecall = res[NETDATA_KEY_ERROR_SYS_CLONE];
-
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
- (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
- process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
- (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
}
/**
@@ -431,67 +307,53 @@ static void ebpf_process_update_apps_data()
}
//Read data
- cad->call_sys_open = ps->open_call;
- cad->call_close_fd = ps->close_call;
- cad->call_vfs_unlink = ps->unlink_call;
- cad->call_read = ps->read_call + ps->readv_call;
- cad->call_write = ps->write_call + ps->writev_call;
cad->call_do_exit = ps->exit_call;
cad->call_release_task = ps->release_call;
- cad->call_do_fork = ps->fork_call;
- cad->call_sys_clone = ps->clone_call;
+ cad->create_process = ps->create_process;
+ cad->create_thread = ps->create_thread;
- cad->ecall_sys_open = ps->open_err;
- cad->ecall_close_fd = ps->close_err;
- cad->ecall_vfs_unlink = ps->unlink_err;
- cad->ecall_read = ps->read_err + ps->readv_err;
- cad->ecall_write = ps->write_err + ps->writev_err;
- cad->ecall_do_fork = ps->fork_err;
- cad->ecall_sys_clone = ps->clone_err;
-
- cad->bytes_written = (uint64_t)ps->write_bytes + (uint64_t)ps->write_bytes;
- cad->bytes_read = (uint64_t)ps->read_bytes + (uint64_t)ps->readv_bytes;
+ cad->task_err = ps->task_err;
pids = pids->next;
}
}
-/*****************************************************************
- *
- * FUNCTIONS TO CREATE CHARTS
- *
- *****************************************************************/
-
/**
- * Create IO chart
+ * Update cgroup
*
- * @param family the chart family
- * @param name the chart name
- * @param axis the axis label
- * @param web the group name used to attach the chart on dashboard
- * @param order the order number of the specified chart
- * @param algorithm the algorithm used to make the charts.
+ * Update cgroup data based on the PID table and previously collected process statistics.
*/
-static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web, int order, int algorithm)
+static void ebpf_update_process_cgroup()
{
- printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d\n",
- family,
- name,
- axis,
- web,
- order,
- update_every);
-
- printf("DIMENSION %s %s %s 1 1\n",
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
- process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_READ],
- ebpf_algorithms[algorithm]);
- printf("DIMENSION %s %s %s 1 1\n",
- process_id_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
- process_dimension_names[NETDATA_KEY_PUBLISH_PROCESS_WRITE],
- ebpf_algorithms[algorithm]);
+    ebpf_cgroup_target_t *ect;
+ int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ ebpf_process_stat_t *out = &pids->ps;
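+            // prefer stats already collected for apps; otherwise read the PID table directly.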
+ if (global_process_stats[pid]) {
+ ebpf_process_stat_t *in = global_process_stats[pid];
+
+ memcpy(out, in, sizeof(ebpf_process_stat_t));
+ } else {
+ if (bpf_map_lookup_elem(pid_fd, &pid, out)) {
+ memset(out, 0, sizeof(ebpf_process_stat_t));
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
}
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
/**
* Create process status chart
*
@@ -500,11 +362,12 @@ static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web
* @param axis the axis label
* @param web the group name used to attach the chart on dashboard
* @param order the order number of the specified chart
+ * @param update_every value to overwrite the update frequency set by the server.
*/
static void ebpf_process_status_chart(char *family, char *name, char *axis,
- char *web, char *algorithm, int order)
+ char *web, char *algorithm, int order, int update_every)
{
- printf("CHART %s.%s '' 'Process not closed' '%s' '%s' '' line %d %d ''\n",
+ printf("CHART %s.%s '' 'Process not closed' '%s' '%s' '' line %d %d '' 'ebpf.plugin' 'process'\n",
family,
name,
axis,
@@ -525,119 +388,49 @@ static void ebpf_process_status_chart(char *family, char *name, char *axis,
*/
static void ebpf_create_global_charts(ebpf_module_t *em)
{
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_FILE_OPEN_CLOSE_COUNT,
- "Open and close calls",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_FILE_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21000,
- ebpf_create_global_dimension,
- process_publish_aggregated,
- 2);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_FILE_OPEN_ERR_COUNT,
- "Open fails",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_FILE_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21001,
- ebpf_create_global_dimension,
- process_publish_aggregated,
- 2);
- }
-
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_FILE_CLEAN_COUNT,
- "Remove files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21002,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_DEL_START],
- 1);
-
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_FILE_IO_COUNT,
- "Calls to IO",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21003,
- ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_IN_START_BYTE],
- 2);
-
- ebpf_create_io_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_VFS_GROUP,
- 21004,
- NETDATA_EBPF_ABSOLUTE_IDX);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
- NETDATA_VFS_FILE_ERR_COUNT,
- "Fails to write or read",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_VFS_GROUP,
- NULL,
- NETDATA_EBPF_CHART_TYPE_LINE,
- 21005,
- ebpf_create_global_dimension,
- &process_publish_aggregated[2],
- NETDATA_VFS_ERRORS);
- }
-
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP,
NETDATA_PROCESS_SYSCALL,
"Start process",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- 21006,
+ 21002,
ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_PROCESS_START],
- 2);
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP,
NETDATA_EXIT_SYSCALL,
"Exit process",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- 21007,
+ 21003,
ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_EXIT_START],
- 2);
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_process_status_chart(NETDATA_EBPF_FAMILY,
+ ebpf_process_status_chart(NETDATA_EBPF_SYSTEM_GROUP,
NETDATA_PROCESS_STATUS_NAME,
EBPF_COMMON_DIMENSION_DIFFERENCE,
NETDATA_PROCESS_GROUP,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- 21008);
+ 21004, em->update_every);
if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP,
NETDATA_PROCESS_ERROR_NAME,
"Fails to create process",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_PROCESS_GROUP,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- 21009,
+ 21005,
ebpf_create_global_dimension,
- &process_publish_aggregated[NETDATA_PROCESS_START],
- 2);
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
}
}
@@ -652,137 +445,53 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
{
struct target *root = ptr;
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN,
- "Number of open files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20061,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR,
- "Fails to open files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20062,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
- }
-
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSED,
- "Files closed",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20063,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR,
- "Fails to close files",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_FILE_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20064,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
- }
-
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
- "Files deleted",
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS,
+ "Process started",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_VFS_GROUP,
+ NETDATA_PROCESS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20065,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
- "Write to disk",
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_THREAD,
+ "Threads started",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_VFS_GROUP,
+ NETDATA_PROCESS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
20066,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- apps_groups_root_target);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
- "Fails to write",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20067,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
- }
-
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
- "Read from disk",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20068,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
-
- if (em->mode < MODE_ENTRY) {
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
- "Fails to read",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20069,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
- }
-
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
- "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_APPS_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20070,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
-
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
- "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES,
- NETDATA_APPS_VFS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20071,
- ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
-
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_PROCESS,
- "Process started",
- EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
- NETDATA_EBPF_CHART_TYPE_STACKED,
- 20072,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
- ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_THREAD,
- "Threads started",
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_EXIT,
+ "Tasks starts exit process.",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
+ NETDATA_PROCESS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
- 20073,
+ 20067,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_CLOSE,
"Tasks closed",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_APPS_PROCESS_GROUP,
+ NETDATA_PROCESS_GROUP,
NETDATA_EBPF_CHART_TYPE_STACKED,
- 20074,
+ 20068,
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_TASK_ERROR,
+ "Errors to create process or threads.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_PROCESS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20069,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX],
+ root,
+ em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ }
}
/**
@@ -790,10 +499,9 @@ void ebpf_process_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* Call ebpf_create_chart to create the charts on apps submenu.
*
- * @param em a pointer to the structure with the default values.
* @param root a pointer for the targets.
*/
-static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root)
+static void ebpf_create_apps_charts(struct target *root)
{
struct target *w;
int newly_added = 0;
@@ -831,7 +539,7 @@ static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root)
for (counter = 0; ebpf_modules[counter].thread_name; counter++) {
ebpf_module_t *current = &ebpf_modules[counter];
if (current->enabled && current->apps_charts && current->apps_routine)
- current->apps_routine(em, root);
+ current->apps_routine(current, root);
}
}
@@ -842,47 +550,440 @@ static void ebpf_create_apps_charts(ebpf_module_t *em, struct target *root)
*****************************************************************/
/**
+ * Cgroup update shm
+ *
+ * This is the thread callback.
+ * This thread is necessary because we cannot block the whole plugin while reading the data from shared memory.
+ *
+ * @param ptr unused thread argument.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_cgroup_update_shm(void *ptr)
+{
+ UNUSED(ptr);
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ usec_t step = 30 * USEC_PER_SEC;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ if (close_ebpf_plugin)
+ break;
+
+ if (!shm_ebpf_cgroup.header)
+ ebpf_map_cgroup_shared_memory();
+
+ ebpf_parse_cgroup_shm_data();
+ }
+
+ return NULL;
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param ps structure used to store data
+ * @param pids input data
+ */
+static void ebpf_process_sum_cgroup_pids(ebpf_process_stat_t *ps, struct pid_on_target2 *pids)
+{
+ ebpf_process_stat_t accumulator;
+ memset(&accumulator, 0, sizeof(accumulator));
+
+ while (pids) {
+ ebpf_process_stat_t *ps = &pids->ps;
+
+ accumulator.exit_call += ps->exit_call;
+ accumulator.release_call += ps->release_call;
+ accumulator.create_process += ps->create_process;
+ accumulator.create_thread += ps->create_thread;
+
+ accumulator.task_err += ps->task_err;
+
+ pids = pids->next;
+ }
+
+ ps->exit_call = (accumulator.exit_call >= ps->exit_call) ? accumulator.exit_call : ps->exit_call;
+ ps->release_call = (accumulator.release_call >= ps->release_call) ? accumulator.release_call : ps->release_call;
+ ps->create_process = (accumulator.create_process >= ps->create_process) ? accumulator.create_process : ps->create_process;
+ ps->create_thread = (accumulator.create_thread >= ps->create_thread) ? accumulator.create_thread : ps->create_thread;
+
+ ps->task_err = (accumulator.task_err >= ps->task_err) ? accumulator.task_err : ps->task_err;
+}
+
+/*
+ * Send Specific Process data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type chart type
+ * @param values structure with values that will be sent to netdata
+ * @param em the structure with thread information
+ */
+static void ebpf_send_specific_process_data(char *type, ebpf_process_stat_t *values, ebpf_module_t *em)
+{
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS);
+ write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK].name,
+ (long long) values->create_process);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD);
+ write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_CLONE].name,
+ (long long) values->create_thread);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT);
+ write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name,
+ (long long) values->release_call);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE);
+ write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].name,
+ (long long) values->release_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR);
+ write_chart_dimension(process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT].name,
+ (long long) values->task_err);
+ write_end_chart();
+ }
+}
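+
+/*
+ * For reference, a minimal sketch of what the helpers above are expected to
+ * emit on stdout, assuming the standard Netdata external-plugin text protocol;
+ * the cgroup name and the dimension ids below are hypothetical:
+ *
+ *   BEGIN mycontainer.process_create
+ *   SET process = 12
+ *   END
+ *   BEGIN mycontainer.thread_create
+ *   SET thread = 3
+ *   END
+ */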
+
+/**
+ * Create specific process charts
+ *
+ * Create charts for cgroup/application
+ *
+ * @param type the chart type.
+ * @param em the structure with thread information
+ */
+static void ebpf_create_specific_process_charts(char *type, ebpf_module_t *em)
+{
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000,
+ ebpf_create_global_dimension, &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_FORK],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_CLONE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ NETDATA_CGROUP_PROCESS_EXIT_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_CGROUP_GROUP,
+ NETDATA_CGROUP_PROCESS_ERROR_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004,
+ ebpf_create_global_dimension,
+ &process_publish_aggregated[NETDATA_KEY_PUBLISH_PROCESS_EXIT],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_PROCESS);
+ }
+}
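+
+/*
+ * A chart created by ebpf_create_chart() is ultimately defined with the
+ * external-plugin CHART/DIMENSION lines. A rough sketch, assuming the standard
+ * protocol layout (the cgroup name, units, priority and dimension id are
+ * illustrative):
+ *
+ *   CHART mycontainer.process_create '' 'Process started' 'calls/s'
+ *         'processes (eBPF)' 'cgroup.process_create' line <priority> <update_every>
+ *   DIMENSION process '' absolute 1 1
+ */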
+
+/**
+ * Obsolete specific process charts
+ *
+ * Obsolete charts for cgroup/application
+ *
+ * @param type the chart type.
+ * @param em the structure with thread information
+ */
+static void ebpf_obsolete_specific_process_charts(char *type, ebpf_module_t *em)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CGROUP_PROCESS_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5000,
+ em->update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CGROUP_THREAD_CREATE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5001,
+ em->update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CGROUP_PROCESS_EXIT_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5002,
+ em->update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5003,
+ em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_PROCESS_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CGROUP_PROCESS_ERROR_CONTEXT, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5004,
+ em->update_every);
+ }
+}
+
+/**
+ * Create Systemd process Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param em the structure with thread information
+ **/
+static void ebpf_create_systemd_process_charts(ebpf_module_t *em)
+{
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_PROCESS, "Process started",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20065,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_THREAD, "Threads started",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20066,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_EXIT, "Tasks starts exit process.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20067,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_CLOSE, "Tasks closed",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20068,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_TASK_ERROR, "Errors to create process or threads.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_PROCESS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20069,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_PROCESS, em->update_every);
+ }
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @param em the structure with thread information
+ *
+ * @return It returns the status of chart creation: zero when at least one service dimension must be removed
+ * (forcing the charts to be recreated on the next iteration), and 1 otherwise to avoid chart recreation.
+ */
+static int ebpf_send_systemd_process_charts(ebpf_module_t *em)
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_PROCESS);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_ps.create_process);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_THREAD);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_ps.create_thread);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_EXIT);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_ps.exit_call);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_CLOSE);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_ps.release_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_TASK_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_ps.task_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ return ret;
+}
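+
+/*
+ * Usage note: ebpf_process_send_cgroup_data() below stores this return value
+ * in a static flag, so a zero return (at least one service missing a value)
+ * makes the systemd charts be redefined with the current dimension set on the
+ * next iteration.
+ */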
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+*/
+static void ebpf_process_send_cgroup_data(ebpf_module_t *em)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_process_sum_cgroup_pids(&ect->publish_systemd_ps, ect->pids);
+ }
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+
+ if (has_systemd) {
+ static int systemd_chart = 0;
+ if (!systemd_chart) {
+ ebpf_create_systemd_process_charts(em);
+ systemd_chart = 1;
+ }
+
+ systemd_chart = ebpf_send_systemd_process_charts(em);
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART) && ect->updated) {
+ ebpf_create_specific_process_charts(ect->name, em);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART) {
+ if (ect->updated) {
+ ebpf_send_specific_process_data(ect->name, &ect->publish_systemd_ps, em);
+ } else {
+ ebpf_obsolete_specific_process_charts(ect->name, em);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_PROCESS_CHART;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Update Cgroup algorithm
+ *
+ * Change algorithm from absolute to incremental
+ */
+void ebpf_process_update_cgroup_algorithm()
+{
+ int i;
+ for (i = 0; i < NETDATA_KEY_PUBLISH_PROCESS_END; i++) {
+ netdata_publish_syscall_t *ptr = &process_publish_aggregated[i];
+ freez(ptr->algorithm);
+ ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
+ }
+}
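+
+/*
+ * Reminder on the two algorithms involved here: with "absolute" the agent
+ * charts the submitted value as is, while with "incremental" it charts the
+ * difference between consecutive collections as a per-second rate. Switching
+ * the publish vector to incremental matches the monotonic per-PID counters
+ * accumulated by ebpf_process_sum_cgroup_pids() above.
+ */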
+
+/**
* Main loop for this collector.
*
- * @param step the number of microseconds used with heart beat
* @param em the structure with thread information
*/
-static void process_collector(usec_t step, ebpf_module_t *em)
+static void process_collector(ebpf_module_t *em)
{
+ cgroup_thread.thread = mallocz(sizeof(netdata_thread_t));
+ cgroup_thread.start_routine = ebpf_cgroup_update_shm;
+
+ netdata_thread_create(cgroup_thread.thread, cgroup_thread.name, NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_cgroup_update_shm, em);
+
heartbeat_t hb;
heartbeat_init(&hb);
int publish_global = em->global_charts;
int apps_enabled = em->apps_charts;
- int pid_fd = map_fd[0];
+ int cgroups = em->cgroup_charts;
+ int thread_enabled = em->enabled;
+ if (cgroups)
+ ebpf_process_update_cgroup_algorithm();
+
+ int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!close_ebpf_plugin) {
- usec_t dt = heartbeat_next(&hb, step);
+ usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
(void)dt;
- read_hash_global_tables();
-
pthread_mutex_lock(&collect_data_mutex);
cleanup_exited_pids();
collect_data_for_all_processes(pid_fd);
- ebpf_create_apps_charts(em, apps_groups_root_target);
+ ebpf_create_apps_charts(apps_groups_root_target);
pthread_cond_broadcast(&collect_data_cond_var);
pthread_mutex_unlock(&collect_data_mutex);
- int publish_apps = 0;
- if (apps_enabled && all_pids_count > 0) {
- publish_apps = 1;
- ebpf_process_update_apps_data();
- }
+ if (++counter == update_every) {
+ counter = 0;
- pthread_mutex_lock(&lock);
- if (publish_global) {
- ebpf_process_send_data(em);
- }
+ read_hash_global_tables();
+
+ int publish_apps = 0;
+ if (all_pids_count > 0) {
+ if (apps_enabled) {
+ publish_apps = 1;
+ ebpf_process_update_apps_data();
+ }
- if (publish_apps) {
- ebpf_process_send_apps_data(em, apps_groups_root_target);
+ if (cgroups) {
+ ebpf_update_process_cgroup();
+ }
+ }
+
+ if (thread_enabled) {
+ pthread_mutex_lock(&lock);
+ if (publish_global) {
+ ebpf_process_send_data(em);
+ }
+
+ if (publish_apps) {
+ ebpf_process_send_apps_data(apps_groups_root_target, em);
+ }
+
+ if (cgroups) {
+ ebpf_process_send_cgroup_data(em);
+ }
+ pthread_mutex_unlock(&lock);
+ }
}
+
pthread_mutex_unlock(&lock);
fflush(stdout);
@@ -896,7 +997,7 @@ static void process_collector(usec_t step, ebpf_module_t *em)
*****************************************************************/
void clean_global_memory() {
- int pid_fd = map_fd[0];
+ int pid_fd = process_maps[NETDATA_PROCESS_PID_TABLE].map_fd;
struct pid_stat *pids = root_of_pids;
while (pids) {
uint32_t pid = pids->pid;
@@ -910,6 +1011,30 @@ void clean_global_memory() {
}
/**
+ * Process disable tracepoints
+ *
+ * Disable the tracepoints that the plugin itself enabled.
+ */
+static void ebpf_process_disable_tracepoints()
+{
+ char *default_message = { "Cannot disable the tracepoint" };
+ if (!was_sched_process_exit_enabled) {
+ if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit))
+ error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exit);
+ }
+
+ if (!was_sched_process_exec_enabled) {
+ if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec))
+ error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_exec);
+ }
+
+ if (!was_sched_process_fork_enabled) {
+ if (ebpf_disable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork))
+ error("%s %s/%s.", default_message, tracepoint_sched_type, tracepoint_sched_process_fork);
+ }
+}
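+
+/*
+ * Sketch of the mechanism assumed by the calls above: the enable/disable
+ * helpers are expected to toggle the tracefs "enable" switch of each
+ * tracepoint, e.g. (exact mount point may differ per system):
+ *
+ *   echo 0 > /sys/kernel/debug/tracing/events/sched/sched_process_exit/enable
+ *
+ * The was_*_enabled flags remember the state found at startup, so the plugin
+ * only disables tracepoints it enabled itself.
+ */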
+
+/**
* Clean up the main thread.
*
* @param ptr thread data.
@@ -920,7 +1045,7 @@ static void ebpf_process_cleanup(void *ptr)
heartbeat_t hb;
heartbeat_init(&hb);
- uint32_t tick = 50*USEC_PER_MS;
+ uint32_t tick = 1 * USEC_PER_SEC;
while (!finalized_threads) {
usec_t dt = heartbeat_next(&hb, tick);
UNUSED(dt);
@@ -933,15 +1058,19 @@ static void ebpf_process_cleanup(void *ptr)
freez(global_process_stats);
freez(current_apps_data);
- freez(process_data.map_fd);
+ ebpf_process_disable_tracepoints();
- struct bpf_program *prog;
- size_t i = 0 ;
- bpf_object__for_each_program(prog, objects) {
- bpf_link__destroy(probe_links[i]);
- i++;
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
}
- bpf_object__close(objects);
+
+ freez(cgroup_thread.thread);
}
/*****************************************************************
@@ -970,7 +1099,7 @@ static void ebpf_process_allocate_global_vectors(size_t length)
static void change_syscalls()
{
static char *lfork = { "do_fork" };
- process_id_names[7] = lfork;
+ process_id_names[NETDATA_KEY_PUBLISH_PROCESS_FORK] = lfork;
}
/**
@@ -979,9 +1108,7 @@ static void change_syscalls()
*/
static void set_local_pointers()
{
- map_fd = process_data.map_fd;
-
- if (process_data.isrh >= NETDATA_MINIMUM_RH_VERSION && process_data.isrh < NETDATA_RH_8)
+ if (isrh >= NETDATA_MINIMUM_RH_VERSION && isrh < NETDATA_RH_8)
change_syscalls();
}
@@ -1020,6 +1147,45 @@ static void wait_for_all_threads_die()
}
/**
+ * Enable tracepoints
+ *
+ * Enable necessary tracepoints for thread.
+ *
+ * @return It returns 0 on success and -1 otherwise
+ */
+static int ebpf_process_enable_tracepoints()
+{
+ int test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_exit);
+ if (test == -1)
+ return -1;
+ else if (!test) {
+ if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exit))
+ return -1;
+ }
+ was_sched_process_exit_enabled = test;
+
+ test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_exec);
+ if (test == -1)
+ return -1;
+ else if (!test) {
+ if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_exec))
+ return -1;
+ }
+ was_sched_process_exec_enabled = test;
+
+ test = ebpf_is_tracepoint_enabled(tracepoint_sched_type, tracepoint_sched_process_fork);
+ if (test == -1)
+ return -1;
+ else if (!test) {
+ if (ebpf_enable_tracing_values(tracepoint_sched_type, tracepoint_sched_process_fork))
+ return -1;
+ }
+ was_sched_process_fork_enabled = test;
+
+ return 0;
+}
+
+/**
* Process thread
*
* Thread used to generate process charts.
@@ -1034,31 +1200,26 @@ void *ebpf_process_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = process_maps;
+
+ if (ebpf_process_enable_tracepoints()) {
+ em->enabled = em->global_charts = em->apps_charts = em->cgroup_charts = CONFIG_BOOLEAN_NO;
+ }
process_enabled = em->enabled;
- fill_ebpf_data(&process_data);
pthread_mutex_lock(&lock);
ebpf_process_allocate_global_vectors(NETDATA_KEY_PUBLISH_PROCESS_END);
- if (ebpf_update_kernel(&process_data)) {
- pthread_mutex_unlock(&lock);
- goto endprocess;
- }
-
- ebpf_update_module(em, &process_config, NETDATA_PROCESS_CONFIG_FILE);
ebpf_update_pid_table(&process_maps[0], em);
set_local_pointers();
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, process_data.map_fd);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
if (!probe_links) {
pthread_mutex_unlock(&lock);
goto endprocess;
}
int algorithms[NETDATA_KEY_PUBLISH_PROCESS_END] = {
- NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,NETDATA_EBPF_INCREMENTAL_IDX, //open, close, unlink
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX,
- NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
+ NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX, NETDATA_EBPF_ABSOLUTE_IDX
};
ebpf_global_labels(
@@ -1071,7 +1232,7 @@ void *ebpf_process_thread(void *ptr)
pthread_mutex_unlock(&lock);
- process_collector((usec_t)(em->update_time * USEC_PER_SEC), em);
+ process_collector(em);
endprocess:
wait_for_all_threads_die();
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
index a731227e1..73421049e 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -3,73 +3,44 @@
#ifndef NETDATA_EBPF_PROCESS_H
#define NETDATA_EBPF_PROCESS_H 1
-// Groups used on Dashboard
-#define NETDATA_FILE_GROUP "File"
-#define NETDATA_VFS_GROUP "VFS"
-#define NETDATA_PROCESS_GROUP "Process"
-
-// Internal constants
-#define NETDATA_GLOBAL_VECTOR 24
-#define NETDATA_VFS_ERRORS 3
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_PROCESS "process"
-// Map index
-#define NETDATA_DEL_START 2
-#define NETDATA_IN_START_BYTE 3
-#define NETDATA_EXIT_START 5
-#define NETDATA_PROCESS_START 7
+// Groups used on Dashboard
+#define NETDATA_PROCESS_GROUP "processes"
+#define NETDATA_PROCESS_CGROUP_GROUP "processes (eBPF)"
// Global chart name
-#define NETDATA_FILE_OPEN_CLOSE_COUNT "file_descriptor"
-#define NETDATA_FILE_OPEN_ERR_COUNT "file_error"
-#define NETDATA_VFS_FILE_CLEAN_COUNT "deleted_objects"
-#define NETDATA_VFS_FILE_IO_COUNT "io"
-#define NETDATA_VFS_FILE_ERR_COUNT "io_error"
-
#define NETDATA_EXIT_SYSCALL "exit"
#define NETDATA_PROCESS_SYSCALL "process_thread"
#define NETDATA_PROCESS_ERROR_NAME "task_error"
#define NETDATA_PROCESS_STATUS_NAME "process_status"
-#define NETDATA_VFS_IO_FILE_BYTES "io_bytes"
-
// Charts created on Apps submenu
-#define NETDATA_SYSCALL_APPS_FILE_OPEN "file_open"
-#define NETDATA_SYSCALL_APPS_FILE_CLOSED "file_closed"
-#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted"
-#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call"
-#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call"
-#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes"
-#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes"
#define NETDATA_SYSCALL_APPS_TASK_PROCESS "process_create"
#define NETDATA_SYSCALL_APPS_TASK_THREAD "thread_create"
+#define NETDATA_SYSCALL_APPS_TASK_EXIT "task_exit"
#define NETDATA_SYSCALL_APPS_TASK_CLOSE "task_close"
-
-// Charts created on Apps submenu, if and only if, the return mode is active
-
-#define NETDATA_SYSCALL_APPS_FILE_OPEN_ERROR "file_open_error"
-#define NETDATA_SYSCALL_APPS_FILE_CLOSE_ERROR "file_close_error"
-#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error"
-#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error"
+#define NETDATA_SYSCALL_APPS_TASK_ERROR "task_error"
// Process configuration name
#define NETDATA_PROCESS_CONFIG_FILE "process.conf"
-// Index from kernel
-typedef enum ebpf_process_index {
- NETDATA_KEY_CALLS_DO_SYS_OPEN,
- NETDATA_KEY_ERROR_DO_SYS_OPEN,
-
- NETDATA_KEY_CALLS_VFS_WRITE,
- NETDATA_KEY_ERROR_VFS_WRITE,
- NETDATA_KEY_BYTES_VFS_WRITE,
-
- NETDATA_KEY_CALLS_VFS_READ,
- NETDATA_KEY_ERROR_VFS_READ,
- NETDATA_KEY_BYTES_VFS_READ,
+// Contexts
+#define NETDATA_CGROUP_PROCESS_CREATE_CONTEXT "cgroup.process_create"
+#define NETDATA_CGROUP_THREAD_CREATE_CONTEXT "cgroup.thread_create"
+#define NETDATA_CGROUP_PROCESS_CLOSE_CONTEXT "cgroup.task_close"
+#define NETDATA_CGROUP_PROCESS_EXIT_CONTEXT "cgroup.task_exit"
+#define NETDATA_CGROUP_PROCESS_ERROR_CONTEXT "cgroup.task_error"
- NETDATA_KEY_CALLS_VFS_UNLINK,
- NETDATA_KEY_ERROR_VFS_UNLINK,
+#define NETDATA_SYSTEMD_PROCESS_CREATE_CONTEXT "services.process_create"
+#define NETDATA_SYSTEMD_THREAD_CREATE_CONTEXT "services.thread_create"
+#define NETDATA_SYSTEMD_PROCESS_CLOSE_CONTEXT "services.task_close"
+#define NETDATA_SYSTEMD_PROCESS_EXIT_CONTEXT "services.task_exit"
+#define NETDATA_SYSTEMD_PROCESS_ERROR_CONTEXT "services.task_error"
+// Index from kernel
+typedef enum ebpf_process_index {
NETDATA_KEY_CALLS_DO_EXIT,
NETDATA_KEY_CALLS_RELEASE_TASK,
@@ -77,20 +48,10 @@ typedef enum ebpf_process_index {
NETDATA_KEY_CALLS_DO_FORK,
NETDATA_KEY_ERROR_DO_FORK,
- NETDATA_KEY_CALLS_CLOSE_FD,
- NETDATA_KEY_ERROR_CLOSE_FD,
-
NETDATA_KEY_CALLS_SYS_CLONE,
NETDATA_KEY_ERROR_SYS_CLONE,
- NETDATA_KEY_CALLS_VFS_WRITEV,
- NETDATA_KEY_ERROR_VFS_WRITEV,
- NETDATA_KEY_BYTES_VFS_WRITEV,
-
- NETDATA_KEY_CALLS_VFS_READV,
- NETDATA_KEY_ERROR_VFS_READV,
- NETDATA_KEY_BYTES_VFS_READV
-
+ NETDATA_KEY_END_VECTOR
} ebpf_process_index_t;
// This enum acts as an index for publish vector.
@@ -99,11 +60,6 @@ typedef enum ebpf_process_index {
// values (the three initial positions) and absolute values
// (the remaining charts).
typedef enum netdata_publish_process {
- NETDATA_KEY_PUBLISH_PROCESS_OPEN,
- NETDATA_KEY_PUBLISH_PROCESS_CLOSE,
- NETDATA_KEY_PUBLISH_PROCESS_UNLINK,
- NETDATA_KEY_PUBLISH_PROCESS_READ,
- NETDATA_KEY_PUBLISH_PROCESS_WRITE,
NETDATA_KEY_PUBLISH_PROCESS_EXIT,
NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK,
NETDATA_KEY_PUBLISH_PROCESS_FORK,
@@ -114,28 +70,21 @@ typedef enum netdata_publish_process {
typedef struct ebpf_process_publish_apps {
// Number of calls during the last read
- uint64_t call_sys_open;
- uint64_t call_close_fd;
- uint64_t call_vfs_unlink;
- uint64_t call_read;
- uint64_t call_write;
uint64_t call_do_exit;
uint64_t call_release_task;
- uint64_t call_do_fork;
- uint64_t call_sys_clone;
+ uint64_t create_process;
+ uint64_t create_thread;
// Number of errors during the last read
- uint64_t ecall_sys_open;
- uint64_t ecall_close_fd;
- uint64_t ecall_vfs_unlink;
- uint64_t ecall_read;
- uint64_t ecall_write;
- uint64_t ecall_do_fork;
- uint64_t ecall_sys_clone;
-
- // Number of bytes during the last read
- uint64_t bytes_written;
- uint64_t bytes_read;
+ uint64_t task_err;
} ebpf_process_publish_apps_t;
+enum ebpf_process_tables {
+ NETDATA_PROCESS_PID_TABLE,
+ NETDATA_PROCESS_GLOBAL_TABLE,
+ NETDATA_PROCESS_CTRL_TABLE
+};
+
+extern struct config process_config;
+
#endif /* NETDATA_EBPF_PROCESS_H */
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
new file mode 100644
index 000000000..156ae9aa5
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -0,0 +1,855 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_shm.h"
+
+static char *shm_dimension_name[NETDATA_SHM_END] = { "get", "at", "dt", "ctl" };
+static netdata_syscall_stat_t shm_aggregated_data[NETDATA_SHM_END];
+static netdata_publish_syscall_t shm_publish_aggregated[NETDATA_SHM_END];
+
+static int read_thread_closed = 1;
+netdata_publish_shm_t *shm_vector = NULL;
+
+static netdata_idx_t shm_hash_values[NETDATA_SHM_END];
+static netdata_idx_t *shm_values = NULL;
+
+netdata_publish_shm_t **shm_pid = NULL;
+
+struct config shm_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static ebpf_local_maps_t shm_maps[] = {{.name = "tbl_pid_shm", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "shm_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_shm", .internal_input = NETDATA_SHM_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0}};
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+struct netdata_static_thread shm_threads = {"SHM KERNEL", NULL, NULL, 1,
+ NULL, NULL, NULL};
+
+/*****************************************************************
+ * FUNCTIONS TO CLOSE THE THREAD
+ *****************************************************************/
+
+/**
+ * Clean shm structure
+ */
+void clean_shm_pid_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(shm_pid[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_shm_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled) {
+ return;
+ }
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ ebpf_cleanup_publish_syscall(shm_publish_aggregated);
+
+ freez(shm_vector);
+ freez(shm_values);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ * COLLECTOR THREAD
+ *****************************************************************/
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from kernel and store in the first address.
+ *
+ * @param out the vector with read values.
+ */
+static void shm_apps_accumulator(netdata_publish_shm_t *out)
+{
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ netdata_publish_shm_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ netdata_publish_shm_t *w = &out[i];
+ total->get += w->get;
+ total->at += w->at;
+ total->dt += w->dt;
+ total->ctl += w->ctl;
+ }
+}
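+
+/*
+ * On kernels >= 4.15 the PID table is treated as a per-CPU map, so a single
+ * lookup returns one netdata_publish_shm_t entry per CPU that must be added
+ * up. Worked example with 4 CPUs: if out[0].get == 1 and out[2].get == 2, the
+ * accumulated total stored in out[0] becomes 3 (1 + 0 + 2 + 0).
+ */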
+
+/**
+ * Fill PID
+ *
+ * Fill PID structures
+ *
+ * @param current_pid the pid for which we are collecting data
+ * @param publish values read from the hash tables
+ */
+static void shm_fill_pid(uint32_t current_pid, netdata_publish_shm_t *publish)
+{
+ netdata_publish_shm_t *curr = shm_pid[current_pid];
+ if (!curr) {
+ curr = callocz(1, sizeof(netdata_publish_shm_t));
+ shm_pid[current_pid] = curr;
+ }
+
+ memcpy(curr, publish, sizeof(netdata_publish_shm_t));
+}
+
+/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the per-PID values collected for each cgroup target.
+ */
+static void ebpf_update_shm_cgroup()
+{
+ netdata_publish_shm_t *cv = shm_vector;
+ int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
+ size_t length = sizeof(netdata_publish_shm_t) * ebpf_nprocs;
+ ebpf_cgroup_target_t *ect;
+
+ memset(cv, 0, length);
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ netdata_publish_shm_t *out = &pids->shm;
+ if (likely(shm_pid) && shm_pid[pid]) {
+ netdata_publish_shm_t *in = shm_pid[pid];
+
+ memcpy(out, in, sizeof(netdata_publish_shm_t));
+ } else {
+ if (!bpf_map_lookup_elem(fd, &pid, cv)) {
+ shm_apps_accumulator(cv);
+
+ memcpy(out, cv, sizeof(netdata_publish_shm_t));
+
+ // now that we've consumed the value, zero it out in the map.
+ memset(cv, 0, length);
+ bpf_map_update_elem(fd, &pid, cv, BPF_EXIST);
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Read APPS table
+ *
+ * Read the apps table and store data inside the structure.
+ */
+static void read_apps_table()
+{
+ netdata_publish_shm_t *cv = shm_vector;
+ uint32_t key;
+ struct pid_stat *pids = root_of_pids;
+ int fd = shm_maps[NETDATA_PID_SHM_TABLE].map_fd;
+ size_t length = sizeof(netdata_publish_shm_t)*ebpf_nprocs;
+ while (pids) {
+ key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, cv)) {
+ pids = pids->next;
+ continue;
+ }
+
+ shm_apps_accumulator(cv);
+
+ shm_fill_pid(key, cv);
+
+ // now that we've consumed the value, zero it out in the map.
+ memset(cv, 0, length);
+ bpf_map_update_elem(fd, &key, cv, BPF_EXIST);
+
+ pids = pids->next;
+ }
+}
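+
+/*
+ * Note the read-then-reset pattern above: after accumulating a PID entry the
+ * map value is zeroed with bpf_map_update_elem(), so each collection reads the
+ * calls made since the previous collection rather than a running total.
+ */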
+
+/**
+* Send global charts to netdata agent.
+*/
+static void shm_send_global()
+{
+ write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_SHM_GLOBAL_CHART);
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMGET_CALL]
+ );
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMAT_CALL]
+ );
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMDT_CALL]
+ );
+ write_chart_dimension(
+ shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].dimension,
+ (long long) shm_hash_values[NETDATA_KEY_SHMCTL_CALL]
+ );
+ write_end_chart();
+}
+
+/**
+ * Read global counter
+ *
+ * Read the table with the number of calls for all functions.
+ */
+static void read_global_table()
+{
+ netdata_idx_t *stored = shm_values;
+ netdata_idx_t *val = shm_hash_values;
+ int fd = shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd;
+
+ uint32_t i, end = NETDATA_SHM_END;
+ for (i = NETDATA_KEY_SHMGET_CALL; i < end; i++) {
+ if (!bpf_map_lookup_elem(fd, &i, stored)) {
+ int j;
+ int last = ebpf_nprocs;
+ netdata_idx_t total = 0;
+ for (j = 0; j < last; j++)
+ total += stored[j];
+
+ val[i] = total;
+ }
+ }
+}
+
+/**
+ * Shared memory reader thread.
+ *
+ * @param ptr a pointer to the ebpf_module_t structure used by this thread.
+ * @return It always returns NULL.
+ */
+void *ebpf_shm_read_hash(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ usec_t step = NETDATA_SHM_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_global_table();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Sum shared memory calls for all PIDs of a target and reset the per-PID counters.
+ */
+static void ebpf_shm_sum_pids(netdata_publish_shm_t *shm, struct pid_on_target *root)
+{
+ while (root) {
+ int32_t pid = root->pid;
+ netdata_publish_shm_t *w = shm_pid[pid];
+ if (w) {
+ shm->get += w->get;
+ shm->at += w->at;
+ shm->dt += w->dt;
+ shm->ctl += w->ctl;
+
+ // reset for next collection.
+ w->get = 0;
+ w->at = 0;
+ w->dt = 0;
+ w->ctl = 0;
+ }
+ root = root->next;
+ }
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param root the target list.
+*/
+void ebpf_shm_send_apps_data(struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ ebpf_shm_sum_pids(&w->shm, w->root_pid);
+ }
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMGET_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.get);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMAT_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.at);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMDT_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.dt);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SHMCTL_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->shm.ctl);
+ }
+ }
+ write_end_chart();
+}
+
+/**
+ * Sum shared memory calls for all PIDs of a cgroup target.
+ */
+static void ebpf_shm_sum_cgroup_pids(netdata_publish_shm_t *shm, struct pid_on_target2 *root)
+{
+ netdata_publish_shm_t shmv;
+ memset(&shmv, 0, sizeof(shmv));
+ while (root) {
+ netdata_publish_shm_t *w = &root->shm;
+ shmv.get += w->get;
+ shmv.at += w->at;
+ shmv.dt += w->dt;
+ shmv.ctl += w->ctl;
+
+ root = root->next;
+ }
+
+ memcpy(shm, &shmv, sizeof(shmv));
+}
+
+/**
+ * Create specific shared memory charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_specific_shm_charts(char *type, int update_every)
+{
+ ebpf_create_chart(type, NETDATA_SHMGET_CHART,
+ "Calls to syscall <code>shmget(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_CGROUP_SHM_GET_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5800,
+ ebpf_create_global_dimension,
+ &shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL],
+ 1,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_chart(type, NETDATA_SHMAT_CHART,
+ "Calls to syscall <code>shmat(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_CGROUP_SHM_AT_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5801,
+ ebpf_create_global_dimension,
+ &shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL],
+ 1,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_chart(type, NETDATA_SHMDT_CHART,
+ "Calls to syscall <code>shmdt(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_CGROUP_SHM_DT_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5802,
+ ebpf_create_global_dimension,
+ &shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL],
+ 1,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_chart(type, NETDATA_SHMCTL_CHART,
+ "Calls to syscall <code>shmctl(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_CGROUP_SHM_CTL_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5803,
+ ebpf_create_global_dimension,
+ &shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL],
+ 1,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_SHM);
+}
+
+/**
+ * Obsolete specific shared memory charts
+ *
+ * Obsolete charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_specific_shm_charts(char *type, int update_every)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_SHMGET_CHART,
+ "Calls to syscall <code>shmget(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_GET_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5800, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SHMAT_CHART,
+ "Calls to syscall <code>shmat(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_AT_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5801, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SHMDT_CHART,
+ "Calls to syscall <code>shmdt(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_DT_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5802, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SHMCTL_CHART,
+ "Calls to syscall <code>shmctl(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SHM_CTL_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5803, update_every);
+}
+
+/**
+ * Create Systemd Shared Memory Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_shm_charts(int update_every)
+{
+ ebpf_create_charts_on_systemd(NETDATA_SHMGET_CHART,
+ "Calls to syscall <code>shmget(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20191,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SYSTEMD_SHM_GET_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SHMAT_CHART,
+ "Calls to syscall <code>shmat(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20192,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SYSTEMD_SHM_AT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SHMDT_CHART,
+ "Calls to syscall <code>shmdt(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20193,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SYSTEMD_SHM_DT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SHMCTL_CHART,
+ "Calls to syscall <code>shmctl(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20194,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SYSTEMD_SHM_CTL_CONTEXT, NETDATA_EBPF_MODULE_NAME_SHM, update_every);
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @return It returns the status of chart creation: zero when at least one service dimension must be removed
+ * (forcing the charts to be recreated on the next iteration), and 1 otherwise to avoid chart recreation.
+ */
+static int ebpf_send_systemd_shm_charts()
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMGET_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_shm.get);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMAT_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_shm.at);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMDT_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_shm.dt);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SHMCTL_CHART);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_shm.ctl);
+ }
+ }
+ write_end_chart();
+
+ return ret;
+}
+
+/*
+ * Send Specific Shared memory data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type chart type
+ * @param values structure with values that will be sent to netdata
+ */
+static void ebpf_send_specific_shm_data(char *type, netdata_publish_shm_t *values)
+{
+ write_begin_chart(type, NETDATA_SHMGET_CHART);
+ write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMGET_CALL].name, (long long)values->get);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SHMAT_CHART);
+ write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMAT_CALL].name, (long long)values->at);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SHMDT_CHART);
+ write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMDT_CALL].name, (long long)values->dt);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SHMCTL_CHART);
+ write_chart_dimension(shm_publish_aggregated[NETDATA_KEY_SHMCTL_CALL].name, (long long)values->ctl);
+ write_end_chart();
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+*/
+void ebpf_shm_send_cgroup_data(int update_every)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_shm_sum_cgroup_pids(&ect->publish_shm, ect->pids);
+ }
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+ if (has_systemd) {
+ static int systemd_charts = 0;
+ if (!systemd_charts) {
+ ebpf_create_systemd_shm_charts(update_every);
+ systemd_charts = 1;
+ }
+
+ systemd_charts = ebpf_send_systemd_shm_charts();
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SHM_CHART) && ect->updated) {
+ ebpf_create_specific_shm_charts(ect->name, update_every);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_SHM_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SHM_CHART) {
+ if (ect->updated) {
+ ebpf_send_specific_shm_data(ect->name, &ect->publish_shm);
+ } else {
+ ebpf_obsolete_specific_shm_charts(ect->name, update_every);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SHM_CHART;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+* Main loop for this collector.
+*/
+static void shm_collector(ebpf_module_t *em)
+{
+ shm_threads.thread = mallocz(sizeof(netdata_thread_t));
+ shm_threads.start_routine = ebpf_shm_read_hash;
+
+ netdata_thread_create(
+ shm_threads.thread,
+ shm_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_shm_read_hash,
+ em
+ );
+
+ int apps = em->apps_charts;
+ int cgroups = em->cgroup_charts;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ if (apps) {
+ read_apps_table();
+ }
+
+ if (cgroups) {
+ ebpf_update_shm_cgroup();
+ }
+
+ pthread_mutex_lock(&lock);
+
+ shm_send_global();
+
+ if (apps) {
+ ebpf_shm_send_apps_data(apps_groups_root_target);
+ }
+
+ if (cgroups) {
+ ebpf_shm_send_cgroup_data(update_every);
+ }
+
+ pthread_mutex_unlock(&lock);
+ }
+
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
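+
+/*
+ * Timing note: the loop wakes on every broadcast of collect_data_cond_var
+ * (driven once per second by the process thread) and only publishes every
+ * update_every wakeups. With update_every = 5, for example, data is sent every
+ * 5 seconds; counter starts at update_every - 1 so the first pass publishes
+ * immediately.
+ */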
+
+/*****************************************************************
+ * INITIALIZE THREAD
+ *****************************************************************/
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param ptr a pointer to the target list (struct target *).
+ */
+void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+ struct target *root = ptr;
+ ebpf_create_charts_on_apps(NETDATA_SHMGET_CHART,
+ "Calls to syscall <code>shmget(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20191,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_charts_on_apps(NETDATA_SHMAT_CHART,
+ "Calls to syscall <code>shmat(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20192,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_charts_on_apps(NETDATA_SHMDT_CHART,
+ "Calls to syscall <code>shmdt(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20193,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM);
+
+ ebpf_create_charts_on_apps(NETDATA_SHMCTL_CHART,
+ "Calls to syscall <code>shmctl(2)</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_IPC_SHM_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20194,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SHM);
+}
+
+/**
+ * Allocate vectors used with this thread.
+ *
+ * We do not test the return value, because callocz already does this and shuts down
+ * the software when it is not possible to allocate memory.
+ *
+ * @param apps is apps enabled?
+ */
+static void ebpf_shm_allocate_global_vectors(int apps)
+{
+ if (apps)
+ shm_pid = callocz((size_t)pid_max, sizeof(netdata_publish_shm_t *));
+
+ shm_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_shm_t));
+
+ shm_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
+
+ memset(shm_hash_values, 0, sizeof(shm_hash_values));
+}
+
+/*****************************************************************
+ * MAIN THREAD
+ *****************************************************************/
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_shm_charts(int update_every)
+{
+ ebpf_create_chart(
+ NETDATA_EBPF_SYSTEM_GROUP,
+ NETDATA_SHM_GLOBAL_CHART,
+ "Calls to shared memory system calls.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SYSTEM_IPC_SHM_SUBMENU,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_SYSTEM_IPC_SHARED_MEM_CALLS,
+ ebpf_create_global_dimension,
+ shm_publish_aggregated,
+ NETDATA_SHM_END,
+ update_every, NETDATA_EBPF_MODULE_NAME_SHM
+ );
+
+ fflush(stdout);
+}
+
+/**
+ * Shared memory thread.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ * @return It always returns NULL.
+ */
+void *ebpf_shm_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_shm_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = shm_maps;
+
+ ebpf_update_pid_table(&shm_maps[NETDATA_PID_SHM_TABLE], em);
+
+ if (!em->enabled) {
+ goto endshm;
+ }
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endshm;
+ }
+
+ ebpf_shm_allocate_global_vectors(em->apps_charts);
+
+ int algorithms[NETDATA_SHM_END] = {
+ NETDATA_EBPF_INCREMENTAL_IDX,
+ NETDATA_EBPF_INCREMENTAL_IDX,
+ NETDATA_EBPF_INCREMENTAL_IDX,
+ NETDATA_EBPF_INCREMENTAL_IDX
+ };
+ ebpf_global_labels(
+ shm_aggregated_data,
+ shm_publish_aggregated,
+ shm_dimension_name,
+ shm_dimension_name,
+ algorithms,
+ NETDATA_SHM_END
+ );
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_shm_charts(em->update_every);
+ pthread_mutex_unlock(&lock);
+
+ shm_collector(em);
+
+endshm:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_shm.h b/collectors/ebpf.plugin/ebpf_shm.h
new file mode 100644
index 000000000..4e7e183a7
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_shm.h
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_SHM_H
+#define NETDATA_EBPF_SHM_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_SHM "shm"
+
+#define NETDATA_SHM_SLEEP_MS 850000ULL
+
+// charts
+#define NETDATA_SHM_GLOBAL_CHART "shared_memory_calls"
+#define NETDATA_SHMGET_CHART "shmget_call"
+#define NETDATA_SHMAT_CHART "shmat_call"
+#define NETDATA_SHMDT_CHART "shmdt_call"
+#define NETDATA_SHMCTL_CHART "shmctl_call"
+
+// configuration file
+#define NETDATA_DIRECTORY_SHM_CONFIG_FILE "shm.conf"
+
+// Contexts
+#define NETDATA_CGROUP_SHM_GET_CONTEXT "cgroup.shmget"
+#define NETDATA_CGROUP_SHM_AT_CONTEXT "cgroup.shmat"
+#define NETDATA_CGROUP_SHM_DT_CONTEXT "cgroup.shmdt"
+#define NETDATA_CGROUP_SHM_CTL_CONTEXT "cgroup.shmctl"
+
+#define NETDATA_SYSTEMD_SHM_GET_CONTEXT "services.shmget"
+#define NETDATA_SYSTEMD_SHM_AT_CONTEXT "services.shmat"
+#define NETDATA_SYSTEMD_SHM_DT_CONTEXT "services.shmdt"
+#define NETDATA_SYSTEMD_SHM_CTL_CONTEXT "services.shmctl"
+
+typedef struct netdata_publish_shm {
+ uint64_t get;
+ uint64_t at;
+ uint64_t dt;
+ uint64_t ctl;
+} netdata_publish_shm_t;
+
+enum shm_tables {
+ NETDATA_PID_SHM_TABLE,
+ NETDATA_SHM_CONTROLLER,
+ NETDATA_SHM_GLOBAL_TABLE
+};
+
+enum shm_counters {
+ NETDATA_KEY_SHMGET_CALL,
+ NETDATA_KEY_SHMAT_CALL,
+ NETDATA_KEY_SHMDT_CALL,
+ NETDATA_KEY_SHMCTL_CALL,
+
+ // Keep this as last and don't skip numbers as it is used as element counter
+ NETDATA_SHM_END
+};
+
+extern netdata_publish_shm_t **shm_pid;
+
+extern void *ebpf_shm_thread(void *ptr);
+extern void ebpf_shm_create_apps_charts(struct ebpf_module *em, void *ptr);
+extern void clean_shm_pid_structures();
+
+extern struct config shm_config;
+
+#endif
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index cbb4dded0..f7710ff22 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -11,31 +11,49 @@
*
*****************************************************************/
-static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "sent", "received", "close", "sent",
- "received", "retransmitted" };
-static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_sendmsg", "tcp_cleanup_rbuf", "tcp_close",
- "udp_sendmsg", "udp_recvmsg", "tcp_retransmit_skb" };
+static char *socket_dimension_names[NETDATA_MAX_SOCKET_VECTOR] = { "received", "sent", "close",
+ "received", "sent", "retransmitted" };
+static char *socket_id_names[NETDATA_MAX_SOCKET_VECTOR] = { "tcp_cleanup_rbuf", "tcp_sendmsg", "tcp_close",
+ "udp_recvmsg", "udp_sendmsg", "tcp_retransmit_skb" };
static ebpf_local_maps_t socket_maps[] = {{.name = "tbl_bandwidth",
.internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED,
- .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED},
+ .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_global_sock",
+ .internal_input = NETDATA_SOCKET_COUNTER,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_lports",
+ .internal_input = NETDATA_SOCKET_COUNTER,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
{.name = "tbl_conn_ipv4",
.internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED,
- .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED},
+ .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
{.name = "tbl_conn_ipv6",
.internal_input = NETDATA_COMPILED_CONNECTIONS_ALLOWED,
- .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED},
- {.name = "tbl_nv_udp_conn_stats",
+ .user_input = NETDATA_MAXIMUM_CONNECTIONS_ALLOWED,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_nv_udp",
.internal_input = NETDATA_COMPILED_UDP_CONNECTIONS_ALLOWED,
- .user_input = NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED},
+ .user_input = NETDATA_MAXIMUM_UDP_CONNECTIONS_ALLOWED,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "socket_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
{.name = NULL, .internal_input = 0, .user_input = 0}};
static netdata_idx_t *socket_hash_values = NULL;
static netdata_syscall_stat_t socket_aggregated_data[NETDATA_MAX_SOCKET_VECTOR];
static netdata_publish_syscall_t socket_publish_aggregated[NETDATA_MAX_SOCKET_VECTOR];
-static ebpf_data_t socket_data;
-
ebpf_socket_publish_apps_t **socket_bandwidth_curr = NULL;
static ebpf_bandwidth_t *bandwidth_vector = NULL;
@@ -50,7 +68,6 @@ netdata_socket_t *socket_values;
ebpf_network_viewer_port_list_t *listen_ports = NULL;
-static int *map_fd = NULL;
static struct bpf_object *objects = NULL;
static struct bpf_link **probe_links = NULL;
@@ -277,7 +294,7 @@ static void ebpf_socket_send_nv_data(netdata_vector_plot_t *ptr)
}
/**
- * Send data to Netdata calling auxiliar functions.
+ * Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
*/
@@ -287,32 +304,26 @@ static void ebpf_socket_send_data(ebpf_module_t *em)
netdata_publish_vfs_common_t common_udp;
ebpf_update_global_publish(socket_publish_aggregated, &common_tcp, &common_udp, socket_aggregated_data);
- // We read bytes from function arguments, but bandiwdth is given in bits,
+ // We read bytes from function arguments, but bandwidth is given in bits,
// so we need to multiply by 8 to convert for the final value.
- write_count_chart(
- NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 3);
- write_io_chart(
- NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_FAMILY, socket_id_names[0], common_tcp.write*8/1000,
- socket_id_names[1], common_tcp.read*8/1000);
+ write_count_chart(NETDATA_TCP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 3);
+ write_io_chart(NETDATA_TCP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY, socket_id_names[0],
+ common_tcp.read * 8/BITS_IN_A_KILOBIT, socket_id_names[1],
+ common_tcp.write * 8/BITS_IN_A_KILOBIT);
if (em->mode < MODE_ENTRY) {
- write_err_chart(
- NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, socket_publish_aggregated, 2);
- }
- write_count_chart(
- NETDATA_TCP_RETRANSMIT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT],
- 1);
-
- write_count_chart(
- NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],
- 2);
- write_io_chart(
- NETDATA_UDP_FUNCTION_BITS, NETDATA_EBPF_FAMILY,
- socket_id_names[3],(long long)common_udp.write*8/100,
- socket_id_names[4], (long long)common_udp.read*8/1000);
+ write_err_chart(NETDATA_TCP_FUNCTION_ERROR, NETDATA_EBPF_IP_FAMILY, socket_publish_aggregated, 2);
+ }
+ write_count_chart(NETDATA_TCP_RETRANSMIT, NETDATA_EBPF_IP_FAMILY,
+ &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT],1);
+
+ write_count_chart(NETDATA_UDP_FUNCTION_COUNT, NETDATA_EBPF_IP_FAMILY,
+ &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],2);
+ write_io_chart(NETDATA_UDP_FUNCTION_BITS, NETDATA_EBPF_IP_FAMILY,
+ socket_id_names[3], (long long)common_udp.read * 8/BITS_IN_A_KILOBIT,
+ socket_id_names[4], (long long)common_udp.write * 8/BITS_IN_A_KILOBIT);
if (em->mode < MODE_ENTRY) {
- write_err_chart(
- NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_FAMILY, &socket_publish_aggregated[NETDATA_UDP_START],
- 2);
+ write_err_chart(NETDATA_UDP_FUNCTION_ERROR, NETDATA_EBPF_IP_FAMILY,
+ &socket_publish_aggregated[NETDATA_UDP_START], 2);
}
}
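
As the comment above notes, the kernel probes report bytes while the charts plot kilobits, so values are scaled by `8 / BITS_IN_A_KILOBIT`. A minimal standalone sketch of that conversion (assuming `BITS_IN_A_KILOBIT` is 1000, as the macro name suggests; not taken from the plugin itself):

    #include <stdio.h>
    #include <stdint.h>

    #define BITS_IN_A_KILOBIT 1000 /* assumed value; matches the macro name used above */

    /* Convert a byte counter read from the kernel into kilobits for the chart. */
    static long long bytes_to_kilobits(uint64_t bytes)
    {
        return (long long)(bytes * 8 / BITS_IN_A_KILOBIT);
    }

    int main(void)
    {
        /* 125000 bytes == 1000000 bits == 1000 kilobits */
        printf("%lld kbit\n", bytes_to_kilobits(125000));
        return 0;
    }
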
@@ -342,7 +353,7 @@ long long ebpf_socket_sum_values_for_pids(struct pid_on_target *root, size_t off
}
/**
- * Send data to Netdata calling auxiliar functions.
+ * Send data to Netdata calling auxiliary functions.
*
* @param em the structure with thread information
* @param root the target list.
@@ -445,88 +456,88 @@ void ebpf_socket_send_apps_data(ebpf_module_t *em, struct target *root)
*/
static void ebpf_create_global_charts(ebpf_module_t *em)
{
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
NETDATA_TCP_FUNCTION_COUNT,
"Calls to internal functions",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_GROUP,
+ NETDATA_SOCKET_KERNEL_FUNCTIONS,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21070,
ebpf_create_global_dimension,
socket_publish_aggregated,
- 3);
+ 3, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart(NETDATA_EBPF_FAMILY, NETDATA_TCP_FUNCTION_BITS,
+ ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_TCP_FUNCTION_BITS,
"TCP bandwidth", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_SOCKET_GROUP,
+ NETDATA_SOCKET_KERNEL_FUNCTIONS,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21071,
ebpf_create_global_dimension,
socket_publish_aggregated,
- 3);
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
NETDATA_TCP_FUNCTION_ERROR,
"TCP errors",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_GROUP,
+ NETDATA_SOCKET_KERNEL_FUNCTIONS,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21072,
ebpf_create_global_dimension,
socket_publish_aggregated,
- 2);
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
}
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
NETDATA_TCP_RETRANSMIT,
"Packages retransmitted",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_GROUP,
+ NETDATA_SOCKET_KERNEL_FUNCTIONS,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21073,
ebpf_create_global_dimension,
&socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT],
- 1);
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
NETDATA_UDP_FUNCTION_COUNT,
"UDP calls",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_GROUP,
+ NETDATA_SOCKET_KERNEL_FUNCTIONS,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21074,
ebpf_create_global_dimension,
&socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],
- 2);
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
- ebpf_create_chart(NETDATA_EBPF_FAMILY, NETDATA_UDP_FUNCTION_BITS,
+ ebpf_create_chart(NETDATA_EBPF_IP_FAMILY, NETDATA_UDP_FUNCTION_BITS,
"UDP bandwidth", EBPF_COMMON_DIMENSION_BITS,
- NETDATA_SOCKET_GROUP,
+ NETDATA_SOCKET_KERNEL_FUNCTIONS,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21075,
ebpf_create_global_dimension,
&socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],
- 2);
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
if (em->mode < MODE_ENTRY) {
- ebpf_create_chart(NETDATA_EBPF_FAMILY,
+ ebpf_create_chart(NETDATA_EBPF_IP_FAMILY,
NETDATA_UDP_FUNCTION_ERROR,
"UDP errors",
EBPF_COMMON_DIMENSION_CALL,
- NETDATA_SOCKET_GROUP,
+ NETDATA_SOCKET_KERNEL_FUNCTIONS,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
21076,
ebpf_create_global_dimension,
&socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF],
- 2);
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
}
}
@@ -540,7 +551,6 @@ static void ebpf_create_global_charts(ebpf_module_t *em)
*/
void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
{
- UNUSED(em);
struct target *root = ptr;;
ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_SENT,
"Bytes sent", EBPF_COMMON_DIMENSION_BITS,
@@ -548,7 +558,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20080,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_RECV,
"bytes received", EBPF_COMMON_DIMENSION_BITS,
@@ -556,7 +566,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20081,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
"Calls for tcp_sendmsg",
@@ -565,7 +575,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20082,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
"Calls for tcp_cleanup_rbuf",
@@ -574,7 +584,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20083,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
"Calls for tcp_retransmit",
@@ -583,7 +593,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20084,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
"Calls for udp_sendmsg",
@@ -592,7 +602,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20085,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
ebpf_create_charts_on_apps(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
"Calls for udp_recvmsg",
@@ -601,7 +611,7 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
NETDATA_EBPF_CHART_TYPE_STACKED,
20086,
ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
- root);
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
socket_apps_created = 1;
}
@@ -611,15 +621,16 @@ void ebpf_socket_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* Create common charts.
*
- * @param id the chart id
- * @param title the chart title
- * @param units the units label
- * @param family the group name used to attach the chart on dashboard
- * @param order the chart order
- * @param ptr the plot structure with values.
+ * @param id chart id
+ * @param title chart title
+ * @param units units label
+ * @param family group name used to attach the chart on dashboard
+ * @param order chart order
+ * @param update_every value to overwrite the update frequency set by the server.
+ * @param ptr plot structure with values.
*/
static void ebpf_socket_create_nv_chart(char *id, char *title, char *units,
- char *family, int order, netdata_vector_plot_t *ptr)
+ char *family, int order, int update_every, netdata_vector_plot_t *ptr)
{
ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY,
id,
@@ -628,7 +639,9 @@ static void ebpf_socket_create_nv_chart(char *id, char *title, char *units,
family,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
- order);
+ order,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_SOCKET);
uint32_t i;
uint32_t end = ptr->last_plot;
@@ -653,10 +666,11 @@ static void ebpf_socket_create_nv_chart(char *id, char *title, char *units,
* @param units the units label
* @param family the group name used to attach the chart on dashboard
* @param order the chart order
+ * @param update_every value to overwrite the update frequency set by the server.
* @param ptr the plot structure with values.
*/
static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units,
- char *family, int order, netdata_vector_plot_t *ptr)
+ char *family, int order, int update_every, netdata_vector_plot_t *ptr)
{
ebpf_write_chart_cmd(NETDATA_EBPF_FAMILY,
id,
@@ -665,7 +679,9 @@ static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units,
family,
NETDATA_EBPF_CHART_TYPE_STACKED,
NULL,
- order);
+ order,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_SOCKET);
uint32_t i;
uint32_t end = ptr->last_plot;
@@ -684,8 +700,9 @@ static void ebpf_socket_create_nv_retransmit(char *id, char *title, char *units,
* Recreate the charts when new sockets are created.
*
* @param ptr a pointer for inbound or outbound vectors.
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr)
+static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr, int update_every)
{
// We do not have new sockets, so we do not need move forward
if (ptr->max_plot == ptr->last_plot)
@@ -698,34 +715,34 @@ static void ebpf_socket_create_nv_charts(netdata_vector_plot_t *ptr)
"Outbound connections (bytes).", EBPF_COMMON_DIMENSION_BYTES,
NETDATA_NETWORK_CONNECTIONS_GROUP,
21080,
- ptr);
+ update_every, ptr);
ebpf_socket_create_nv_chart(NETDATA_NV_OUTBOUND_PACKETS,
"Outbound connections (packets)",
EBPF_COMMON_DIMENSION_PACKETS,
NETDATA_NETWORK_CONNECTIONS_GROUP,
21082,
- ptr);
+ update_every, ptr);
ebpf_socket_create_nv_retransmit(NETDATA_NV_OUTBOUND_RETRANSMIT,
"Retransmitted packets",
EBPF_COMMON_DIMENSION_CALL,
NETDATA_NETWORK_CONNECTIONS_GROUP,
21083,
- ptr);
+ update_every, ptr);
} else {
ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_BYTES,
"Inbound connections (bytes)", EBPF_COMMON_DIMENSION_BYTES,
NETDATA_NETWORK_CONNECTIONS_GROUP,
21084,
- ptr);
+ update_every, ptr);
ebpf_socket_create_nv_chart(NETDATA_NV_INBOUND_PACKETS,
"Inbound connections (packets)",
EBPF_COMMON_DIMENSION_PACKETS,
NETDATA_NETWORK_CONNECTIONS_GROUP,
21085,
- ptr);
+ update_every, ptr);
}
ptr->flags |= NETWORK_VIEWER_CHARTS_CREATED;
@@ -1437,7 +1454,7 @@ static void read_listen_table()
uint16_t key = 0;
uint16_t next_key = 0;
- int fd = map_fd[NETDATA_SOCKET_LISTEN_TABLE];
+ int fd = socket_maps[NETDATA_SOCKET_LPORTS].map_fd;
uint8_t value;
while (bpf_map_get_next_key(fd, &key, &next_key) == 0) {
int test = bpf_map_lookup_elem(fd, &key, &value);
@@ -1475,9 +1492,9 @@ void *ebpf_socket_read_hash(void *ptr)
read_thread_closed = 0;
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = NETDATA_SOCKET_READ_SLEEP_MS * em->update_time;
- int fd_ipv4 = map_fd[NETDATA_SOCKET_IPV4_HASH_TABLE];
- int fd_ipv6 = map_fd[NETDATA_SOCKET_IPV6_HASH_TABLE];
+ usec_t step = NETDATA_SOCKET_READ_SLEEP_MS * em->update_every;
+ int fd_ipv4 = socket_maps[NETDATA_SOCKET_TABLE_IPV4].map_fd;
+ int fd_ipv6 = socket_maps[NETDATA_SOCKET_TABLE_IPV6].map_fd;
int network_connection = em->optional;
while (!close_ebpf_plugin) {
usec_t dt = heartbeat_next(&hb, step);
@@ -1504,12 +1521,12 @@ static void read_hash_global_tables()
netdata_idx_t res[NETDATA_SOCKET_COUNTER];
netdata_idx_t *val = socket_hash_values;
- int fd = map_fd[NETDATA_SOCKET_GLOBAL_HASH_TABLE];
+ int fd = socket_maps[NETDATA_SOCKET_GLOBAL].map_fd;
for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) {
if (!bpf_map_lookup_elem(fd, &idx, val)) {
uint64_t total = 0;
int i;
- int end = (running_on_kernel < NETDATA_KERNEL_V4_15) ? 1 : ebpf_nprocs;
+ int end = ebpf_nprocs;
for (i = 0; i < end; i++)
total += val[i];
@@ -1586,7 +1603,7 @@ void ebpf_socket_bandwidth_accumulator(ebpf_bandwidth_t *out)
*/
static void ebpf_socket_update_apps_data()
{
- int fd = map_fd[NETDATA_SOCKET_APPS_HASH_TABLE];
+ int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd;
ebpf_bandwidth_t *eb = bandwidth_vector;
uint32_t key;
struct pid_stat *pids = root_of_pids;
@@ -1606,6 +1623,475 @@ static void ebpf_socket_update_apps_data()
}
}
+/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the socket bandwidth collected per PID.
+ */
+static void ebpf_update_socket_cgroup()
+{
+ ebpf_cgroup_target_t *ect ;
+
+ ebpf_bandwidth_t *eb = bandwidth_vector;
+ int fd = socket_maps[NETDATA_SOCKET_TABLE_BANDWIDTH].map_fd;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ ebpf_bandwidth_t *out = &pids->socket;
+ ebpf_socket_publish_apps_t *publish = &ect->publish_socket;
+ if (likely(socket_bandwidth_curr) && socket_bandwidth_curr[pid]) {
+ ebpf_socket_publish_apps_t *in = socket_bandwidth_curr[pid];
+
+ publish->bytes_sent = in->bytes_sent;
+ publish->bytes_received = in->bytes_received;
+ publish->call_tcp_sent = in->call_tcp_sent;
+ publish->call_tcp_received = in->call_tcp_received;
+ publish->retransmit = in->retransmit;
+ publish->call_udp_sent = in->call_udp_sent;
+ publish->call_udp_received = in->call_udp_received;
+ } else {
+ if (!bpf_map_lookup_elem(fd, &pid, eb)) {
+ ebpf_socket_bandwidth_accumulator(eb);
+
+ memcpy(out, eb, sizeof(ebpf_bandwidth_t));
+
+ publish->bytes_sent = out->bytes_sent;
+ publish->bytes_received = out->bytes_received;
+ publish->call_tcp_sent = out->call_tcp_sent;
+ publish->call_tcp_received = out->call_tcp_received;
+ publish->retransmit = out->retransmit;
+ publish->call_udp_sent = out->call_udp_sent;
+ publish->call_udp_received = out->call_udp_received;
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param socket the structure where the summed values are stored
+ * @param pids   the list of PIDs attached to the target
+ */
+static void ebpf_socket_sum_cgroup_pids(ebpf_socket_publish_apps_t *socket, struct pid_on_target2 *pids)
+{
+ ebpf_socket_publish_apps_t accumulator;
+ memset(&accumulator, 0, sizeof(accumulator));
+
+ while (pids) {
+ ebpf_bandwidth_t *w = &pids->socket;
+
+ accumulator.bytes_received += w->bytes_received;
+ accumulator.bytes_sent += w->bytes_sent;
+ accumulator.call_tcp_received += w->call_tcp_received;
+ accumulator.call_tcp_sent += w->call_tcp_sent;
+ accumulator.retransmit += w->retransmit;
+ accumulator.call_udp_received += w->call_udp_received;
+ accumulator.call_udp_sent += w->call_udp_sent;
+
+ pids = pids->next;
+ }
+
+ socket->bytes_sent = (accumulator.bytes_sent >= socket->bytes_sent) ? accumulator.bytes_sent : socket->bytes_sent;
+ socket->bytes_received = (accumulator.bytes_received >= socket->bytes_received) ? accumulator.bytes_received : socket->bytes_received;
+ socket->call_tcp_sent = (accumulator.call_tcp_sent >= socket->call_tcp_sent) ? accumulator.call_tcp_sent : socket->call_tcp_sent;
+ socket->call_tcp_received = (accumulator.call_tcp_received >= socket->call_tcp_received) ? accumulator.call_tcp_received : socket->call_tcp_received;
+ socket->retransmit = (accumulator.retransmit >= socket->retransmit) ? accumulator.retransmit : socket->retransmit;
+ socket->call_udp_sent = (accumulator.call_udp_sent >= socket->call_udp_sent) ? accumulator.call_udp_sent : socket->call_udp_sent;
+ socket->call_udp_received = (accumulator.call_udp_received >= socket->call_udp_received) ? accumulator.call_udp_received : socket->call_udp_received;
+}
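
The `>=` comparisons above only ever move the published counters forward. Because these dimensions use Netdata's incremental algorithm (the plotted value is the delta between consecutive samples), letting a counter drop, for example when a PID in the cgroup exits, would produce a spurious negative spike. A small illustrative helper capturing that rule (hypothetical, not part of the plugin):

    #include <stdint.h>

    /* Keep a published counter monotonic: only move forward, never backwards.
     * With incremental dimensions Netdata plots the difference between consecutive
     * samples, so copying a smaller sum (e.g. after a PID exits) would appear as a
     * large negative spike on the chart. */
    static inline uint64_t monotonic_update(uint64_t current, uint64_t new_sum)
    {
        return (new_sum >= current) ? new_sum : current;
    }
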
+
+/**
+ * Create specific socket charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_specific_socket_charts(char *type, int update_every)
+{
+ ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV,
+ "Bytes received",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
+ NETDATA_CGROUP_SOCKET_BYTES_RECV_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5300,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
+
+ ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT,
+ "Bytes sent",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
+ NETDATA_CGROUP_SOCKET_BYTES_SEND_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5301,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated, 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
+
+ ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
+ "Calls to tcp_cleanup_rbuf.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
+ NETDATA_CGROUP_SOCKET_TCP_RECV_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5302,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
+
+ ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
+ "Calls to tcp_sendmsg.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
+ NETDATA_CGROUP_SOCKET_TCP_SEND_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5303,
+ ebpf_create_global_dimension,
+ socket_publish_aggregated, 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
+
+ ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
+ "Calls to tcp_retransmit.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
+ NETDATA_CGROUP_SOCKET_TCP_RETRANSMIT_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5304,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
+
+ ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
+ "Calls to udp_sendmsg",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
+ NETDATA_CGROUP_SOCKET_UDP_SEND_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5305,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_IDX_UDP_SENDMSG], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
+
+ ebpf_create_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
+ "Calls to udp_recvmsg",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_CGROUP_NET_GROUP,
+ NETDATA_CGROUP_SOCKET_UDP_RECV_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5306,
+ ebpf_create_global_dimension,
+ &socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SOCKET);
+}
+
+/**
+ * Obsolete specific socket charts
+ *
+ * Obsolete charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_specific_socket_charts(char *type, int update_every)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_RECV, "Bytes received",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5300, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_SENT,"Bytes sent",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5301, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS, "Calls to tcp_cleanup_rbuf.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5302, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS, "Calls to tcp_sendmsg.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5303, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT, "Calls to tcp_retransmit.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5304, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS, "Calls to udp_sendmsg",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5305, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS, "Calls to udp_recvmsg",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_APPS_NET_GROUP, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5306, update_every);
+}
+
+/*
+ * Send Specific Socket data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type chart type
+ * @param values structure with values that will be sent to netdata
+ */
+static void ebpf_send_specific_socket_data(char *type, ebpf_socket_publish_apps_t *values)
+{
+ write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_SENT);
+ write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name,
+ (long long) values->bytes_sent);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_RECV);
+ write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name,
+ (long long) values->bytes_received);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS);
+ write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_SENDMSG].name,
+ (long long) values->call_tcp_sent);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS);
+ write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_CLEANUP_RBUF].name,
+ (long long) values->call_tcp_received);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT);
+ write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_TCP_RETRANSMIT].name,
+ (long long) values->retransmit);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS);
+ write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_SENDMSG].name,
+ (long long) values->call_udp_sent);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS);
+ write_chart_dimension(socket_publish_aggregated[NETDATA_IDX_UDP_RECVBUF].name,
+ (long long) values->call_udp_received);
+ write_end_chart();
+}
+
+/**
+ * Create Systemd Socket Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_socket_charts(int update_every)
+{
+ ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_RECV,
+ "Bytes received", EBPF_COMMON_DIMENSION_BITS,
+ NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20080,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_SENT,
+ "Bytes sent", EBPF_COMMON_DIMENSION_BITS,
+ NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20081,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS,
+ "Calls to tcp_cleanup_rbuf.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20082,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS,
+ "Calls to tcp_sendmsg.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20083,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT,
+ "Calls to tcp_retransmit",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20084,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS,
+ "Calls to udp_sendmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20085,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
+ update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS,
+ "Calls to udp_recvmsg",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_APPS_NET_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20086,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT, NETDATA_EBPF_MODULE_NAME_SOCKET,
+ update_every);
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @return The status for chart creation: 0 when a dimension must be removed (forcing the charts to be
+ * recreated), otherwise 1 to avoid chart recreation.
+ */
+static int ebpf_send_systemd_socket_charts()
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_SENT);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_sent);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_RECV);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_socket.bytes_received);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_SEND_CALLS);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_sent);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RECV_CALLS);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_socket.call_tcp_received);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_TCP_RETRANSMIT);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_socket.retransmit);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_SEND_CALLS);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_sent);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_NET_APPS_BANDWIDTH_UDP_RECV_CALLS);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long)ect->publish_socket.call_udp_received);
+ }
+ }
+ write_end_chart();
+
+ return ret;
+}
+
+/**
+ * Update Cgroup algorithm
+ *
+ * Change algorithm from absolute to incremental
+ */
+void ebpf_socket_update_cgroup_algorithm()
+{
+ int i;
+ for (i = 0; i < NETDATA_MAX_SOCKET_VECTOR; i++) {
+ netdata_publish_syscall_t *ptr = &socket_publish_aggregated[i];
+ freez(ptr->algorithm);
+ ptr->algorithm = strdupz(ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
+ }
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+*/
+static void ebpf_socket_send_cgroup_data(int update_every)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_socket_sum_cgroup_pids(&ect->publish_socket, ect->pids);
+ }
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+ if (has_systemd) {
+ static int systemd_charts = 0;
+ if (!systemd_charts) {
+ ebpf_create_systemd_socket_charts(update_every);
+ systemd_charts = 1;
+ }
+ systemd_charts = ebpf_send_systemd_socket_charts();
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART)) {
+ ebpf_create_specific_socket_charts(ect->name, update_every);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART && ect->updated) {
+ ebpf_send_specific_socket_data(ect->name, &ect->publish_socket);
+ } else {
+ ebpf_obsolete_specific_socket_charts(ect->name, update_every);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SOCKET_CHART;
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
/*****************************************************************
*
* FUNCTIONS WITH THE MAIN LOOP
@@ -1624,7 +2110,6 @@ struct netdata_static_thread socket_threads = {"EBPF SOCKET READ",
*/
static void socket_collector(usec_t step, ebpf_module_t *em)
{
- UNUSED(em);
UNUSED(step);
heartbeat_t hb;
heartbeat_init(&hb);
@@ -1634,49 +2119,63 @@ static void socket_collector(usec_t step, ebpf_module_t *em)
netdata_thread_create(socket_threads.thread, socket_threads.name,
NETDATA_THREAD_OPTION_JOINABLE, ebpf_socket_read_hash, em);
+ int cgroups = em->cgroup_charts;
+ if (cgroups)
+ ebpf_socket_update_cgroup_algorithm();
+
int socket_apps_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].apps_charts;
int socket_global_enabled = ebpf_modules[EBPF_MODULE_SOCKET_IDX].global_charts;
int network_connection = em->optional;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!close_ebpf_plugin) {
pthread_mutex_lock(&collect_data_mutex);
pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
- if (socket_global_enabled)
- read_hash_global_tables();
+ if (++counter == update_every) {
+ counter = 0;
+ if (socket_global_enabled)
+ read_hash_global_tables();
- if (socket_apps_enabled)
- ebpf_socket_update_apps_data();
+ if (socket_apps_enabled)
+ ebpf_socket_update_apps_data();
- calculate_nv_plot();
+ if (cgroups)
+ ebpf_update_socket_cgroup();
- pthread_mutex_lock(&lock);
- if (socket_global_enabled)
- ebpf_socket_send_data(em);
+ calculate_nv_plot();
- if (socket_apps_enabled)
- ebpf_socket_send_apps_data(em, apps_groups_root_target);
+ pthread_mutex_lock(&lock);
+ if (socket_global_enabled)
+ ebpf_socket_send_data(em);
- fflush(stdout);
+ if (socket_apps_enabled)
+ ebpf_socket_send_apps_data(em, apps_groups_root_target);
- if (network_connection) {
- // We are calling fflush many times, because when we have a lot of dimensions
- // we began to have not expected outputs and Netdata closed the plugin.
- pthread_mutex_lock(&nv_mutex);
- ebpf_socket_create_nv_charts(&inbound_vectors);
- fflush(stdout);
- ebpf_socket_send_nv_data(&inbound_vectors);
+ if (cgroups)
+ ebpf_socket_send_cgroup_data(update_every);
- ebpf_socket_create_nv_charts(&outbound_vectors);
fflush(stdout);
- ebpf_socket_send_nv_data(&outbound_vectors);
- wait_to_plot = 0;
- pthread_mutex_unlock(&nv_mutex);
+ if (network_connection) {
+ // We call fflush() several times because, with many dimensions, the output
+ // was interleaved unexpectedly and Netdata closed the plugin.
+ pthread_mutex_lock(&nv_mutex);
+ ebpf_socket_create_nv_charts(&inbound_vectors, update_every);
+ fflush(stdout);
+ ebpf_socket_send_nv_data(&inbound_vectors);
+
+ ebpf_socket_create_nv_charts(&outbound_vectors, update_every);
+ fflush(stdout);
+ ebpf_socket_send_nv_data(&outbound_vectors);
+ wait_to_plot = 0;
+ pthread_mutex_unlock(&nv_mutex);
+
+ }
+ pthread_mutex_unlock(&lock);
}
pthread_mutex_unlock(&collect_data_mutex);
- pthread_mutex_unlock(&lock);
-
}
}
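
The `counter`/`update_every` logic introduced above decouples the collector's wake-up frequency from the chart update frequency: the thread still synchronizes on every collection cycle, but tables are only read and charts only sent once every `update_every` cycles. A self-contained sketch of that throttling pattern (illustrative names only):

    #include <stdio.h>

    int main(void)
    {
        int update_every = 5;             /* send charts every 5 collection cycles */
        int counter = update_every - 1;   /* so the very first cycle sends immediately */

        for (int tick = 0; tick < 12; tick++) {
            if (++counter == update_every) {
                counter = 0;
                printf("cycle %d: read tables and send charts\n", tick);
            } else {
                printf("cycle %d: skip\n", tick);
            }
        }
        return 0;
    }
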
@@ -1885,17 +2384,18 @@ static void ebpf_socket_cleanup(void *ptr)
clean_hostnames(network_viewer_opt.excluded_hostnames);
pthread_mutex_destroy(&nv_mutex);
- freez(socket_data.map_fd);
freez(socket_threads.thread);
- struct bpf_program *prog;
- size_t i = 0 ;
- bpf_object__for_each_program(prog, objects) {
- bpf_link__destroy(probe_links[i]);
- i++;
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
}
- bpf_object__close(objects);
finalized_threads = 1;
}
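
The cleanup now only runs the teardown when `probe_links` was actually allocated, avoiding a NULL dereference if program loading failed earlier. The sequence itself is the usual libbpf teardown: destroy every attached link, then close the object. A minimal standalone sketch of that pattern (assuming one link per program, as the loaders here produce):

    #include <bpf/libbpf.h>
    #include <stddef.h>

    /* Detach every attached program and release the loaded object.
     * Safe to call when nothing was attached (links == NULL). */
    static void detach_and_close(struct bpf_object *obj, struct bpf_link **links)
    {
        if (!links)
            return;

        struct bpf_program *prog;
        size_t i = 0;
        bpf_object__for_each_program(prog, obj) {
            bpf_link__destroy(links[i]);
            i++;
        }
        bpf_object__close(obj);
    }
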
@@ -1910,15 +2410,17 @@ static void ebpf_socket_cleanup(void *ptr)
* We are not testing the return, because callocz does this and shutdown the software
* case it was not possible to allocate.
*
- * @param length is the length for the vectors used inside the collector.
+ * @param apps whether the apps charts are enabled
*/
-static void ebpf_socket_allocate_global_vectors(size_t length)
+static void ebpf_socket_allocate_global_vectors(int apps)
{
- memset(socket_aggregated_data, 0 ,length * sizeof(netdata_syscall_stat_t));
- memset(socket_publish_aggregated, 0 ,length * sizeof(netdata_publish_syscall_t));
+ memset(socket_aggregated_data, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_syscall_stat_t));
+ memset(socket_publish_aggregated, 0 ,NETDATA_MAX_SOCKET_VECTOR * sizeof(netdata_publish_syscall_t));
socket_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
- socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
+ if (apps)
+ socket_bandwidth_curr = callocz((size_t)pid_max, sizeof(ebpf_socket_publish_apps_t *));
+
bandwidth_vector = callocz((size_t)ebpf_nprocs, sizeof(ebpf_bandwidth_t));
socket_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_socket_t));
@@ -1927,14 +2429,6 @@ static void ebpf_socket_allocate_global_vectors(size_t length)
}
/**
- * Set local function pointers, this function will never be compiled with static libraries
- */
-static void set_local_pointers()
-{
- map_fd = socket_data.map_fd;
-}
-
-/**
* Initialize Inbound and Outbound
*
* Initialize the common outbound and inbound sockets.
@@ -2860,9 +3354,7 @@ void *ebpf_socket_thread(void *ptr)
ebpf_module_t *em = (ebpf_module_t *)ptr;
em->maps = socket_maps;
- fill_ebpf_data(&socket_data);
- ebpf_update_module(em, &socket_config, NETDATA_NETWORK_CONFIG_FILE);
parse_network_viewer_section(&socket_config);
parse_service_name_section(&socket_config);
parse_table_size_options(&socket_config);
@@ -2876,16 +3368,13 @@ void *ebpf_socket_thread(void *ptr)
}
pthread_mutex_lock(&lock);
- ebpf_socket_allocate_global_vectors(NETDATA_MAX_SOCKET_VECTOR);
+ ebpf_socket_allocate_global_vectors(em->apps_charts);
initialize_inbound_outbound();
- if (ebpf_update_kernel(&socket_data)) {
- pthread_mutex_unlock(&lock);
- goto endsocket;
- }
+ if (running_on_kernel < NETDATA_EBPF_KERNEL_5_0)
+ em->mode = MODE_ENTRY;
- set_local_pointers();
- probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects, socket_data.map_fd);
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
if (!probe_links) {
pthread_mutex_unlock(&lock);
goto endsocket;
@@ -2904,7 +3393,7 @@ void *ebpf_socket_thread(void *ptr)
finalized_threads = 0;
pthread_mutex_unlock(&lock);
- socket_collector((usec_t)(em->update_time * USEC_PER_SEC), em);
+ socket_collector((usec_t)(em->update_every * USEC_PER_SEC), em);
endsocket:
netdata_thread_cleanup_pop(1);
diff --git a/collectors/ebpf.plugin/ebpf_socket.h b/collectors/ebpf.plugin/ebpf_socket.h
index 8dd422507..e3c000c76 100644
--- a/collectors/ebpf.plugin/ebpf_socket.h
+++ b/collectors/ebpf.plugin/ebpf_socket.h
@@ -4,15 +4,11 @@
#include <stdint.h>
#include "libnetdata/avl/avl.h"
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_SOCKET "socket"
+
// Vector indexes
#define NETDATA_UDP_START 3
-#define NETDATA_RETRANSMIT_START 5
-
-#define NETDATA_SOCKET_APPS_HASH_TABLE 0
-#define NETDATA_SOCKET_IPV4_HASH_TABLE 1
-#define NETDATA_SOCKET_IPV6_HASH_TABLE 2
-#define NETDATA_SOCKET_GLOBAL_HASH_TABLE 4
-#define NETDATA_SOCKET_LISTEN_TABLE 5
#define NETDATA_SOCKET_READ_SLEEP_MS 800000ULL
@@ -32,9 +28,12 @@
enum ebpf_socket_table_list {
NETDATA_SOCKET_TABLE_BANDWIDTH,
+ NETDATA_SOCKET_GLOBAL,
+ NETDATA_SOCKET_LPORTS,
NETDATA_SOCKET_TABLE_IPV4,
NETDATA_SOCKET_TABLE_IPV6,
- NETDATA_SOCKET_TABLE_UDP
+ NETDATA_SOCKET_TABLE_UDP,
+ NETDATA_SOCKET_TABLE_CTRL
};
enum ebpf_socket_publish_index {
@@ -74,8 +73,9 @@ typedef enum ebpf_socket_idx {
NETDATA_SOCKET_COUNTER
} ebpf_socket_index_t;
-#define NETDATA_SOCKET_GROUP "Socket"
-#define NETDATA_NETWORK_CONNECTIONS_GROUP "Network connections"
+#define NETDATA_SOCKET_KERNEL_FUNCTIONS "kernel"
+#define NETDATA_NETWORK_CONNECTIONS_GROUP "network connections"
+#define NETDATA_CGROUP_NET_GROUP "network (eBPF)"
// Global chart name
#define NETDATA_TCP_FUNCTION_COUNT "tcp_functions"
@@ -113,6 +113,23 @@ typedef enum ebpf_socket_idx {
#define NETDATA_MINIMUM_IPV4_CIDR 0
#define NETDATA_MAXIMUM_IPV4_CIDR 32
+// Contexts
+#define NETDATA_CGROUP_SOCKET_BYTES_RECV_CONTEXT "cgroup.net_bytes_recv"
+#define NETDATA_CGROUP_SOCKET_BYTES_SEND_CONTEXT "cgroup.net_bytes_send"
+#define NETDATA_CGROUP_SOCKET_TCP_RECV_CONTEXT "cgroup.net_tcp_recv"
+#define NETDATA_CGROUP_SOCKET_TCP_SEND_CONTEXT "cgroup.net_tcp_send"
+#define NETDATA_CGROUP_SOCKET_TCP_RETRANSMIT_CONTEXT "cgroup.net_retransmit"
+#define NETDATA_CGROUP_SOCKET_UDP_RECV_CONTEXT "cgroup.net_udp_recv"
+#define NETDATA_CGROUP_SOCKET_UDP_SEND_CONTEXT "cgroup.net_udp_send"
+
+#define NETDATA_SERVICES_SOCKET_BYTES_RECV_CONTEXT "services.net_bytes_recv"
+#define NETDATA_SERVICES_SOCKET_BYTES_SEND_CONTEXT "services.net_bytes_send"
+#define NETDATA_SERVICES_SOCKET_TCP_RECV_CONTEXT "services.net_tcp_recv"
+#define NETDATA_SERVICES_SOCKET_TCP_SEND_CONTEXT "services.net_tcp_send"
+#define NETDATA_SERVICES_SOCKET_TCP_RETRANSMIT_CONTEXT "services.net_retransmit"
+#define NETDATA_SERVICES_SOCKET_UDP_RECV_CONTEXT "services.net_udp_recv"
+#define NETDATA_SERVICES_SOCKET_UDP_SEND_CONTEXT "services.net_udp_send"
+
typedef struct ebpf_socket_publish_apps {
// Data read
uint64_t bytes_sent; // Bytes sent
@@ -312,5 +329,6 @@ extern void parse_service_name_section(struct config *cfg);
extern void clean_socket_apps_structures();
extern ebpf_socket_publish_apps_t **socket_bandwidth_curr;
+extern struct config socket_config;
#endif
diff --git a/collectors/ebpf.plugin/ebpf_softirq.c b/collectors/ebpf.plugin/ebpf_softirq.c
new file mode 100644
index 000000000..119c1222a
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_softirq.c
@@ -0,0 +1,273 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_softirq.h"
+
+struct config softirq_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+#define SOFTIRQ_MAP_LATENCY 0
+static ebpf_local_maps_t softirq_maps[] = {
+ {
+ .name = "tbl_softirq",
+ .internal_input = NETDATA_SOFTIRQ_MAX_IRQS,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ },
+ /* end */
+ {
+ .name = NULL,
+ .internal_input = 0,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED
+ }
+};
+
+#define SOFTIRQ_TP_CLASS_IRQ "irq"
+static ebpf_tracepoint_t softirq_tracepoints[] = {
+ {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_entry"},
+ {.enabled = false, .class = SOFTIRQ_TP_CLASS_IRQ, .event = "softirq_exit"},
+ /* end */
+ {.enabled = false, .class = NULL, .event = NULL}
+};
+
+// these must be in the order defined by the kernel:
+// https://elixir.bootlin.com/linux/v5.12.19/source/include/trace/events/irq.h#L13
+static softirq_val_t softirq_vals[] = {
+ {.name = "HI", .latency = 0},
+ {.name = "TIMER", .latency = 0},
+ {.name = "NET_TX", .latency = 0},
+ {.name = "NET_RX", .latency = 0},
+ {.name = "BLOCK", .latency = 0},
+ {.name = "IRQ_POLL", .latency = 0},
+ {.name = "TASKLET", .latency = 0},
+ {.name = "SCHED", .latency = 0},
+ {.name = "HRTIMER", .latency = 0},
+ {.name = "RCU", .latency = 0},
+};
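
As the comment above stresses, these names must follow the kernel's softirq vector order, because the vector number carried by the `irq:softirq_entry`/`irq:softirq_exit` tracepoints is used directly as an index into this array. A tiny self-contained illustration of that lookup (the array below is a copy for demonstration only):

    #include <stdio.h>

    /* Copy of the kernel's softirq vector order, for demonstration only. */
    static const char *softirq_names[] = {
        "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK",
        "IRQ_POLL", "TASKLET", "SCHED", "HRTIMER", "RCU",
    };

    int main(void)
    {
        unsigned int vec = 3;  /* vector number as reported by the tracepoint */
        printf("softirq %u is %s\n", vec, softirq_names[vec]); /* prints NET_RX */
        return 0;
    }
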
+
+// tmp store for soft IRQ values we get from a per-CPU eBPF map.
+static softirq_ebpf_val_t *softirq_ebpf_vals = NULL;
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+static int read_thread_closed = 1;
+
+static struct netdata_static_thread softirq_threads = {"SOFTIRQ KERNEL",
+ NULL, NULL, 1, NULL,
+ NULL, NULL };
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void softirq_cleanup(void *ptr)
+{
+ for (int i = 0; softirq_tracepoints[i].class != NULL; i++) {
+ ebpf_disable_tracepoint(&softirq_tracepoints[i]);
+ }
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled) {
+ return;
+ }
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 1 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(softirq_ebpf_vals);
+ freez(softirq_threads.thread);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ * MAIN LOOP
+ *****************************************************************/
+
+static void softirq_read_latency_map()
+{
+ int fd = softirq_maps[SOFTIRQ_MAP_LATENCY].map_fd;
+ int i;
+ for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
+ int test = bpf_map_lookup_elem(fd, &i, softirq_ebpf_vals);
+ if (unlikely(test < 0)) {
+ continue;
+ }
+
+ uint64_t total_latency = 0;
+ int cpu_i;
+ int end = ebpf_nprocs;
+ for (cpu_i = 0; cpu_i < end; cpu_i++) {
+ total_latency += softirq_ebpf_vals[cpu_i].latency/1000;
+ }
+
+ softirq_vals[i].latency = total_latency;
+ }
+}
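
`tbl_softirq` is a per-CPU map, so each lookup returns one slot per possible CPU and the loop above sums `ebpf_nprocs` entries before publishing a single latency value. A generic sketch of that aggregation step using only standard libbpf calls (the map layout is an assumption for illustration):

    #include <bpf/bpf.h>
    #include <stdint.h>

    /* Sum one entry of a per-CPU eBPF map: the lookup fills one value per possible
     * CPU, and the caller provides a buffer with at least 'nprocs' elements. */
    static uint64_t sum_percpu_entry(int map_fd, uint32_t key, uint64_t *values, int nprocs)
    {
        uint64_t total = 0;

        if (bpf_map_lookup_elem(map_fd, &key, values))
            return 0;   /* key not present in the map */

        for (int cpu = 0; cpu < nprocs; cpu++)
            total += values[cpu];

        return total;
    }
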
+
+/**
+ * Read eBPF maps for soft IRQ.
+ */
+static void *softirq_reader(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ usec_t step = NETDATA_SOFTIRQ_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ UNUSED(dt);
+
+ softirq_read_latency_map();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+static void softirq_create_charts(int update_every)
+{
+ ebpf_create_chart(
+ NETDATA_EBPF_SYSTEM_GROUP,
+ "softirq_latency",
+ "Software IRQ latency",
+ EBPF_COMMON_DIMENSION_MILLISECONDS,
+ "softirqs",
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ NETDATA_CHART_PRIO_SYSTEM_SOFTIRQS+1,
+ NULL, NULL, 0, update_every,
+ NETDATA_EBPF_MODULE_NAME_SOFTIRQ
+ );
+
+ fflush(stdout);
+}
+
+static void softirq_create_dims()
+{
+ uint32_t i;
+ for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
+ ebpf_write_global_dimension(
+ softirq_vals[i].name, softirq_vals[i].name,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]
+ );
+ }
+}
+
+static inline void softirq_write_dims()
+{
+ uint32_t i;
+ for (i = 0; i < NETDATA_SOFTIRQ_MAX_IRQS; i++) {
+ write_chart_dimension(softirq_vals[i].name, softirq_vals[i].latency);
+ }
+}
+
+/**
+* Main loop for this collector.
+*/
+static void softirq_collector(ebpf_module_t *em)
+{
+ softirq_ebpf_vals = callocz(ebpf_nprocs, sizeof(softirq_ebpf_val_t));
+
+ // create reader thread.
+ softirq_threads.thread = mallocz(sizeof(netdata_thread_t));
+ softirq_threads.start_routine = softirq_reader;
+ netdata_thread_create(
+ softirq_threads.thread,
+ softirq_threads.name,
+ NETDATA_THREAD_OPTION_JOINABLE,
+ softirq_reader,
+ em
+ );
+
+ // create chart and static dims.
+ pthread_mutex_lock(&lock);
+ softirq_create_charts(em->update_every);
+ softirq_create_dims();
+ pthread_mutex_unlock(&lock);
+
+ // loop and read from published data until ebpf plugin is closed.
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
+
+ // write dims now for all hitherto discovered IRQs.
+ write_begin_chart(NETDATA_EBPF_SYSTEM_GROUP, "softirq_latency");
+ softirq_write_dims();
+ write_end_chart();
+
+ pthread_mutex_unlock(&lock);
+ }
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ * EBPF SOFTIRQ THREAD
+ *****************************************************************/
+
+/**
+ * Soft IRQ latency thread.
+ *
+ * @param ptr a `ebpf_module_t *`.
+ * @return always NULL.
+ */
+void *ebpf_softirq_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(softirq_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = softirq_maps;
+
+ if (!em->enabled) {
+ goto endsoftirq;
+ }
+
+ if (ebpf_enable_tracepoints(softirq_tracepoints) == 0) {
+ em->enabled = CONFIG_BOOLEAN_NO;
+ goto endsoftirq;
+ }
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endsoftirq;
+ }
+
+ softirq_collector(em);
+
+endsoftirq:
+ netdata_thread_cleanup_pop(1);
+
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_softirq.h b/collectors/ebpf.plugin/ebpf_softirq.h
new file mode 100644
index 000000000..a22751895
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_softirq.h
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_SOFTIRQ_H
+#define NETDATA_EBPF_SOFTIRQ_H 1
+
+/*****************************************************************
+ * copied from kernel-collectors repo, with modifications needed
+ * for inclusion here.
+ *****************************************************************/
+
+#define NETDATA_SOFTIRQ_MAX_IRQS 10
+
+typedef struct softirq_ebpf_val {
+ uint64_t latency;
+ uint64_t ts;
+} softirq_ebpf_val_t;
+
+/*****************************************************************
+ * below this is eBPF plugin-specific code.
+ *****************************************************************/
+
+#define NETDATA_EBPF_MODULE_NAME_SOFTIRQ "softirq"
+#define NETDATA_SOFTIRQ_SLEEP_MS 650000ULL
+#define NETDATA_SOFTIRQ_CONFIG_FILE "softirq.conf"
+
+typedef struct sofirq_val {
+ uint64_t latency;
+ char *name;
+} softirq_val_t;
+
+extern struct config softirq_config;
+extern void *ebpf_softirq_thread(void *ptr);
+
+#endif /* NETDATA_EBPF_SOFTIRQ_H */
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
new file mode 100644
index 000000000..34750c79d
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -0,0 +1,698 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include "ebpf.h"
+#include "ebpf_swap.h"
+
+static char *swap_dimension_name[NETDATA_SWAP_END] = { "read", "write" };
+static netdata_syscall_stat_t swap_aggregated_data[NETDATA_SWAP_END];
+static netdata_publish_syscall_t swap_publish_aggregated[NETDATA_SWAP_END];
+
+static int read_thread_closed = 1;
+netdata_publish_swap_t *swap_vector = NULL;
+
+static netdata_idx_t swap_hash_values[NETDATA_SWAP_END];
+static netdata_idx_t *swap_values = NULL;
+
+netdata_publish_swap_t **swap_pid = NULL;
+
+struct config swap_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static ebpf_local_maps_t swap_maps[] = {{.name = "tbl_pid_swap", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "swap_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_swap", .internal_input = NETDATA_SWAP_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0}};
+
+static struct bpf_link **probe_links = NULL;
+static struct bpf_object *objects = NULL;
+
+struct netdata_static_thread swap_threads = {"SWAP KERNEL", NULL, NULL, 1,
+ NULL, NULL, NULL};
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean swap structure
+ */
+void clean_swap_pid_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(swap_pid[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_swap_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 2 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ ebpf_cleanup_publish_syscall(swap_publish_aggregated);
+
+ freez(swap_vector);
+ freez(swap_values);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
+
+/*****************************************************************
+ *
+ * COLLECTOR THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from kernel and store in the first address.
+ *
+ * @param out the vector with read values.
+ */
+static void swap_apps_accumulator(netdata_publish_swap_t *out)
+{
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ netdata_publish_swap_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ netdata_publish_swap_t *w = &out[i];
+ total->write += w->write;
+ total->read += w->read;
+ }
+}
+
+/**
+ * Fill PID
+ *
+ * Fill PID structures
+ *
+ * @param current_pid the PID whose data we are collecting
+ * @param publish     the values read from the hash table
+ */
+static void swap_fill_pid(uint32_t current_pid, netdata_publish_swap_t *publish)
+{
+ netdata_publish_swap_t *curr = swap_pid[current_pid];
+ if (!curr) {
+ curr = callocz(1, sizeof(netdata_publish_swap_t));
+ swap_pid[current_pid] = curr;
+ }
+
+ memcpy(curr, publish, sizeof(netdata_publish_swap_t));
+}
+
+/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the per-PID swap data collected from the kernel.
+ */
+static void ebpf_update_swap_cgroup()
+{
+ ebpf_cgroup_target_t *ect ;
+ netdata_publish_swap_t *cv = swap_vector;
+ int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
+ size_t length = sizeof(netdata_publish_swap_t)*ebpf_nprocs;
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ netdata_publish_swap_t *out = &pids->swap;
+ if (likely(swap_pid) && swap_pid[pid]) {
+ netdata_publish_swap_t *in = swap_pid[pid];
+
+ memcpy(out, in, sizeof(netdata_publish_swap_t));
+ } else {
+ memset(cv, 0, length);
+ if (!bpf_map_lookup_elem(fd, &pid, cv)) {
+ swap_apps_accumulator(cv);
+
+ memcpy(out, cv, sizeof(netdata_publish_swap_t));
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Read APPS table
+ *
+ * Read the apps table and store data inside the structure.
+ */
+static void read_apps_table()
+{
+ netdata_publish_swap_t *cv = swap_vector;
+ uint32_t key;
+ struct pid_stat *pids = root_of_pids;
+ int fd = swap_maps[NETDATA_PID_SWAP_TABLE].map_fd;
+ size_t length = sizeof(netdata_publish_swap_t)*ebpf_nprocs;
+ while (pids) {
+ key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, cv)) {
+ pids = pids->next;
+ continue;
+ }
+
+ swap_apps_accumulator(cv);
+
+ swap_fill_pid(key, cv);
+
+ // We clear the vector to avoid passing data read from one process to another.
+ memset(cv, 0, length);
+
+ pids = pids->next;
+ }
+}
+
+/**
+* Send global
+*
+* Send global charts to Netdata
+*/
+static void swap_send_global()
+{
+ write_io_chart(NETDATA_MEM_SWAP_CHART, NETDATA_EBPF_SYSTEM_GROUP,
+ swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].dimension,
+ (long long) swap_hash_values[NETDATA_KEY_SWAP_WRITEPAGE_CALL],
+ swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].dimension,
+ (long long) swap_hash_values[NETDATA_KEY_SWAP_READPAGE_CALL]);
+}
+
+/**
+ * Read global counter
+ *
+ * Read the table with number of calls to all functions
+ */
+static void read_global_table()
+{
+ netdata_idx_t *stored = swap_values;
+ netdata_idx_t *val = swap_hash_values;
+ int fd = swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd;
+
+ uint32_t i, end = NETDATA_SWAP_END;
+ for (i = NETDATA_KEY_SWAP_READPAGE_CALL; i < end; i++) {
+ if (!bpf_map_lookup_elem(fd, &i, stored)) {
+ int j;
+ int last = ebpf_nprocs;
+ netdata_idx_t total = 0;
+ for (j = 0; j < last; j++)
+ total += stored[j];
+
+ val[i] = total;
+ }
+ }
+}
+
+/**
+ * Swap read hash
+ *
+ * This is the thread callback.
+ *
+ * @param ptr a pointer to the thread's ebpf_module_t structure.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_swap_read_hash(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ usec_t step = NETDATA_SWAP_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_global_table();
+ }
+
+ read_thread_closed = 1;
+ return NULL;
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param swap the structure where the summed values are stored
+ * @param root the list of PIDs attached to the target
+ */
+static void ebpf_swap_sum_pids(netdata_publish_swap_t *swap, struct pid_on_target *root)
+{
+ uint64_t local_read = 0;
+ uint64_t local_write = 0;
+
+ while (root) {
+ int32_t pid = root->pid;
+ netdata_publish_swap_t *w = swap_pid[pid];
+ if (w) {
+ local_write += w->write;
+ local_read += w->read;
+ }
+ root = root->next;
+ }
+
+ // These conditions were added because we are using an incremental algorithm.
+ swap->write = (local_write >= swap->write) ? local_write : swap->write;
+ swap->read = (local_read >= swap->read) ? local_read : swap->read;
+}
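+
+/*
+ * The guard above keeps the published counters monotonic: Netdata charts
+ * these dimensions with an incremental algorithm, so it plots the difference
+ * between consecutive collections, and a momentary drop (for example, when a
+ * PID exits and its entry disappears) would otherwise appear as a negative
+ * spike. A compact way to express the same rule (hypothetical helper):
+ *
+ *     static inline uint64_t keep_monotonic(uint64_t previous, uint64_t current)
+ *     {
+ *         return (current >= previous) ? current : previous;
+ *     }
+ *
+ *     // swap->read = keep_monotonic(swap->read, local_read);
+ */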
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param root the target list.
+ */
+void ebpf_swap_send_apps_data(struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ ebpf_swap_sum_pids(&w->swap, w->root_pid);
+ }
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_MEM_SWAP_READ_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->swap.read);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, (long long) w->swap.write);
+ }
+ }
+ write_end_chart();
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param swap output structure used to store the sums
+ * @param pids link list with the cgroup PIDs
+ */
+static void ebpf_swap_sum_cgroup_pids(netdata_publish_swap_t *swap, struct pid_on_target2 *pids)
+{
+ uint64_t local_read = 0;
+ uint64_t local_write = 0;
+
+ while (pids) {
+ netdata_publish_swap_t *w = &pids->swap;
+ local_write += w->write;
+ local_read += w->read;
+
+ pids = pids->next;
+ }
+
+ // These conditions were added because we are using an incremental algorithm.
+ swap->write = (local_write >= swap->write) ? local_write : swap->write;
+ swap->read = (local_read >= swap->read) ? local_read : swap->read;
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @return It returns the status of the chart creation: zero when a specific dimension must be removed,
+ *         otherwise 1 to avoid recreating the charts.
+ */
+static int ebpf_send_systemd_swap_charts()
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_READ_CHART);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.read);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_MEM_SWAP_WRITE_CHART);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, (long long) ect->publish_systemd_swap.write);
+ }
+ }
+ write_end_chart();
+
+ return ret;
+}
+
+/**
+ * Create specific swap charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_specific_swap_charts(char *type, int update_every)
+{
+ ebpf_create_chart(type, NETDATA_MEM_SWAP_READ_CHART,
+ "Calls to function <code>swap_readpage</code>.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_CGROUP_SWAP_READ_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100,
+ ebpf_create_global_dimension,
+ swap_publish_aggregated, 1, update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ ebpf_create_chart(type, NETDATA_MEM_SWAP_WRITE_CHART,
+ "Calls to function <code>swap_writepage</code>.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_CGROUP_SWAP_WRITE_CONTEXT, NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101,
+ ebpf_create_global_dimension,
+ &swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL], 1,
+ update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+}
+
+/**
+ * Obsolete specific swap charts
+ *
+ * Mark cgroup/application charts as obsolete.
+ *
+ * @param type the chart type.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_obsolete_specific_swap_charts(char *type, int update_every)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_READ_CHART, "Calls to function <code>swap_readpage</code>.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_READ_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5100, update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_MEM_SWAP_WRITE_CHART, "Calls to function <code>swap_writepage</code>.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_SWAP_WRITE_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5101, update_every);
+}
+
+/*
+ * Send Specific Swap data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type chart type
+ * @param values structure with values that will be sent to netdata
+ */
+static void ebpf_send_specific_swap_data(char *type, netdata_publish_swap_t *values)
+{
+ write_begin_chart(type, NETDATA_MEM_SWAP_READ_CHART);
+ write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].name, (long long) values->read);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_MEM_SWAP_WRITE_CHART);
+ write_chart_dimension(swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].name, (long long) values->write);
+ write_end_chart();
+}
+
+/**
+ * Create Systemd Swap Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ **/
+static void ebpf_create_systemd_swap_charts(int update_every)
+{
+ ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_READ_CHART,
+ "Calls to <code>swap_readpage</code>.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20191,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_READ_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_MEM_SWAP_WRITE_CHART,
+ "Calls to function <code>swap_writepage</code>.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_CGROUP_SWAP_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20192,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_SWAP, update_every);
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+void ebpf_swap_send_cgroup_data(int update_every)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_swap_sum_cgroup_pids(&ect->publish_systemd_swap, ect->pids);
+ }
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+
+ if (has_systemd) {
+ static int systemd_charts = 0;
+ if (!systemd_charts) {
+ ebpf_create_systemd_swap_charts(update_every);
+ systemd_charts = 1;
+ fflush(stdout);
+ }
+
+ systemd_charts = ebpf_send_systemd_swap_charts();
+ }
+
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_SWAP_CHART) && ect->updated) {
+ ebpf_create_specific_swap_charts(ect->name, update_every);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_SWAP_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_SWAP_CHART) {
+ if (ect->updated) {
+ ebpf_send_specific_swap_data(ect->name, &ect->publish_systemd_swap);
+ } else {
+ ebpf_obsolete_specific_swap_charts(ect->name, update_every);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_SWAP_CHART;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Main loop for this collector.
+ */
+static void swap_collector(ebpf_module_t *em)
+{
+ swap_threads.thread = mallocz(sizeof(netdata_thread_t));
+ swap_threads.start_routine = ebpf_swap_read_hash;
+
+ netdata_thread_create(swap_threads.thread, swap_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_swap_read_hash, em);
+
+ int apps = em->apps_charts;
+ int cgroup = em->cgroup_charts;
+ int update_every = em->update_every;
+ int counter = update_every - 1;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ if (apps)
+ read_apps_table();
+
+ if (cgroup)
+ ebpf_update_swap_cgroup();
+
+ pthread_mutex_lock(&lock);
+
+ swap_send_global();
+
+ if (apps)
+ ebpf_swap_send_apps_data(apps_groups_root_target);
+
+ if (cgroup)
+ ebpf_swap_send_cgroup_data(update_every);
+
+ pthread_mutex_unlock(&lock);
+ }
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
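+
+/*
+ * Cadence sketch for the loop above: the condition variable fires once per
+ * second, while charts are only flushed every `update_every` wake-ups.
+ * Starting the counter at `update_every - 1` makes the very first iteration
+ * publish immediately. Assuming update_every = 5:
+ *
+ *     int counter = 5 - 1;          // 4
+ *     // wake-up 1: ++counter == 5  -> send charts, counter = 0
+ *     // wake-ups 2..5: counter becomes 1, 2, 3, 4 -> only collect
+ *     // wake-up 6: ++counter == 5  -> send charts again
+ */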
+
+/*****************************************************************
+ *
+ * INITIALIZE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Create apps charts
+ *
+ * Call ebpf_create_chart to create the charts on apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param ptr a pointer to the target list (struct target *).
+ */
+void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+ struct target *root = ptr;
+ ebpf_create_charts_on_apps(NETDATA_MEM_SWAP_READ_CHART,
+ "Calls to function <code>swap_readpage</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SWAP_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20191,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ ebpf_create_charts_on_apps(NETDATA_MEM_SWAP_WRITE_CHART,
+ "Calls to function <code>swap_writepage</code>.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_SWAP_SUBMENU,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20192,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+}
+
+/**
+ * Allocate vectors used with this thread.
+ *
+ * We do not test the return value because callocz already does it and shuts down
+ * the software when the allocation is not possible.
+ *
+ * @param apps non-zero when the apps charts are enabled.
+ */
+static void ebpf_swap_allocate_global_vectors(int apps)
+{
+ if (apps)
+ swap_pid = callocz((size_t)pid_max, sizeof(netdata_publish_swap_t *));
+
+ swap_vector = callocz((size_t)ebpf_nprocs, sizeof(netdata_publish_swap_t));
+
+ swap_values = callocz((size_t)ebpf_nprocs, sizeof(netdata_idx_t));
+
+ memset(swap_hash_values, 0, sizeof(swap_hash_values));
+}
+
+/*****************************************************************
+ *
+ * MAIN THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_swap_charts(int update_every)
+{
+ ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_MEM_SWAP_CHART,
+ "Calls to internal functions used to access swap.",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ 202,
+ ebpf_create_global_dimension,
+ swap_publish_aggregated, NETDATA_SWAP_END,
+ update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+}
+
+/**
+ * SWAP thread
+ *
+ * Thread responsible for collecting swap metrics.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_swap_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_swap_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = swap_maps;
+
+ ebpf_update_pid_table(&swap_maps[NETDATA_PID_SWAP_TABLE], em);
+
+ if (!em->enabled)
+ goto endswap;
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endswap;
+ }
+
+ ebpf_swap_allocate_global_vectors(em->apps_charts);
+
+ int algorithms[NETDATA_SWAP_END] = { NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX };
+ ebpf_global_labels(swap_aggregated_data, swap_publish_aggregated, swap_dimension_name, swap_dimension_name,
+ algorithms, NETDATA_SWAP_END);
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_swap_charts(em->update_every);
+ pthread_mutex_unlock(&lock);
+
+ swap_collector(em);
+
+endswap:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_swap.h b/collectors/ebpf.plugin/ebpf_swap.h
new file mode 100644
index 000000000..1dba9c17a
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_swap.h
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_SWAP_H
+#define NETDATA_EBPF_SWAP_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_SWAP "swap"
+
+#define NETDATA_SWAP_SLEEP_MS 850000ULL
+
+// charts
+#define NETDATA_MEM_SWAP_CHART "swapcalls"
+#define NETDATA_MEM_SWAP_READ_CHART "swap_read_call"
+#define NETDATA_MEM_SWAP_WRITE_CHART "swap_write_call"
+#define NETDATA_SWAP_SUBMENU "swap"
+
+// configuration file
+#define NETDATA_DIRECTORY_SWAP_CONFIG_FILE "swap.conf"
+
+// Contexts
+#define NETDATA_CGROUP_SWAP_READ_CONTEXT "cgroup.swap_read"
+#define NETDATA_CGROUP_SWAP_WRITE_CONTEXT "cgroup.swap_write"
+#define NETDATA_SYSTEMD_SWAP_READ_CONTEXT "services.swap_read"
+#define NETDATA_SYSTEMD_SWAP_WRITE_CONTEXT "services.swap_write"
+
+typedef struct netdata_publish_swap {
+ uint64_t read;
+ uint64_t write;
+} netdata_publish_swap_t;
+
+enum swap_tables {
+ NETDATA_PID_SWAP_TABLE,
+ NETDATA_SWAP_CONTROLLER,
+ NETDATA_SWAP_GLOBAL_TABLE
+};
+
+enum swap_counters {
+ NETDATA_KEY_SWAP_READPAGE_CALL,
+ NETDATA_KEY_SWAP_WRITEPAGE_CALL,
+
+ // Keep this as the last entry and do not skip numbers, as it is used as an element counter.
+ NETDATA_SWAP_END
+};
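+
+/*
+ * Because the enumeration above is contiguous and starts at zero,
+ * NETDATA_SWAP_END doubles as the number of monitored counters, e.g.:
+ *
+ *     netdata_idx_t values[NETDATA_SWAP_END];
+ *     for (uint32_t i = NETDATA_KEY_SWAP_READPAGE_CALL; i < NETDATA_SWAP_END; i++)
+ *         values[i] = 0;
+ */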
+
+extern netdata_publish_swap_t **swap_pid;
+
+extern void *ebpf_swap_thread(void *ptr);
+extern void ebpf_swap_create_apps_charts(struct ebpf_module *em, void *ptr);
+extern void clean_swap_pid_structures();
+
+extern struct config swap_config;
+
+#endif /* NETDATA_EBPF_SWAP_H */
diff --git a/collectors/ebpf.plugin/ebpf_sync.c b/collectors/ebpf.plugin/ebpf_sync.c
index f0db1cc4a..4bd62bcae 100644
--- a/collectors/ebpf.plugin/ebpf_sync.c
+++ b/collectors/ebpf.plugin/ebpf_sync.c
@@ -3,8 +3,6 @@
#include "ebpf.h"
#include "ebpf_sync.h"
-static ebpf_data_t sync_data;
-
static char *sync_counter_dimension_name[NETDATA_SYNC_IDX_END] = { "sync", "syncfs", "msync", "fsync", "fdatasync",
"sync_file_range" };
static netdata_syscall_stat_t sync_counter_aggregated_data[NETDATA_SYNC_IDX_END];
@@ -17,6 +15,28 @@ static netdata_idx_t sync_hash_values[NETDATA_SYNC_IDX_END];
struct netdata_static_thread sync_threads = {"SYNC KERNEL", NULL, NULL, 1,
NULL, NULL, NULL};
+static ebpf_local_maps_t sync_maps[] = {{.name = "tbl_sync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_syncfs", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_msync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_fsync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_fdatasync", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_syncfr", .internal_input = NETDATA_SYNC_END,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED}};
+
struct config sync_config = { .first_section = NULL,
.last_section = NULL,
.mutex = NETDATA_MUTEX_INITIALIZER,
@@ -53,15 +73,8 @@ static int ebpf_sync_initialize_syscall(ebpf_module_t *em)
for (i = 0; local_syscalls[i].syscall; i++) {
ebpf_sync_syscalls_t *w = &local_syscalls[i];
if (!w->probe_links && w->enabled) {
- fill_ebpf_data(&w->kernel_info);
- if (ebpf_update_kernel(&w->kernel_info)) {
- em->thread_name = saved_name;
- error("Cannot update the kernel for eBPF module %s", w->syscall);
- return -1;
- }
-
em->thread_name = w->syscall;
- w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &w->objects, w->kernel_info.map_fd);
+ w->probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &w->objects);
if (!w->probe_links) {
em->thread_name = saved_name;
return -1;
@@ -95,7 +108,7 @@ static void read_global_table()
int i;
for (i = 0; local_syscalls[i].syscall; i++) {
if (local_syscalls[i].enabled) {
- int fd = local_syscalls[i].kernel_info.map_fd[NETDATA_SYNC_GLOBLAL_TABLE];
+ int fd = sync_maps[i].map_fd;
if (!bpf_map_lookup_elem(fd, &idx, &stored)) {
sync_hash_values[i] = stored;
}
@@ -119,7 +132,7 @@ void *ebpf_sync_read_hash(void *ptr)
heartbeat_t hb;
heartbeat_init(&hb);
- usec_t step = NETDATA_EBPF_SYNC_SLEEP_MS * em->update_time;
+ usec_t step = NETDATA_EBPF_SYNC_SLEEP_MS * em->update_every;
while (!close_ebpf_plugin) {
usec_t dt = heartbeat_next(&hb, step);
@@ -197,15 +210,20 @@ static void sync_collector(ebpf_module_t *em)
netdata_thread_create(sync_threads.thread, sync_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
ebpf_sync_read_hash, em);
+ int update_every = em->update_every;
+ int counter = update_every - 1;
while (!close_ebpf_plugin) {
pthread_mutex_lock(&collect_data_mutex);
pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
- pthread_mutex_lock(&lock);
+ if (++counter == update_every) {
+ counter = 0;
+ pthread_mutex_lock(&lock);
- sync_send_data();
+ sync_send_data();
- pthread_mutex_unlock(&lock);
+ pthread_mutex_unlock(&lock);
+ }
pthread_mutex_unlock(&collect_data_mutex);
}
}
@@ -228,8 +246,6 @@ void ebpf_sync_cleanup_objects()
for (i = 0; local_syscalls[i].syscall; i++) {
ebpf_sync_syscalls_t *w = &local_syscalls[i];
if (w->probe_links) {
- freez(w->kernel_info.map_fd);
-
struct bpf_program *prog;
size_t j = 0 ;
bpf_object__for_each_program(prog, w->objects) {
@@ -280,15 +296,19 @@ static void ebpf_sync_cleanup(void *ptr)
* @param order order number of the specified chart
* @param idx the first index with data.
* @param end the last index with data.
+ * @param update_every value to overwrite the update frequency set by the server.
*/
static void ebpf_create_sync_chart(char *id,
char *title,
int order,
int idx,
- int end)
+ int end,
+ int update_every)
{
ebpf_write_chart_cmd(NETDATA_EBPF_MEMORY_GROUP, id, title, EBPF_COMMON_DIMENSION_CALL,
- NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NULL, order);
+ NETDATA_EBPF_SYNC_SUBMENU, NETDATA_EBPF_CHART_TYPE_LINE, NULL, order,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_SYNC);
netdata_publish_syscall_t *move = &sync_counter_publish_aggregated[idx];
@@ -305,28 +325,30 @@ static void ebpf_create_sync_chart(char *id,
* Create global charts
*
* Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param update_every value to overwrite the update frequency set by the server.
*/
-static void ebpf_create_sync_charts()
+static void ebpf_create_sync_charts(int update_every)
{
if (local_syscalls[NETDATA_SYNC_FSYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_FDATASYNC_IDX].enabled)
ebpf_create_sync_chart(NETDATA_EBPF_FILE_SYNC_CHART,
"Monitor calls for <code>fsync(2)</code> and <code>fdatasync(2)</code>.", 21300,
- NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX);
+ NETDATA_SYNC_FSYNC_IDX, NETDATA_SYNC_FDATASYNC_IDX, update_every);
if (local_syscalls[NETDATA_SYNC_MSYNC_IDX].enabled)
ebpf_create_sync_chart(NETDATA_EBPF_MSYNC_CHART,
"Monitor calls for <code>msync(2)</code>.", 21301,
- NETDATA_SYNC_MSYNC_IDX, NETDATA_SYNC_MSYNC_IDX);
+ NETDATA_SYNC_MSYNC_IDX, NETDATA_SYNC_MSYNC_IDX, update_every);
if (local_syscalls[NETDATA_SYNC_SYNC_IDX].enabled || local_syscalls[NETDATA_SYNC_SYNCFS_IDX].enabled)
ebpf_create_sync_chart(NETDATA_EBPF_SYNC_CHART,
"Monitor calls for <code>sync(2)</code> and <code>syncfs(2)</code>.", 21302,
- NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX);
+ NETDATA_SYNC_SYNC_IDX, NETDATA_SYNC_SYNCFS_IDX, update_every);
if (local_syscalls[NETDATA_SYNC_SYNC_FILE_RANGE_IDX].enabled)
ebpf_create_sync_chart(NETDATA_EBPF_FILE_SEGMENT_CHART,
"Monitor calls for <code>sync_file_range(2)</code>.", 21303,
- NETDATA_SYNC_SYNC_FILE_RANGE_IDX, NETDATA_SYNC_SYNC_FILE_RANGE_IDX);
+ NETDATA_SYNC_SYNC_FILE_RANGE_IDX, NETDATA_SYNC_SYNC_FILE_RANGE_IDX, update_every);
}
/**
@@ -357,9 +379,8 @@ void *ebpf_sync_thread(void *ptr)
netdata_thread_cleanup_push(ebpf_sync_cleanup, ptr);
ebpf_module_t *em = (ebpf_module_t *)ptr;
- fill_ebpf_data(&sync_data);
+ em->maps = sync_maps;
- ebpf_update_module(em, &sync_config, NETDATA_SYNC_CONFIG_FILE);
ebpf_sync_parse_syscalls();
if (!em->enabled)
@@ -378,7 +399,7 @@ void *ebpf_sync_thread(void *ptr)
algorithms, NETDATA_SYNC_IDX_END);
pthread_mutex_lock(&lock);
- ebpf_create_sync_charts();
+ ebpf_create_sync_charts(em->update_every);
pthread_mutex_unlock(&lock);
sync_collector(em);
diff --git a/collectors/ebpf.plugin/ebpf_sync.h b/collectors/ebpf.plugin/ebpf_sync.h
index 458318218..1f811d341 100644
--- a/collectors/ebpf.plugin/ebpf_sync.h
+++ b/collectors/ebpf.plugin/ebpf_sync.h
@@ -3,6 +3,9 @@
#ifndef NETDATA_EBPF_SYNC_H
#define NETDATA_EBPF_SYNC_H 1
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_SYNC "sync"
+
// charts
#define NETDATA_EBPF_SYNC_CHART "sync"
#define NETDATA_EBPF_MSYNC_CHART "memory_map"
@@ -34,8 +37,6 @@ typedef struct ebpf_sync_syscalls {
struct bpf_object *objects;
struct bpf_link **probe_links;
-
- ebpf_data_t kernel_info;
} ebpf_sync_syscalls_t;
enum netdata_sync_charts {
@@ -50,5 +51,6 @@ enum netdata_sync_table {
};
extern void *ebpf_sync_thread(void *ptr);
+extern struct config sync_config;
#endif /* NETDATA_EBPF_SYNC_H */
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
new file mode 100644
index 000000000..060469ec5
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_vfs.c
@@ -0,0 +1,1601 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#include <sys/resource.h>
+
+#include "ebpf.h"
+#include "ebpf_vfs.h"
+
+static char *vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_END] = { "delete", "read", "write",
+ "fsync", "open", "create" };
+static char *vfs_id_names[NETDATA_KEY_PUBLISH_VFS_END] = { "vfs_unlink", "vfs_read", "vfs_write",
+ "vfs_fsync", "vfs_open", "vfs_create"};
+
+static netdata_idx_t *vfs_hash_values = NULL;
+static netdata_syscall_stat_t vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_END];
+static netdata_publish_syscall_t vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_END];
+netdata_publish_vfs_t **vfs_pid = NULL;
+netdata_publish_vfs_t *vfs_vector = NULL;
+
+static ebpf_local_maps_t vfs_maps[] = {{.name = "tbl_vfs_pid", .internal_input = ND_EBPF_DEFAULT_PID_SIZE,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_RESIZABLE | NETDATA_EBPF_MAP_PID,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "tbl_vfs_stats", .internal_input = NETDATA_VFS_COUNTER,
+ .user_input = 0, .type = NETDATA_EBPF_MAP_STATIC,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = "vfs_ctrl", .internal_input = NETDATA_CONTROLLER_END,
+ .user_input = 0,
+ .type = NETDATA_EBPF_MAP_CONTROLLER,
+ .map_fd = ND_EBPF_MAP_FD_NOT_INITIALIZED},
+ {.name = NULL, .internal_input = 0, .user_input = 0}};
+
+struct config vfs_config = { .first_section = NULL,
+ .last_section = NULL,
+ .mutex = NETDATA_MUTEX_INITIALIZER,
+ .index = { .avl_tree = { .root = NULL, .compar = appconfig_section_compare },
+ .rwlock = AVL_LOCK_INITIALIZER } };
+
+static struct bpf_object *objects = NULL;
+static struct bpf_link **probe_links = NULL;
+
+struct netdata_static_thread vfs_threads = {"VFS KERNEL",
+ NULL, NULL, 1, NULL,
+ NULL, NULL};
+
+static int read_thread_closed = 1;
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CLOSE THE THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Clean PID structures
+ *
+ * Clean the allocated structures.
+ */
+void clean_vfs_pid_structures() {
+ struct pid_stat *pids = root_of_pids;
+ while (pids) {
+ freez(vfs_pid[pids->pid]);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Clean up the main thread.
+ *
+ * @param ptr thread data.
+ */
+static void ebpf_vfs_cleanup(void *ptr)
+{
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ if (!em->enabled)
+ return;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+ uint32_t tick = 50 * USEC_PER_MS;
+ while (!read_thread_closed) {
+ usec_t dt = heartbeat_next(&hb, tick);
+ UNUSED(dt);
+ }
+
+ freez(vfs_hash_values);
+ freez(vfs_vector);
+
+ if (probe_links) {
+ struct bpf_program *prog;
+ size_t i = 0 ;
+ bpf_object__for_each_program(prog, objects) {
+ bpf_link__destroy(probe_links[i]);
+ i++;
+ }
+ bpf_object__close(objects);
+ }
+}
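+
+/*
+ * Shutdown handshake sketch: the cleanup handler above polls
+ * `read_thread_closed` every 50 ms until the reader thread, which sets the
+ * flag right before returning, has finished; only then does it free the
+ * buffers the two threads share:
+ *
+ *     // reader:   read_thread_closed = 0;  ...collect...  read_thread_closed = 1;
+ *     // cleanup:  while (!read_thread_closed) { wait 50 ms; }  free shared buffers;
+ */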
+
+/*****************************************************************
+ *
+ * FUNCTIONS WITH THE MAIN LOOP
+ *
+ *****************************************************************/
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ */
+static void ebpf_vfs_send_data(ebpf_module_t *em)
+{
+ netdata_publish_vfs_common_t pvc;
+
+ pvc.write = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes;
+ pvc.read = (long)vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes;
+
+ write_count_chart(NETDATA_VFS_FILE_CLEAN_COUNT, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK], 1);
+
+ write_count_chart(NETDATA_VFS_FILE_IO_COUNT, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2);
+
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(NETDATA_VFS_FILE_ERR_COUNT, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ], 2);
+ }
+
+ write_io_chart(NETDATA_VFS_IO_FILE_BYTES, NETDATA_FILESYSTEM_FAMILY, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
+ (long long)pvc.write, vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ], (long long)pvc.read);
+
+ write_count_chart(NETDATA_VFS_FSYNC, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1);
+
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(NETDATA_VFS_FSYNC_ERR, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC], 1);
+ }
+
+ write_count_chart(NETDATA_VFS_OPEN, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1);
+
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(NETDATA_VFS_OPEN_ERR, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN], 1);
+ }
+
+ write_count_chart(NETDATA_VFS_CREATE, NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE], 1);
+
+ if (em->mode < MODE_ENTRY) {
+ write_err_chart(
+ NETDATA_VFS_CREATE_ERR,
+ NETDATA_FILESYSTEM_FAMILY,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
+ 1);
+ }
+}
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void read_global_table()
+{
+ uint64_t idx;
+ netdata_idx_t res[NETDATA_VFS_COUNTER];
+
+ netdata_idx_t *val = vfs_hash_values;
+ int fd = vfs_maps[NETDATA_VFS_ALL].map_fd;
+ for (idx = 0; idx < NETDATA_VFS_COUNTER; idx++) {
+ uint64_t total = 0;
+ if (!bpf_map_lookup_elem(fd, &idx, val)) {
+ int i;
+ int end = ebpf_nprocs;
+ for (i = 0; i < end; i++)
+ total += val[i];
+ }
+ res[idx] = total;
+ }
+
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].ncall = res[NETDATA_KEY_CALLS_VFS_UNLINK];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].ncall = res[NETDATA_KEY_CALLS_VFS_READ] +
+ res[NETDATA_KEY_CALLS_VFS_READV];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].ncall = res[NETDATA_KEY_CALLS_VFS_WRITE] +
+ res[NETDATA_KEY_CALLS_VFS_WRITEV];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].ncall = res[NETDATA_KEY_CALLS_VFS_FSYNC];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].ncall = res[NETDATA_KEY_CALLS_VFS_OPEN];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].ncall = res[NETDATA_KEY_CALLS_VFS_CREATE];
+
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].nerr = res[NETDATA_KEY_ERROR_VFS_UNLINK];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].nerr = res[NETDATA_KEY_ERROR_VFS_READ] +
+ res[NETDATA_KEY_ERROR_VFS_READV];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].nerr = res[NETDATA_KEY_ERROR_VFS_WRITE] +
+ res[NETDATA_KEY_ERROR_VFS_WRITEV];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].nerr = res[NETDATA_KEY_ERROR_VFS_FSYNC];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].nerr = res[NETDATA_KEY_ERROR_VFS_OPEN];
+ vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].nerr = res[NETDATA_KEY_ERROR_VFS_CREATE];
+
+ vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_WRITE].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITE] +
+ (uint64_t)res[NETDATA_KEY_BYTES_VFS_WRITEV];
+ vfs_aggregated_data[NETDATA_KEY_PUBLISH_VFS_READ].bytes = (uint64_t)res[NETDATA_KEY_BYTES_VFS_READ] +
+ (uint64_t)res[NETDATA_KEY_BYTES_VFS_READV];
+}
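+
+/*
+ * Mapping sketch: the raw table keeps one slot per kernel function, while the
+ * published dimensions fold the vectored variants into their scalar
+ * counterparts, so the values charted as "read" and "write" are really:
+ *
+ *     reads  = res[NETDATA_KEY_CALLS_VFS_READ]  + res[NETDATA_KEY_CALLS_VFS_READV];
+ *     writes = res[NETDATA_KEY_CALLS_VFS_WRITE] + res[NETDATA_KEY_CALLS_VFS_WRITEV];
+ */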
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param vfs  output structure used to store the sums
+ * @param root link list with the PIDs of the target
+ */
+static void ebpf_vfs_sum_pids(netdata_publish_vfs_t *vfs, struct pid_on_target *root)
+{
+ netdata_publish_vfs_t accumulator;
+ memset(&accumulator, 0, sizeof(accumulator));
+
+ while (root) {
+ int32_t pid = root->pid;
+ netdata_publish_vfs_t *w = vfs_pid[pid];
+ if (w) {
+ accumulator.write_call += w->write_call;
+ accumulator.writev_call += w->writev_call;
+ accumulator.read_call += w->read_call;
+ accumulator.readv_call += w->readv_call;
+ accumulator.unlink_call += w->unlink_call;
+ accumulator.fsync_call += w->fsync_call;
+ accumulator.open_call += w->open_call;
+ accumulator.create_call += w->create_call;
+
+ accumulator.write_bytes += w->write_bytes;
+ accumulator.writev_bytes += w->writev_bytes;
+ accumulator.read_bytes += w->read_bytes;
+ accumulator.readv_bytes += w->readv_bytes;
+
+ accumulator.write_err += w->write_err;
+ accumulator.writev_err += w->writev_err;
+ accumulator.read_err += w->read_err;
+ accumulator.readv_err += w->readv_err;
+ accumulator.unlink_err += w->unlink_err;
+ accumulator.fsync_err += w->fsync_err;
+ accumulator.open_err += w->open_err;
+ accumulator.create_err += w->create_err;
+ }
+ root = root->next;
+ }
+
+ // These conditions were added because we are using an incremental algorithm.
+ vfs->write_call = (accumulator.write_call >= vfs->write_call) ? accumulator.write_call : vfs->write_call;
+ vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call;
+ vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call;
+ vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call;
+ vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call;
+ vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call;
+ vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call;
+ vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call;
+
+ vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes;
+ vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes;
+ vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes;
+ vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes;
+
+ vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err;
+ vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err;
+ vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err;
+ vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err;
+ vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? accumulator.unlink_err : vfs->unlink_err;
+ vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err;
+ vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err;
+ vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err;
+}
+
+/**
+ * Send data to Netdata calling auxiliary functions.
+ *
+ * @param em the structure with thread information
+ * @param root the target list.
+ */
+void ebpf_vfs_send_apps_data(ebpf_module_t *em, struct target *root)
+{
+ struct target *w;
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ ebpf_vfs_sum_pids(&w->vfs, w->root_pid);
+ }
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.unlink_call);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.write_call + w->vfs.writev_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.write_err + w->vfs.writev_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.read_call + w->vfs.readv_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.read_err + w->vfs.readv_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.write_bytes + w->vfs.writev_bytes);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.read_bytes + w->vfs.readv_bytes);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.fsync_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.fsync_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.open_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.open_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.create_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_APPS_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR);
+ for (w = root; w; w = w->next) {
+ if (unlikely(w->exposed && w->processes)) {
+ write_chart_dimension(w->name, w->vfs.create_err);
+ }
+ }
+ write_end_chart();
+ }
+}
+
+/**
+ * Apps Accumulator
+ *
+ * Sum all values read from kernel and store in the first address.
+ *
+ * @param out the vector with read values.
+ */
+static void vfs_apps_accumulator(netdata_publish_vfs_t *out)
+{
+ int i, end = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ netdata_publish_vfs_t *total = &out[0];
+ for (i = 1; i < end; i++) {
+ netdata_publish_vfs_t *w = &out[i];
+
+ total->write_call += w->write_call;
+ total->writev_call += w->writev_call;
+ total->read_call += w->read_call;
+ total->readv_call += w->readv_call;
+ total->unlink_call += w->unlink_call;
+ total->fsync_call += w->fsync_call;
+ total->open_call += w->open_call;
+ total->create_call += w->create_call;
+
+ total->write_bytes += w->write_bytes;
+ total->writev_bytes += w->writev_bytes;
+ total->read_bytes += w->read_bytes;
+ total->readv_bytes += w->readv_bytes;
+
+ total->write_err += w->write_err;
+ total->writev_err += w->writev_err;
+ total->read_err += w->read_err;
+ total->readv_err += w->readv_err;
+ total->unlink_err += w->unlink_err;
+ total->fsync_err += w->fsync_err;
+ total->open_err += w->open_err;
+ total->create_err += w->create_err;
+ }
+}
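+
+/*
+ * The accumulator assumes the kernel-dependent layout of the PID table: on
+ * kernels >= 4.15 each PID entry carries one element per CPU, otherwise a
+ * single element. Summing everything into out[0] lets callers treat both
+ * layouts the same way, e.g. (buffer sizing shown for illustration):
+ *
+ *     int slots = (running_on_kernel >= NETDATA_KERNEL_V4_15) ? ebpf_nprocs : 1;
+ *     netdata_publish_vfs_t *buffer = callocz((size_t)slots, sizeof(netdata_publish_vfs_t));
+ *     // after bpf_map_lookup_elem() fills `buffer`:
+ *     vfs_apps_accumulator(buffer);   // totals land in buffer[0]
+ */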
+
+/**
+ * Fill PID
+ *
+ * Fill PID structures
+ *
+ * @param current_pid the PID for which we are collecting data
+ * @param publish     values read from the hash table.
+ */
+static void vfs_fill_pid(uint32_t current_pid, netdata_publish_vfs_t *publish)
+{
+ netdata_publish_vfs_t *curr = vfs_pid[current_pid];
+ if (!curr) {
+ curr = callocz(1, sizeof(netdata_publish_vfs_t));
+ vfs_pid[current_pid] = curr;
+ }
+
+ memcpy(curr, &publish[0], sizeof(netdata_publish_vfs_t));
+}
+
+/**
+ * Read the hash table and store data to allocated vectors.
+ */
+static void ebpf_vfs_read_apps()
+{
+ struct pid_stat *pids = root_of_pids;
+ netdata_publish_vfs_t *vv = vfs_vector;
+ int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
+ size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
+ while (pids) {
+ uint32_t key = pids->pid;
+
+ if (bpf_map_lookup_elem(fd, &key, vv)) {
+ pids = pids->next;
+ continue;
+ }
+
+ vfs_apps_accumulator(vv);
+
+ vfs_fill_pid(key, vv);
+
+ // We are cleaning to avoid passing data read from one process to another.
+ memset(vv, 0, length);
+
+ pids = pids->next;
+ }
+}
+
+/**
+ * Update cgroup
+ *
+ * Update cgroup data based on the values collected for each PID.
+ */
+static void read_update_vfs_cgroup()
+{
+ ebpf_cgroup_target_t *ect;
+ netdata_publish_vfs_t *vv = vfs_vector;
+ int fd = vfs_maps[NETDATA_VFS_PID].map_fd;
+ size_t length = sizeof(netdata_publish_vfs_t) * ebpf_nprocs;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ for (ect = ebpf_cgroup_pids; ect; ect = ect->next) {
+ struct pid_on_target2 *pids;
+ for (pids = ect->pids; pids; pids = pids->next) {
+ int pid = pids->pid;
+ netdata_publish_vfs_t *out = &pids->vfs;
+ if (likely(vfs_pid) && vfs_pid[pid]) {
+ netdata_publish_vfs_t *in = vfs_pid[pid];
+
+ memcpy(out, in, sizeof(netdata_publish_vfs_t));
+ } else {
+ memset(vv, 0, length);
+ if (!bpf_map_lookup_elem(fd, &pid, vv)) {
+ vfs_apps_accumulator(vv);
+
+ memcpy(out, vv, sizeof(netdata_publish_vfs_t));
+ }
+ }
+ }
+ }
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * VFS read hash
+ *
+ * This is the thread callback.
+ * This thread is necessary, because we cannot freeze the whole plugin to read the data.
+ *
+ * @param ptr a pointer to the `ebpf_module_t` structure used by this thread.
+ *
+ * @return It always returns NULL.
+ */
+void *ebpf_vfs_read_hash(void *ptr)
+{
+ read_thread_closed = 0;
+
+ heartbeat_t hb;
+ heartbeat_init(&hb);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+
+ usec_t step = NETDATA_LATENCY_VFS_SLEEP_MS * em->update_every;
+ while (!close_ebpf_plugin) {
+ usec_t dt = heartbeat_next(&hb, step);
+ (void)dt;
+
+ read_global_table();
+ }
+
+ read_thread_closed = 1;
+
+ return NULL;
+}
+
+/**
+ * Sum PIDs
+ *
+ * Sum values for all targets.
+ *
+ * @param vfs structure used to store data
+ * @param pids input data
+ */
+static void ebpf_vfs_sum_cgroup_pids(netdata_publish_vfs_t *vfs, struct pid_on_target2 *pids)
+{
+ netdata_publish_vfs_t accumulator;
+ memset(&accumulator, 0, sizeof(accumulator));
+
+ while (pids) {
+ netdata_publish_vfs_t *w = &pids->vfs;
+
+ accumulator.write_call += w->write_call;
+ accumulator.writev_call += w->writev_call;
+ accumulator.read_call += w->read_call;
+ accumulator.readv_call += w->readv_call;
+ accumulator.unlink_call += w->unlink_call;
+ accumulator.fsync_call += w->fsync_call;
+ accumulator.open_call += w->open_call;
+ accumulator.create_call += w->create_call;
+
+ accumulator.write_bytes += w->write_bytes;
+ accumulator.writev_bytes += w->writev_bytes;
+ accumulator.read_bytes += w->read_bytes;
+ accumulator.readv_bytes += w->readv_bytes;
+
+ accumulator.write_err += w->write_err;
+ accumulator.writev_err += w->writev_err;
+ accumulator.read_err += w->read_err;
+ accumulator.readv_err += w->readv_err;
+ accumulator.unlink_err += w->unlink_err;
+ accumulator.fsync_err += w->fsync_err;
+ accumulator.open_err += w->open_err;
+ accumulator.create_err += w->create_err;
+
+ pids = pids->next;
+ }
+
+ // These conditions were added because we are using an incremental algorithm.
+ vfs->write_call = (accumulator.write_call >= vfs->write_call) ? accumulator.write_call : vfs->write_call;
+ vfs->writev_call = (accumulator.writev_call >= vfs->writev_call) ? accumulator.writev_call : vfs->writev_call;
+ vfs->read_call = (accumulator.read_call >= vfs->read_call) ? accumulator.read_call : vfs->read_call;
+ vfs->readv_call = (accumulator.readv_call >= vfs->readv_call) ? accumulator.readv_call : vfs->readv_call;
+ vfs->unlink_call = (accumulator.unlink_call >= vfs->unlink_call) ? accumulator.unlink_call : vfs->unlink_call;
+ vfs->fsync_call = (accumulator.fsync_call >= vfs->fsync_call) ? accumulator.fsync_call : vfs->fsync_call;
+ vfs->open_call = (accumulator.open_call >= vfs->open_call) ? accumulator.open_call : vfs->open_call;
+ vfs->create_call = (accumulator.create_call >= vfs->create_call) ? accumulator.create_call : vfs->create_call;
+
+ vfs->write_bytes = (accumulator.write_bytes >= vfs->write_bytes) ? accumulator.write_bytes : vfs->write_bytes;
+ vfs->writev_bytes = (accumulator.writev_bytes >= vfs->writev_bytes) ? accumulator.writev_bytes : vfs->writev_bytes;
+ vfs->read_bytes = (accumulator.read_bytes >= vfs->read_bytes) ? accumulator.read_bytes : vfs->read_bytes;
+ vfs->readv_bytes = (accumulator.readv_bytes >= vfs->readv_bytes) ? accumulator.readv_bytes : vfs->readv_bytes;
+
+ vfs->write_err = (accumulator.write_err >= vfs->write_err) ? accumulator.write_err : vfs->write_err;
+ vfs->writev_err = (accumulator.writev_err >= vfs->writev_err) ? accumulator.writev_err : vfs->writev_err;
+ vfs->read_err = (accumulator.read_err >= vfs->read_err) ? accumulator.read_err : vfs->read_err;
+ vfs->readv_err = (accumulator.readv_err >= vfs->readv_err) ? accumulator.readv_err : vfs->readv_err;
+ vfs->unlink_err = (accumulator.unlink_err >= vfs->unlink_err) ? accumulator.unlink_err : vfs->unlink_err;
+ vfs->fsync_err = (accumulator.fsync_err >= vfs->fsync_err) ? accumulator.fsync_err : vfs->fsync_err;
+ vfs->open_err = (accumulator.open_err >= vfs->open_err) ? accumulator.open_err : vfs->open_err;
+ vfs->create_err = (accumulator.create_err >= vfs->create_err) ? accumulator.create_err : vfs->create_err;
+}
+
+/**
+ * Create specific VFS charts
+ *
+ * Create charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param em the main thread structure.
+ */
+static void ebpf_create_specific_vfs_charts(char *type, ebpf_module_t *em)
+{
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_UNLINK_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ }
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ }
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls for <code>vfs_fsync</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ }
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls for <code>vfs_open</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ }
+
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls for <code>vfs_create</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP, NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512,
+ ebpf_create_global_dimension, &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
+ }
+}
+
+/**
+ * Obsolete specific VFS charts
+ *
+ * Obsolete charts for cgroup/application.
+ *
+ * @param type the chart type.
+ * @param em the main thread structure.
+ */
+static void ebpf_obsolete_specific_vfs_charts(char *type, ebpf_module_t *em)
+{
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_UNLINK_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5500, em->update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5501, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5502, em->update_every);
+ }
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5503, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5504, em->update_every);
+ }
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5505, em->update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5506, em->update_every);
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls for <code>vfs_fsync</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5507, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5508, em->update_every);
+ }
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls for <code>vfs_open</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5509, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5510, em->update_every);
+ }
+
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls for <code>vfs_create</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5511, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_write_chart_obsolete(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_LINE, NULL,
+ NETDATA_CHART_PRIO_CGROUPS_CONTAINERS + 5512, em->update_every);
+ }
+}
+
+/*
+ * Send specific VFS data
+ *
+ * Send data for specific cgroup/apps.
+ *
+ * @param type chart type
+ * @param values structure with values that will be sent to netdata
+ * @param em     the structure with thread information
+ */
+static void ebpf_send_specific_vfs_data(char *type, netdata_publish_vfs_t *values, ebpf_module_t *em)
+{
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_FILE_DELETED);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].name, (long long)values->unlink_call);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
+ (long long)values->write_call + (long long)values->writev_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
+ (long long)values->write_err + (long long)values->writev_err);
+ write_end_chart();
+ }
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
+ (long long)values->read_call + (long long)values->readv_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
+ (long long)values->read_err + (long long)values->readv_err);
+ write_end_chart();
+ }
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_WRITE].name,
+ (long long)values->write_bytes + (long long)values->writev_bytes);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].name,
+ (long long)values->read_bytes + (long long)values->readv_bytes);
+ write_end_chart();
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name,
+ (long long)values->fsync_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC].name,
+ (long long)values->fsync_err);
+ write_end_chart();
+ }
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name,
+ (long long)values->open_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN].name,
+ (long long)values->open_err);
+ write_end_chart();
+ }
+
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name,
+ (long long)values->create_call);
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(type, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR);
+ write_chart_dimension(vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE].name,
+ (long long)values->create_err);
+ write_end_chart();
+ }
+}
+
+/**
+ * Create Systemd VFS Charts
+ *
+ * Create charts when systemd is enabled
+ *
+ * @param em the main collector structure
+ **/
+static void ebpf_create_systemd_vfs_charts(ebpf_module_t *em)
+{
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_FILE_DELETED, "Files deleted",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20065,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS, "Write to disk",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20066,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR, "Fails to write",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20067,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ }
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS, "Read from disk",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20068,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR, "Fails to read",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20069,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ }
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES, "Bytes written on disk",
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20070,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_READ_BYTES, "Bytes read from disk",
+ EBPF_COMMON_DIMENSION_BYTES, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20071,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC, "Calls to <code>vfs_fsync</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20072,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR, "Sync error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20073,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ }
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN, "Calls to <code>vfs_open</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20074,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR, "Open error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20075,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ }
+
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE, "Calls to <code>vfs_create</code>",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20076,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_systemd(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR, "Create error",
+ EBPF_COMMON_DIMENSION_CALL, NETDATA_VFS_CGROUP_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED, 20077,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX], NULL,
+ NETDATA_EBPF_MODULE_NAME_VFS, em->update_every);
+ }
+}
+
+/**
+ * Send Systemd charts
+ *
+ * Send collected data to Netdata.
+ *
+ * @param em the main collector structure
+ *
+ * @return It returns the status of chart creation: zero when a specific dimension must be removed,
+ *         otherwise 1, so the charts are not recreated unnecessarily.
+ */
+static int ebpf_send_systemd_vfs_charts(ebpf_module_t *em)
+{
+ int ret = 1;
+ ebpf_cgroup_target_t *ect;
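+    // Only this first chart loop updates ret: a cgroup that is not an updated systemd service
+    // sets it to zero, so the caller recreates the systemd charts without the stale dimension.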
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_FILE_DELETED);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.unlink_call);
+ } else
+ ret = 0;
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_call +
+ ect->publish_systemd_vfs.writev_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_err +
+ ect->publish_systemd_vfs.writev_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_call +
+ ect->publish_systemd_vfs.readv_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_err +
+ ect->publish_systemd_vfs.readv_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.write_bytes +
+ ect->publish_systemd_vfs.writev_bytes);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_READ_BYTES);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.read_bytes +
+ ect->publish_systemd_vfs.readv_bytes);
+ }
+ }
+ write_end_chart();
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.fsync_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.open_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_call);
+ }
+ }
+ write_end_chart();
+
+ if (em->mode < MODE_ENTRY) {
+ write_begin_chart(NETDATA_SERVICE_FAMILY, NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR);
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (unlikely(ect->systemd) && unlikely(ect->updated)) {
+ write_chart_dimension(ect->name, ect->publish_systemd_vfs.create_err);
+ }
+ }
+ write_end_chart();
+ }
+
+ return ret;
+}
+
+/**
+ * Send cgroup data to Netdata by calling the auxiliary functions.
+ *
+ * @param em the main collector structure
+ */
+static void ebpf_vfs_send_cgroup_data(ebpf_module_t *em)
+{
+ if (!ebpf_cgroup_pids)
+ return;
+
+ pthread_mutex_lock(&mutex_cgroup_shm);
+ ebpf_cgroup_target_t *ect;
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ ebpf_vfs_sum_cgroup_pids(&ect->publish_systemd_vfs, ect->pids);
+ }
+
+ int has_systemd = shm_ebpf_cgroup.header->systemd_enabled;
+ if (has_systemd) {
+ static int systemd_charts = 0;
+ if (!systemd_charts) {
+ ebpf_create_systemd_vfs_charts(em);
+ systemd_charts = 1;
+ }
+
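+        // A zero return signals that a dimension must be removed, forcing the
+        // systemd charts to be recreated on the next iteration.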
+ systemd_charts = ebpf_send_systemd_vfs_charts(em);
+ }
+
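+    // Cgroups that are not systemd services get their own chart family:
+    // create it on first update, refresh it while updated, obsolete it otherwise.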
+ for (ect = ebpf_cgroup_pids; ect ; ect = ect->next) {
+ if (ect->systemd)
+ continue;
+
+ if (!(ect->flags & NETDATA_EBPF_CGROUP_HAS_VFS_CHART) && ect->updated) {
+ ebpf_create_specific_vfs_charts(ect->name, em);
+ ect->flags |= NETDATA_EBPF_CGROUP_HAS_VFS_CHART;
+ }
+
+ if (ect->flags & NETDATA_EBPF_CGROUP_HAS_VFS_CHART) {
+ if (ect->updated) {
+ ebpf_send_specific_vfs_data(ect->name, &ect->publish_systemd_vfs, em);
+ } else {
+ ebpf_obsolete_specific_vfs_charts(ect->name, em);
+ ect->flags &= ~NETDATA_EBPF_CGROUP_HAS_VFS_CHART;
+ }
+ }
+ }
+
+ pthread_mutex_unlock(&mutex_cgroup_shm);
+}
+
+/**
+ * Main loop for this collector.
+ *
+ * @param em the structure with thread information
+ */
+static void vfs_collector(ebpf_module_t *em)
+{
+ vfs_threads.thread = mallocz(sizeof(netdata_thread_t));
+ vfs_threads.start_routine = ebpf_vfs_read_hash;
+
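+    // Spawn the auxiliary thread (ebpf_vfs_read_hash) that reads the eBPF hash tables.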
+ netdata_thread_create(vfs_threads.thread, vfs_threads.name, NETDATA_THREAD_OPTION_JOINABLE,
+ ebpf_vfs_read_hash, em);
+
+ int apps = em->apps_charts;
+ int cgroups = em->cgroup_charts;
+ int update_every = em->update_every;
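+    // Start one step before the limit so the first heartbeat already sends data.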
+ int counter = update_every - 1;
+ while (!close_ebpf_plugin) {
+ pthread_mutex_lock(&collect_data_mutex);
+ pthread_cond_wait(&collect_data_cond_var, &collect_data_mutex);
+
+ if (++counter == update_every) {
+ counter = 0;
+ if (apps)
+ ebpf_vfs_read_apps();
+
+ if (cgroups)
+ read_update_vfs_cgroup();
+
+ pthread_mutex_lock(&lock);
+
+ ebpf_vfs_send_data(em);
+ fflush(stdout);
+
+ if (apps)
+ ebpf_vfs_send_apps_data(em, apps_groups_root_target);
+
+ if (cgroups)
+ ebpf_vfs_send_cgroup_data(em);
+
+ pthread_mutex_unlock(&lock);
+ }
+ pthread_mutex_unlock(&collect_data_mutex);
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO CREATE CHARTS
+ *
+ *****************************************************************/
+
+/**
+ * Create IO chart
+ *
+ * @param family the chart family
+ * @param name the chart name
+ * @param axis the axis label
+ * @param web the group name used to attach the chart on dashboard
+ * @param order the order number of the specified chart
+ * @param algorithm the algorithm used to make the charts.
+ * @param update_every value to overwrite the update frequency set by the server.
+ */
+static void ebpf_create_io_chart(char *family, char *name, char *axis, char *web,
+ int order, int algorithm, int update_every)
+{
+ printf("CHART %s.%s '' 'Bytes written and read' '%s' '%s' '' line %d %d '' 'ebpf.plugin' 'filesystem'\n",
+ family,
+ name,
+ axis,
+ web,
+ order,
+ update_every);
+
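+    // Read bytes use multiplier 1 and written bytes use -1, so writes are plotted below zero.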
+ printf("DIMENSION %s %s %s 1 1\n",
+ vfs_id_names[NETDATA_KEY_PUBLISH_VFS_READ],
+ vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_READ],
+ ebpf_algorithms[algorithm]);
+ printf("DIMENSION %s %s %s -1 1\n",
+ vfs_id_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
+ vfs_dimension_names[NETDATA_KEY_PUBLISH_VFS_WRITE],
+ ebpf_algorithms[algorithm]);
+}
+
+/**
+ * Create global charts
+ *
+ * Call ebpf_create_chart to create the charts for the collector.
+ *
+ * @param em a pointer to the structure with the default values.
+ */
+static void ebpf_create_global_charts(ebpf_module_t *em)
+{
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_FILE_CLEAN_COUNT,
+ "Remove files",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_CLEAN,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_FILE_IO_COUNT,
+ "Calls to IO",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_COUNT,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ ebpf_create_io_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_IO_FILE_BYTES, EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_VFS_GROUP,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_BYTES,
+ NETDATA_EBPF_INCREMENTAL_IDX, em->update_every);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_FILE_ERR_COUNT,
+ "Fails to write or read",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EBYTES,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ],
+ 2, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_FSYNC,
+ "Calls for <code>vfs_fsync</code>",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_FSYNC,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_FSYNC_ERR,
+ "Fails to synchronize",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EFSYNC,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_FSYNC],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_OPEN,
+ "Calls for <code>vfs_open</code>",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_OPEN,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_OPEN_ERR,
+ "Fails to open a file",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_EOPEN,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_OPEN],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_CREATE,
+ "Calls for <code>vfs_create</code>",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_CREATE,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_chart(NETDATA_FILESYSTEM_FAMILY,
+ NETDATA_VFS_CREATE_ERR,
+ "Fails to create a file.",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NULL,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NETDATA_CHART_PRIO_FILESYSTEM_VFS_IO_ECREATE,
+ ebpf_create_global_dimension,
+ &vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_CREATE],
+ 1, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+}
+
+/**
+ * Create VFS apps charts
+ *
+ * Call ebpf_create_charts_on_apps to create the charts on the apps submenu.
+ *
+ * @param em a pointer to the structure with the default values.
+ * @param ptr a pointer for the targets.
+ **/
+void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr)
+{
+ struct target *root = ptr;
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_FILE_DELETED,
+ "Files deleted",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20065,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS,
+ "Write to disk",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20066,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR,
+ "Fails to write",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20067,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS,
+ "Read from disk",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20068,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR,
+ "Fails to read",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20069,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES,
+ "Bytes written on disk", EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20070,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_READ_BYTES,
+ "Bytes read from disk", EBPF_COMMON_DIMENSION_BYTES,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20071,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC,
+ "Calls for <code>vfs_fsync</code>", EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20072,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR,
+ "Sync error",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20073,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN,
+ "Calls for <code>vfs_open</code>", EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20074,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR,
+ "Open error",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20075,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE,
+ "Calls for <code>vfs_create</code>", EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20076,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+
+ if (em->mode < MODE_ENTRY) {
+ ebpf_create_charts_on_apps(NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR,
+ "Create error",
+ EBPF_COMMON_DIMENSION_CALL,
+ NETDATA_VFS_GROUP,
+ NETDATA_EBPF_CHART_TYPE_STACKED,
+ 20077,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX],
+ root, em->update_every, NETDATA_EBPF_MODULE_NAME_VFS);
+ }
+}
+
+/*****************************************************************
+ *
+ * FUNCTIONS TO START THREAD
+ *
+ *****************************************************************/
+
+/**
+ * Allocate the vectors used by this thread.
+ * We do not test the return value, because callocz already does this and shuts down
+ * the software when the allocation is not possible.
+ *
+ * @param apps whether apps charts are enabled
+ */
+static void ebpf_vfs_allocate_global_vectors(int apps)
+{
+ memset(vfs_aggregated_data, 0, sizeof(vfs_aggregated_data));
+ memset(vfs_publish_aggregated, 0, sizeof(vfs_publish_aggregated));
+
+ vfs_hash_values = callocz(ebpf_nprocs, sizeof(netdata_idx_t));
+ vfs_vector = callocz(ebpf_nprocs, sizeof(netdata_publish_vfs_t));
+
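+    // The per-PID vector is only needed when apps charts are enabled; it is indexed by PID up to pid_max.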
+ if (apps)
+ vfs_pid = callocz((size_t)pid_max, sizeof(netdata_publish_vfs_t *));
+}
+
+/*****************************************************************
+ *
+ * EBPF VFS THREAD
+ *
+ *****************************************************************/
+
+/**
+ * VFS thread
+ *
+ * Thread used to generate the VFS charts.
+ *
+ * @param ptr a pointer to `struct ebpf_module`
+ *
+ * @return It always returns NULL
+ */
+void *ebpf_vfs_thread(void *ptr)
+{
+ netdata_thread_cleanup_push(ebpf_vfs_cleanup, ptr);
+
+ ebpf_module_t *em = (ebpf_module_t *)ptr;
+ em->maps = vfs_maps;
+
+ ebpf_update_pid_table(&vfs_maps[NETDATA_VFS_PID], em);
+
+ ebpf_vfs_allocate_global_vectors(em->apps_charts);
+
+ if (!em->enabled)
+ goto endvfs;
+
+ probe_links = ebpf_load_program(ebpf_plugin_dir, em, kernel_string, &objects);
+ if (!probe_links) {
+ goto endvfs;
+ }
+
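+    // Every VFS dimension is exported as an incremental (rate) metric.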
+ int algorithms[NETDATA_KEY_PUBLISH_VFS_END] = {
+        NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX,
+        NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX, NETDATA_EBPF_INCREMENTAL_IDX
+ };
+
+ ebpf_global_labels(vfs_aggregated_data, vfs_publish_aggregated, vfs_dimension_names,
+ vfs_id_names, algorithms, NETDATA_KEY_PUBLISH_VFS_END);
+
+ pthread_mutex_lock(&lock);
+ ebpf_create_global_charts(em);
+ pthread_mutex_unlock(&lock);
+
+ vfs_collector(em);
+
+endvfs:
+ netdata_thread_cleanup_pop(1);
+ return NULL;
+}
diff --git a/collectors/ebpf.plugin/ebpf_vfs.h b/collectors/ebpf.plugin/ebpf_vfs.h
new file mode 100644
index 000000000..0a972c983
--- /dev/null
+++ b/collectors/ebpf.plugin/ebpf_vfs.h
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-3.0-or-later
+
+#ifndef NETDATA_EBPF_VFS_H
+#define NETDATA_EBPF_VFS_H 1
+
+// Module name
+#define NETDATA_EBPF_MODULE_NAME_VFS "vfs"
+
+#define NETDATA_DIRECTORY_VFS_CONFIG_FILE "vfs.conf"
+
+#define NETDATA_LATENCY_VFS_SLEEP_MS 750000ULL
+
+// Global chart name
+#define NETDATA_VFS_FILE_CLEAN_COUNT "vfs_deleted_objects"
+#define NETDATA_VFS_FILE_IO_COUNT "vfs_io"
+#define NETDATA_VFS_FILE_ERR_COUNT "vfs_io_error"
+#define NETDATA_VFS_IO_FILE_BYTES "vfs_io_bytes"
+#define NETDATA_VFS_FSYNC "vfs_fsync"
+#define NETDATA_VFS_FSYNC_ERR "vfs_fsync_error"
+#define NETDATA_VFS_OPEN "vfs_open"
+#define NETDATA_VFS_OPEN_ERR "vfs_open_error"
+#define NETDATA_VFS_CREATE "vfs_create"
+#define NETDATA_VFS_CREATE_ERR "vfs_create_error"
+
+// Charts created on Apps submenu
+#define NETDATA_SYSCALL_APPS_FILE_DELETED "file_deleted"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS "vfs_write_call"
+#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS "vfs_read_call"
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_BYTES "vfs_write_bytes"
+#define NETDATA_SYSCALL_APPS_VFS_READ_BYTES "vfs_read_bytes"
+#define NETDATA_SYSCALL_APPS_VFS_FSYNC "vfs_fsync"
+#define NETDATA_SYSCALL_APPS_VFS_OPEN "vfs_open"
+#define NETDATA_SYSCALL_APPS_VFS_CREATE "vfs_create"
+
+#define NETDATA_SYSCALL_APPS_VFS_WRITE_CALLS_ERROR "vfs_write_error"
+#define NETDATA_SYSCALL_APPS_VFS_READ_CALLS_ERROR "vfs_read_error"
+#define NETDATA_SYSCALL_APPS_VFS_FSYNC_CALLS_ERROR "vfs_fsync_error"
+#define NETDATA_SYSCALL_APPS_VFS_OPEN_CALLS_ERROR "vfs_open_error"
+#define NETDATA_SYSCALL_APPS_VFS_CREATE_CALLS_ERROR "vfs_create_error"
+
+// Group used on Dashboard
+#define NETDATA_VFS_GROUP "vfs"
+#define NETDATA_VFS_CGROUP_GROUP "vfs (eBPF)"
+
+// Contexts
+#define NETDATA_CGROUP_VFS_UNLINK_CONTEXT "cgroup.vfs_unlink"
+#define NETDATA_CGROUP_VFS_WRITE_CONTEXT "cgroup.vfs_write"
+#define NETDATA_CGROUP_VFS_WRITE_ERROR_CONTEXT "cgroup.vfs_write_error"
+#define NETDATA_CGROUP_VFS_READ_CONTEXT "cgroup.vfs_read"
+#define NETDATA_CGROUP_VFS_READ_ERROR_CONTEXT "cgroup.vfs_read_error"
+#define NETDATA_CGROUP_VFS_WRITE_BYTES_CONTEXT "cgroup.vfs_write_bytes"
+#define NETDATA_CGROUP_VFS_READ_BYTES_CONTEXT "cgroup.vfs_read_bytes"
+
+#define NETDATA_SYSTEMD_VFS_UNLINK_CONTEXT "services.vfs_unlink"
+#define NETDATA_SYSTEMD_VFS_WRITE_CONTEXT "services.vfs_write"
+#define NETDATA_SYSTEMD_VFS_WRITE_ERROR_CONTEXT "services.vfs_write_error"
+#define NETDATA_SYSTEMD_VFS_READ_CONTEXT "services.vfs_read"
+#define NETDATA_SYSTEMD_VFS_READ_ERROR_CONTEXT "services.vfs_read_error"
+#define NETDATA_SYSTEMD_VFS_WRITE_BYTES_CONTEXT "services.vfs_write_bytes"
+#define NETDATA_SYSTEMD_VFS_READ_BYTES_CONTEXT "services.vfs_read_bytes"
+
+typedef struct netdata_publish_vfs {
+ uint64_t pid_tgid;
+ uint32_t pid;
+ uint32_t pad;
+
+    // Call counters
+ uint32_t write_call;
+ uint32_t writev_call;
+ uint32_t read_call;
+ uint32_t readv_call;
+ uint32_t unlink_call;
+ uint32_t fsync_call;
+ uint32_t open_call;
+ uint32_t create_call;
+
+    // Byte accumulators
+ uint64_t write_bytes;
+ uint64_t writev_bytes;
+ uint64_t readv_bytes;
+ uint64_t read_bytes;
+
+    // Error counters
+ uint32_t write_err;
+ uint32_t writev_err;
+ uint32_t read_err;
+ uint32_t readv_err;
+ uint32_t unlink_err;
+ uint32_t fsync_err;
+ uint32_t open_err;
+ uint32_t create_err;
+} netdata_publish_vfs_t;
+
+enum netdata_publish_vfs_list {
+ NETDATA_KEY_PUBLISH_VFS_UNLINK,
+ NETDATA_KEY_PUBLISH_VFS_READ,
+ NETDATA_KEY_PUBLISH_VFS_WRITE,
+ NETDATA_KEY_PUBLISH_VFS_FSYNC,
+ NETDATA_KEY_PUBLISH_VFS_OPEN,
+ NETDATA_KEY_PUBLISH_VFS_CREATE,
+
+ NETDATA_KEY_PUBLISH_VFS_END
+};
+
+enum vfs_counters {
+ NETDATA_KEY_CALLS_VFS_WRITE,
+ NETDATA_KEY_ERROR_VFS_WRITE,
+ NETDATA_KEY_BYTES_VFS_WRITE,
+
+ NETDATA_KEY_CALLS_VFS_WRITEV,
+ NETDATA_KEY_ERROR_VFS_WRITEV,
+ NETDATA_KEY_BYTES_VFS_WRITEV,
+
+ NETDATA_KEY_CALLS_VFS_READ,
+ NETDATA_KEY_ERROR_VFS_READ,
+ NETDATA_KEY_BYTES_VFS_READ,
+
+ NETDATA_KEY_CALLS_VFS_READV,
+ NETDATA_KEY_ERROR_VFS_READV,
+ NETDATA_KEY_BYTES_VFS_READV,
+
+ NETDATA_KEY_CALLS_VFS_UNLINK,
+ NETDATA_KEY_ERROR_VFS_UNLINK,
+
+ NETDATA_KEY_CALLS_VFS_FSYNC,
+ NETDATA_KEY_ERROR_VFS_FSYNC,
+
+ NETDATA_KEY_CALLS_VFS_OPEN,
+ NETDATA_KEY_ERROR_VFS_OPEN,
+
+ NETDATA_KEY_CALLS_VFS_CREATE,
+ NETDATA_KEY_ERROR_VFS_CREATE,
+
+    // Keep this as the last entry and do not skip numbers, as it is used as the element counter
+ NETDATA_VFS_COUNTER
+};
+
+enum netdata_vfs_tables {
+ NETDATA_VFS_PID,
+ NETDATA_VFS_ALL
+};
+
+extern netdata_publish_vfs_t **vfs_pid;
+
+extern void *ebpf_vfs_thread(void *ptr);
+extern void ebpf_vfs_create_apps_charts(struct ebpf_module *em, void *ptr);
+extern void clean_vfs_pid_structures();
+
+extern struct config vfs_config;
+
+#endif /* NETDATA_EBPF_VFS_H */
diff --git a/collectors/ebpf.plugin/reset_netdata_trace.sh.in b/collectors/ebpf.plugin/reset_netdata_trace.sh.in
deleted file mode 100644
index 51d981ee3..000000000
--- a/collectors/ebpf.plugin/reset_netdata_trace.sh.in
+++ /dev/null
@@ -1,9 +0,0 @@
-#!/bin/bash
-
-KPROBE_FILE="/sys/kernel/debug/tracing/kprobe_events"
-
-DATA="$(grep _netdata_ $KPROBE_FILE| cut -d' ' -f1 | cut -d: -f2)"
-
-for I in $DATA; do
- echo "-:$I" > $KPROBE_FILE 2>/dev/null;
-done