Diffstat (limited to 'collectors/ebpf.plugin')
-rw-r--r--  collectors/ebpf.plugin/ebpf.c                 167
-rw-r--r--  collectors/ebpf.plugin/ebpf.h                   8
-rw-r--r--  collectors/ebpf.plugin/ebpf_cachestat.c        36
-rw-r--r--  collectors/ebpf.plugin/ebpf_dcstat.c           36
-rw-r--r--  collectors/ebpf.plugin/ebpf_fd.c               38
-rw-r--r--  collectors/ebpf.plugin/ebpf_functions.c         2
-rw-r--r--  collectors/ebpf.plugin/ebpf_oomkill.c           4
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.c          37
-rw-r--r--  collectors/ebpf.plugin/ebpf_process.h           6
-rw-r--r--  collectors/ebpf.plugin/ebpf_shm.c              40
-rw-r--r--  collectors/ebpf.plugin/ebpf_socket.c           42
-rw-r--r--  collectors/ebpf.plugin/ebpf_swap.c             48
-rw-r--r--  collectors/ebpf.plugin/ebpf_vfs.c              39
-rw-r--r--  collectors/ebpf.plugin/metadata.yaml         3308
-rw-r--r--  collectors/ebpf.plugin/metrics.csv            197
-rw-r--r--  collectors/ebpf.plugin/multi_metadata.yaml   2360
16 files changed, 3639 insertions, 2729 deletions
diff --git a/collectors/ebpf.plugin/ebpf.c b/collectors/ebpf.plugin/ebpf.c
index 72aedba6a..844047305 100644
--- a/collectors/ebpf.plugin/ebpf.c
+++ b/collectors/ebpf.plugin/ebpf.c
@@ -60,7 +60,7 @@ ebpf_module_t ebpf_modules[] = {
NETDATA_V5_14,
.load = EBPF_LOAD_LEGACY, .targets = NULL, .probe_links = NULL, .objects = NULL,
.thread = NULL, .maps_per_core = CONFIG_BOOLEAN_YES, .lifetime = EBPF_DEFAULT_LIFETIME, .running_time = 0 },
- { .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC,
+ { .thread_name = "socket", .config_name = "socket", .thread_description = NETDATA_EBPF_SOCKET_MODULE_DESC,
.enabled = 0, .start_routine = ebpf_socket_thread,
.update_every = EBPF_DEFAULT_UPDATE_EVERY, .global_charts = 1, .apps_charts = NETDATA_EBPF_APPS_FLAG_NO,
.apps_level = NETDATA_APPS_LEVEL_REAL_PARENT, .cgroup_charts = CONFIG_BOOLEAN_NO, .mode = MODE_ENTRY, .optional = 0,
@@ -1229,7 +1229,7 @@ void write_histogram_chart(char *family, char *name, const netdata_idx_t *hist,
*/
int ebpf_statistic_create_aral_chart(char *name, ebpf_module_t *em)
{
- static int priority = 140100;
+ static int priority = NETATA_EBPF_ORDER_STAT_ARAL_BEGIN;
char *mem = { NETDATA_EBPF_STAT_DIMENSION_MEMORY };
char *aral = { NETDATA_EBPF_STAT_DIMENSION_ARAL };
@@ -1327,6 +1327,49 @@ void ebpf_send_data_aral_chart(ARAL *memory, ebpf_module_t *em)
/*****************************************************************
*
+ * FUNCTIONS TO READ GLOBAL HASH TABLES
+ *
+ *****************************************************************/
+
+/**
+ * Read Global Table Stats
+ *
+ * Read data from the specified table (map_fd) using the array allocated inside the thread (values), storing
+ * the totals in the stats vector starting from its first position.
+ *
+ * For PID tables it is recommended to use a dedicated function to parse the specific data.
+ *
+ * @param stats vector used to store data
+ * @param values helper to read data from hash tables.
+ * @param map_fd table that has data
+ * @param maps_per_core do I need to read all cores?
+ * @param begin initial value to query hash table
+ * @param end first value that will not be queried (exclusive limit).
+ */
+void ebpf_read_global_table_stats(netdata_idx_t *stats,
+ netdata_idx_t *values,
+ int map_fd,
+ int maps_per_core,
+ uint32_t begin,
+ uint32_t end)
+{
+ uint32_t idx, order;
+
+ for (idx = begin, order = 0; idx < end; idx++, order++) {
+ if (!bpf_map_lookup_elem(map_fd, &idx, values)) {
+ int i;
+ int before = (maps_per_core) ? ebpf_nprocs: 1;
+ netdata_idx_t total = 0;
+ for (i = 0; i < before; i++)
+ total += values[i];
+
+ stats[order] = total;
+ }
+ }
+}
+
+/*****************************************************************
+ *
* FUNCTIONS TO DEFINE OPTIONS
*
*****************************************************************/
@@ -2454,6 +2497,47 @@ static char *hash_table_stat = {"hash_table"};
static char *hash_table_core[NETDATA_EBPF_LOAD_STAT_END] = {"per_core", "unique"};
/**
+ * Send Hash Table PID data
+ *
+ * Send all information associated with a specific pid table.
+ *
+ * @param chart chart id
+ * @param idx index position in hash_table_stats
+ */
+static inline void ebpf_send_hash_table_pid_data(char *chart, uint32_t idx)
+{
+ int i;
+ write_begin_chart(NETDATA_MONITORING_FAMILY, chart);
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_module_t *wem = &ebpf_modules[i];
+ if (wem->apps_routine)
+ write_chart_dimension((char *)wem->thread_name,
+ (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ?
+ wem->hash_table_stats[idx]:
+ 0);
+ }
+ write_end_chart();
+}
+
+/**
+ * Send Global Hash Table data
+ *
+ * Send the number of elements stored inside each global hash table.
+ *
+ */
+static inline void ebpf_send_global_hash_table_data()
+{
+ int i;
+ write_begin_chart(NETDATA_MONITORING_FAMILY, NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS);
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_module_t *wem = &ebpf_modules[i];
+ write_chart_dimension((char *)wem->thread_name,
+ (wem->enabled < NETDATA_THREAD_EBPF_STOPPING) ? NETDATA_CONTROLLER_END: 0);
+ }
+ write_end_chart();
+}
+
+/**
* Send Statistic Data
*
* Send statistic information to netdata.
@@ -2500,6 +2584,11 @@ void ebpf_send_statistic_data()
write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_PER_CORE], (long long)plugin_statistics.hash_percpu);
write_chart_dimension(hash_table_core[NETDATA_EBPF_THREAD_UNIQUE], (long long)plugin_statistics.hash_unique);
write_end_chart();
+
+ ebpf_send_global_hash_table_data();
+
+ ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD);
+ ebpf_send_hash_table_pid_data(NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS, NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL);
}
/**
@@ -2681,6 +2770,66 @@ static inline void ebpf_create_statistic_hash_per_core(int update_every)
ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
}
+/**
+ * Hash table global elements
+ *
+ * Write to standard output current values inside global tables.
+ *
+ * @param update_every time used to update charts
+ */
+static void ebpf_create_statistic_hash_global_elements(int update_every)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS,
+ "Controllers inside global table",
+ "rows",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ int i;
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_write_global_dimension((char *)ebpf_modules[i].thread_name,
+ (char *)ebpf_modules[i].thread_name,
+ ebpf_algorithms[NETDATA_EBPF_ABSOLUTE_IDX]);
+ }
+}
+
+/**
+ * Hash table PID elements
+ *
+ * Write to standard output the current number of elements inside PID tables.
+ *
+ * @param update_every time used to update charts
+ * @param id chart id
+ * @param title chart title
+ * @param order order in which the chart will be shown on the dashboard.
+ */
+static void ebpf_create_statistic_hash_pid_table(int update_every, char *id, char *title, int order)
+{
+ ebpf_write_chart_cmd(NETDATA_MONITORING_FAMILY,
+ id,
+ title,
+ "rows",
+ NETDATA_EBPF_FAMILY,
+ NETDATA_EBPF_CHART_TYPE_LINE,
+ NULL,
+ order,
+ update_every,
+ NETDATA_EBPF_MODULE_NAME_PROCESS);
+
+ int i;
+ for (i = 0; i < EBPF_MODULE_FUNCTION_IDX; i++) {
+ ebpf_module_t *wem = &ebpf_modules[i];
+ if (wem->apps_routine)
+ ebpf_write_global_dimension((char *)wem->thread_name,
+ (char *)wem->thread_name,
+ ebpf_algorithms[NETDATA_EBPF_INCREMENTAL_IDX]);
+ }
+}
/**
* Create Statistics Charts
@@ -2718,6 +2867,20 @@ static void ebpf_create_statistic_charts(int update_every)
ebpf_create_statistic_hash_tables(update_every);
ebpf_create_statistic_hash_per_core(update_every);
+
+ ebpf_create_statistic_hash_global_elements(update_every);
+
+ ebpf_create_statistic_hash_pid_table(update_every,
+ NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS,
+ "Elements inserted into PID table",
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED);
+
+ ebpf_create_statistic_hash_pid_table(update_every,
+ NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS,
+ "Elements removed from PID table",
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED);
+
+ fflush(stdout);
}
/*****************************************************************
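
The hunks above add ebpf_read_global_table_stats() as the shared replacement for the per-thread read loops that the files below delete. To illustrate the intended call pattern, here is a minimal sketch of how a module adopts it; my_maps, my_hash_values, my_values, MY_GLOBAL, MY_CTRL, MY_KEY_FIRST and MY_KEY_END are hypothetical placeholders for a module's own map array, vectors and key enums, not real netdata identifiers:

    /* Hypothetical module state: totals per key, plus a per-core scratch
     * buffer sized ebpf_nprocs and allocated at thread start. */
    static netdata_idx_t my_hash_values[MY_KEY_END];
    static netdata_idx_t *my_values;

    static void my_module_read_global_tables(netdata_idx_t *stats, int maps_per_core)
    {
        /* Data table: per-core values are summed into the module's own
         * vector, one slot per key in [MY_KEY_FIRST, MY_KEY_END). */
        ebpf_read_global_table_stats(my_hash_values, my_values,
                                     my_maps[MY_GLOBAL].map_fd, maps_per_core,
                                     MY_KEY_FIRST, MY_KEY_END);

        /* Controller table: PID add/del counters land in em->hash_table_stats,
         * which ebpf_send_hash_table_pid_data() later turns into dimensions. */
        ebpf_read_global_table_stats(stats, my_values,
                                     my_maps[MY_CTRL].map_fd, maps_per_core,
                                     NETDATA_CONTROLLER_PID_TABLE_ADD, NETDATA_CONTROLLER_END);
    }
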
diff --git a/collectors/ebpf.plugin/ebpf.h b/collectors/ebpf.plugin/ebpf.h
index f008d21af..78e3a9252 100644
--- a/collectors/ebpf.plugin/ebpf.h
+++ b/collectors/ebpf.plugin/ebpf.h
@@ -69,8 +69,6 @@ typedef struct netdata_syscall_stat {
struct netdata_syscall_stat *next; // Link list
} netdata_syscall_stat_t;
-typedef uint64_t netdata_idx_t;
-
typedef struct netdata_publish_syscall {
char *dimension;
char *name;
@@ -169,6 +167,9 @@ typedef struct ebpf_tracepoint {
#define NETDATA_EBPF_KERNEL_MEMORY "ebpf_kernel_memory"
#define NETDATA_EBPF_HASH_TABLES_LOADED "ebpf_hash_tables_count"
#define NETDATA_EBPF_HASH_TABLES_PER_CORE "ebpf_hash_tables_per_core"
+#define NETDATA_EBPF_HASH_TABLES_GLOBAL_ELEMENTS "ebpf_hash_tables_global_elements"
+#define NETDATA_EBPF_HASH_TABLES_INSERT_PID_ELEMENTS "ebpf_hash_tables_insert_pid_elements"
+#define NETDATA_EBPF_HASH_TABLES_REMOVE_PID_ELEMENTS "ebpf_hash_tables_remove_pid_elements"
// Log file
#define NETDATA_DEVELOPER_LOG_FILE "developer.log"
@@ -319,6 +320,9 @@ void ebpf_update_disabled_plugin_stats(ebpf_module_t *em);
ARAL *ebpf_allocate_pid_aral(char *name, size_t size);
void ebpf_unload_legacy_code(struct bpf_object *objects, struct bpf_link **probe_links);
+void ebpf_read_global_table_stats(netdata_idx_t *stats, netdata_idx_t *values, int map_fd,
+ int maps_per_core, uint32_t begin, uint32_t end);
+
extern ebpf_filesystem_partitions_t localfs[];
extern ebpf_sync_syscalls_t local_syscalls[];
extern int ebpf_exit_plugin;
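
One indexing detail worth spelling out: the helper writes stats[idx - begin], so a read that starts at NETDATA_CONTROLLER_PID_TABLE_ADD places the insert counter in slot 0 and the delete counter in slot 1. Assuming NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD and _DEL enumerate from zero (their definitions are not part of this diff), that lines up with how the senders in ebpf.c index the vector:

    /* Assumed slot correspondence after the controller-table read:
     *   stats[NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD] -> PIDs inserted
     *   stats[NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL] -> PIDs removed */
    netdata_idx_t inserted = em->hash_table_stats[NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_ADD];
    netdata_idx_t removed  = em->hash_table_stats[NETDATA_EBPF_GLOBAL_TABLE_PID_TABLE_DEL];
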
diff --git a/collectors/ebpf.plugin/ebpf_cachestat.c b/collectors/ebpf.plugin/ebpf_cachestat.c
index 72c337941..affecdea2 100644
--- a/collectors/ebpf.plugin/ebpf_cachestat.c
+++ b/collectors/ebpf.plugin/ebpf_cachestat.c
@@ -854,26 +854,24 @@ void ebpf_cachestat_create_apps_charts(struct ebpf_module *em, void *ptr)
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_cachestat_read_global_table(int maps_per_core)
+static void ebpf_cachestat_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint32_t idx;
- netdata_idx_t *val = cachestat_hash_values;
- netdata_idx_t *stored = cachestat_values;
- int fd = cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd;
-
- for (idx = NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU; idx < NETDATA_CACHESTAT_END; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, stored)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < end; i++)
- total += stored[i];
-
- val[idx] = total;
- }
- }
+ ebpf_read_global_table_stats(cachestat_hash_values,
+ cachestat_values,
+ cachestat_maps[NETDATA_CACHESTAT_GLOBAL_STATS].map_fd,
+ maps_per_core,
+ NETDATA_KEY_CALLS_ADD_TO_PAGE_CACHE_LRU,
+ NETDATA_CACHESTAT_END);
+
+ ebpf_read_global_table_stats(stats,
+ cachestat_values,
+ cachestat_maps[NETDATA_CACHESTAT_CTRL].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1288,6 +1286,8 @@ static void cachestat_collector(ebpf_module_t *em)
//This will be cancelled by its parent
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -1296,7 +1296,7 @@ static void cachestat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_cachestat_read_global_table(maps_per_core);
+ ebpf_cachestat_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
ebpf_read_cachestat_apps_table(maps_per_core);
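
cachestat is the template for the collector changes repeated in the threads below: the per-module hash_table_stats vector is zeroed once before the loop and refreshed on every collection tick. A condensed sketch of that lifecycle (heartbeat setup, locking and chart output elided):

    netdata_idx_t *stats = em->hash_table_stats;
    memset(stats, 0, sizeof(em->hash_table_stats)); /* array member, so sizeof covers the whole array */

    while (!ebpf_exit_plugin && running_time < lifetime) {
        (void)heartbeat_next(&hb, USEC_PER_SEC);
        if (ebpf_exit_plugin || ++counter != update_every)
            continue;

        counter = 0;
        ebpf_cachestat_read_global_tables(stats, maps_per_core); /* data + controller tables */
        /* ... lock, send global/apps/cgroup charts, update running_time ... */
    }
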
diff --git a/collectors/ebpf.plugin/ebpf_dcstat.c b/collectors/ebpf.plugin/ebpf_dcstat.c
index dba3f44d9..feb935b93 100644
--- a/collectors/ebpf.plugin/ebpf_dcstat.c
+++ b/collectors/ebpf.plugin/ebpf_dcstat.c
@@ -690,26 +690,24 @@ static void ebpf_update_dc_cgroup(int maps_per_core)
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_dc_read_global_table(int maps_per_core)
+static void ebpf_dc_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint32_t idx;
- netdata_idx_t *val = dcstat_hash_values;
- netdata_idx_t *stored = dcstat_values;
- int fd = dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd;
-
- for (idx = NETDATA_KEY_DC_REFERENCE; idx < NETDATA_DIRECTORY_CACHE_END; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, stored)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < end; i++)
- total += stored[i];
-
- val[idx] = total;
- }
- }
+ ebpf_read_global_table_stats(dcstat_hash_values,
+ dcstat_values,
+ dcstat_maps[NETDATA_DCSTAT_GLOBAL_STATS].map_fd,
+ maps_per_core,
+ NETDATA_KEY_DC_REFERENCE,
+ NETDATA_DIRECTORY_CACHE_END);
+
+ ebpf_read_global_table_stats(stats,
+ dcstat_values,
+ dcstat_maps[NETDATA_DCSTAT_CTRL].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1169,6 +1167,8 @@ static void dcstat_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -1177,7 +1177,7 @@ static void dcstat_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_dc_read_global_table(maps_per_core);
+ ebpf_dc_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_dc_apps_table(maps_per_core);
diff --git a/collectors/ebpf.plugin/ebpf_fd.c b/collectors/ebpf.plugin/ebpf_fd.c
index 214d2955d..f039647a1 100644
--- a/collectors/ebpf.plugin/ebpf_fd.c
+++ b/collectors/ebpf.plugin/ebpf_fd.c
@@ -360,7 +360,7 @@ static inline int ebpf_fd_load_and_attach(struct fd_bpf *obj, ebpf_module_t *em)
if (!ret) {
ebpf_fd_set_hash_tables(obj);
- ebpf_update_controller(fd_maps[NETDATA_CACHESTAT_CTRL].map_fd, em);
+ ebpf_update_controller(fd_maps[NETDATA_FD_CONTROLLER].map_fd, em);
}
return ret;
@@ -624,26 +624,24 @@ static void ebpf_fd_send_data(ebpf_module_t *em)
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_fd_read_global_table(int maps_per_core)
+static void ebpf_fd_read_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint32_t idx;
- netdata_idx_t *val = fd_hash_values;
- netdata_idx_t *stored = fd_values;
- int fd = fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd;
-
- for (idx = NETDATA_KEY_CALLS_DO_SYS_OPEN; idx < NETDATA_FD_COUNTER; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, stored)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs: 1;
- netdata_idx_t total = 0;
- for (i = 0; i < end; i++)
- total += stored[i];
-
- val[idx] = total;
- }
- }
+ ebpf_read_global_table_stats(fd_hash_values,
+ fd_values,
+ fd_maps[NETDATA_FD_GLOBAL_STATS].map_fd,
+ maps_per_core,
+ NETDATA_KEY_CALLS_DO_SYS_OPEN,
+ NETDATA_FD_COUNTER);
+
+ ebpf_read_global_table_stats(stats,
+ fd_values,
+ fd_maps[NETDATA_FD_CONTROLLER].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1136,6 +1134,8 @@ static void fd_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
@@ -1144,7 +1144,7 @@ static void fd_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_fd_read_global_table(maps_per_core);
+ ebpf_fd_read_global_tables(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_fd_apps_table(maps_per_core);
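
The one-line fix near the top of this file is easy to miss: the fd thread updated its controller through fd_maps[NETDATA_CACHESTAT_CTRL], an enumerator belonging to the cachestat module. C enums are plain integers, so the old code compiled silently and simply selected whatever map occupied that numeric slot. A hypothetical illustration (the MODULE_A_*/MODULE_B_* names are placeholders, not real netdata enumerators):

    /* Two unrelated enums with overlapping numeric values. */
    enum module_a_tables { MODULE_A_GLOBAL, MODULE_A_CTRL };
    enum module_b_tables { MODULE_B_GLOBAL, MODULE_B_CTRL };

    extern ebpf_local_maps_t b_maps[]; /* module B's map array */

    /* Both lines compile without warning, but only the second is
     * guaranteed to name module B's controller table. */
    int wrong = b_maps[MODULE_A_CTRL].map_fd;
    int right = b_maps[MODULE_B_CTRL].map_fd;
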
diff --git a/collectors/ebpf.plugin/ebpf_functions.c b/collectors/ebpf.plugin/ebpf_functions.c
index cc26044c4..7a43692bc 100644
--- a/collectors/ebpf.plugin/ebpf_functions.c
+++ b/collectors/ebpf.plugin/ebpf_functions.c
@@ -206,7 +206,7 @@ static void ebpf_function_thread_manipulation(const char *transaction,
time_t expires = now_realtime_sec() + em->update_every;
BUFFER *wb = buffer_create(PLUGINSD_LINE_MAX, NULL);
- buffer_json_initialize(wb, "\"", "\"", 0, true, false);
+ buffer_json_initialize(wb, "\"", "\"", 0, true, BUFFER_JSON_OPTIONS_NEWLINE_ON_ARRAY_ITEMS);
buffer_json_member_add_uint64(wb, "status", HTTP_RESP_OK);
buffer_json_member_add_string(wb, "type", "table");
buffer_json_member_add_time_t(wb, "update_every", em->update_every);
diff --git a/collectors/ebpf.plugin/ebpf_oomkill.c b/collectors/ebpf.plugin/ebpf_oomkill.c
index 66421d277..84830160a 100644
--- a/collectors/ebpf.plugin/ebpf_oomkill.c
+++ b/collectors/ebpf.plugin/ebpf_oomkill.c
@@ -419,6 +419,7 @@ static void oomkill_collector(ebpf_module_t *em)
int counter = update_every - 1;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -432,6 +433,9 @@ static void oomkill_collector(ebpf_module_t *em)
continue;
}
+ stats[NETDATA_CONTROLLER_PID_TABLE_ADD] += (uint64_t) count;
+ stats[NETDATA_CONTROLLER_PID_TABLE_DEL] += (uint64_t) count;
+
pthread_mutex_lock(&collect_data_mutex);
pthread_mutex_lock(&lock);
if (cgroups) {
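
oomkill has no controller map to read with the new helper, so it accounts for PID-table churn directly: each harvested OOM event corresponds to one PID inserted into the kernel-side table and one PID removed once userspace consumes the entry, hence the same count is credited to both counters. In sketch form (the oomkill_read_data() call and keys vector are assumed from surrounding code, not shown in this diff):

    /* Every harvested OOM event implies one insertion and one removal
     * in the PID tracking table, so both stats advance by count. */
    uint32_t count = oomkill_read_data(keys); /* events harvested this tick */
    stats[NETDATA_CONTROLLER_PID_TABLE_ADD] += (uint64_t)count;
    stats[NETDATA_CONTROLLER_PID_TABLE_DEL] += (uint64_t)count;
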
diff --git a/collectors/ebpf.plugin/ebpf_process.c b/collectors/ebpf.plugin/ebpf_process.c
index 4d915e132..3537efc55 100644
--- a/collectors/ebpf.plugin/ebpf_process.c
+++ b/collectors/ebpf.plugin/ebpf_process.c
@@ -267,26 +267,22 @@ void ebpf_process_send_apps_data(struct ebpf_target *root, ebpf_module_t *em)
*
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_read_process_hash_global_tables(int maps_per_core)
+static void ebpf_read_process_hash_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint64_t idx;
netdata_idx_t res[NETDATA_KEY_END_VECTOR];
-
- netdata_idx_t *val = process_hash_values;
- int fd = process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd;
- for (idx = 0; idx < NETDATA_KEY_END_VECTOR; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, val)) {
- uint64_t total = 0;
- int i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- for (i = 0; i < end; i++)
- total += val[i];
-
- res[idx] = total;
- } else {
- res[idx] = 0;
- }
- }
+ ebpf_read_global_table_stats(res,
+ process_hash_values,
+ process_maps[NETDATA_PROCESS_GLOBAL_TABLE].map_fd,
+ maps_per_core,
+ 0,
+ NETDATA_KEY_END_VECTOR);
+
+ ebpf_read_global_table_stats(stats,
+ process_hash_values,
+ process_maps[NETDATA_PROCESS_CTRL_TABLE].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_EXIT].call = res[NETDATA_KEY_CALLS_DO_EXIT];
process_aggregated_data[NETDATA_KEY_PUBLISH_PROCESS_RELEASE_TASK].call = res[NETDATA_KEY_CALLS_RELEASE_TASK];
@@ -747,7 +743,6 @@ static void ebpf_process_exit(void *ptr)
ebpf_statistic_obsolete_aral_chart(em, process_disable_priority);
#endif
-
fflush(stdout);
pthread_mutex_unlock(&lock);
}
@@ -1121,6 +1116,8 @@ static void process_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
usec_t dt = heartbeat_next(&hb, USEC_PER_SEC);
(void)dt;
@@ -1130,7 +1127,7 @@ static void process_collector(ebpf_module_t *em)
if (++counter == update_every) {
counter = 0;
- ebpf_read_process_hash_global_tables(maps_per_core);
+ ebpf_read_process_hash_global_tables(stats, maps_per_core);
netdata_apps_integration_flags_t apps_enabled = em->apps_charts;
pthread_mutex_lock(&collect_data_mutex);
diff --git a/collectors/ebpf.plugin/ebpf_process.h b/collectors/ebpf.plugin/ebpf_process.h
index 48267d87b..d49e38452 100644
--- a/collectors/ebpf.plugin/ebpf_process.h
+++ b/collectors/ebpf.plugin/ebpf_process.h
@@ -48,7 +48,11 @@ enum netdata_ebpf_stats_order {
NETDATA_EBPF_ORDER_STAT_LOAD_METHOD,
NETDATA_EBPF_ORDER_STAT_KERNEL_MEMORY,
NETDATA_EBPF_ORDER_STAT_HASH_TABLES,
- NETDATA_EBPF_ORDER_STAT_HASH_CORE
+ NETDATA_EBPF_ORDER_STAT_HASH_CORE,
+ NETDATA_EBPF_ORDER_STAT_HASH_GLOBAL_TABLE_TOTAL,
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_ADDED,
+ NETDATA_EBPF_ORDER_STAT_HASH_PID_TABLE_REMOVED,
+ NETATA_EBPF_ORDER_STAT_ARAL_BEGIN
};
enum netdata_ebpf_load_mode_stats{
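
These enum members double as chart priorities, so appending the three new statistics charts at the end keeps existing orderings stable, and the final member replaces the hard-coded 140100 as the base from which ebpf_statistic_create_aral_chart() counts upward. A minimal sketch of the idea (the numeric base 140000 is hypothetical):

    /* Priorities follow enum order; the last member is an open-ended base
     * for dynamically created ARAL charts. */
    enum stats_order {
        ORDER_FIRST = 140000, /* hypothetical base value */
        ORDER_SECOND,
        ORDER_ARAL_BEGIN      /* analogous to NETATA_EBPF_ORDER_STAT_ARAL_BEGIN */
    };

    static int aral_priority = ORDER_ARAL_BEGIN;

    int next_aral_chart_priority(void) {
        return aral_priority++;
    }
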
diff --git a/collectors/ebpf.plugin/ebpf_shm.c b/collectors/ebpf.plugin/ebpf_shm.c
index 78ada81f7..baeb7204e 100644
--- a/collectors/ebpf.plugin/ebpf_shm.c
+++ b/collectors/ebpf.plugin/ebpf_shm.c
@@ -646,30 +646,24 @@ static void shm_send_global()
*
* Read the table with number of calls for all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_shm_read_global_table(int maps_per_core)
+static void ebpf_shm_read_global_table(netdata_idx_t *stats, int maps_per_core)
{
- netdata_idx_t *stored = shm_values;
- netdata_idx_t *val = shm_hash_values;
- int fd = shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd;
- size_t length = sizeof(netdata_idx_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- uint32_t i, end = NETDATA_SHM_END;
- for (i = NETDATA_KEY_SHMGET_CALL; i < end; i++) {
- if (!bpf_map_lookup_elem(fd, &i, stored)) {
- int j;
- int last = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_idx_t total = 0;
- for (j = 0; j < last; j++)
- total += stored[j];
-
- val[i] = total;
- memset(stored, 0 , length);
- }
- }
+ ebpf_read_global_table_stats(shm_hash_values,
+ shm_values,
+ shm_maps[NETDATA_SHM_GLOBAL_TABLE].map_fd,
+ maps_per_core,
+ NETDATA_KEY_SHMGET_CALL,
+ NETDATA_SHM_END);
+
+ ebpf_read_global_table_stats(stats,
+ shm_values,
+ shm_maps[NETDATA_SHM_CONTROLLER].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -1039,6 +1033,8 @@ static void shm_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -1046,7 +1042,7 @@ static void shm_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_shm_read_global_table(maps_per_core);
+ ebpf_shm_read_global_table(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps) {
read_shm_apps_table(maps_per_core);
diff --git a/collectors/ebpf.plugin/ebpf_socket.c b/collectors/ebpf.plugin/ebpf_socket.c
index 2cad8bdf1..e4798b30c 100644
--- a/collectors/ebpf.plugin/ebpf_socket.c
+++ b/collectors/ebpf.plugin/ebpf_socket.c
@@ -2205,33 +2205,25 @@ void *ebpf_socket_read_hash(void *ptr)
/**
* Read the hash table and store data to allocated vectors.
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void read_hash_global_tables(int maps_per_core)
+static void ebpf_socket_read_hash_global_tables(netdata_idx_t *stats, int maps_per_core)
{
- uint64_t idx;
netdata_idx_t res[NETDATA_SOCKET_COUNTER];
-
- netdata_idx_t *val = socket_hash_values;
- size_t length = sizeof(netdata_idx_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- int fd = socket_maps[NETDATA_SOCKET_GLOBAL].map_fd;
- for (idx = 0; idx < NETDATA_SOCKET_COUNTER; idx++) {
- if (!bpf_map_lookup_elem(fd, &idx, val)) {
- uint64_t total = 0;
- int i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- for (i = 0; i < end; i++)
- total += val[i];
-
- res[idx] = total;
- memset(socket_hash_values, 0, length);
- } else {
- res[idx] = 0;
- }
- }
+ ebpf_read_global_table_stats(res,
+ socket_hash_values,
+ socket_maps[NETDATA_SOCKET_GLOBAL].map_fd,
+ maps_per_core,
+ NETDATA_KEY_CALLS_TCP_SENDMSG,
+ NETDATA_SOCKET_COUNTER);
+
+ ebpf_read_global_table_stats(stats,
+ socket_hash_values,
+ socket_maps[NETDATA_SOCKET_TABLE_CTRL].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
socket_aggregated_data[NETDATA_IDX_TCP_SENDMSG].call = res[NETDATA_KEY_CALLS_TCP_SENDMSG];
socket_aggregated_data[NETDATA_IDX_TCP_CLEANUP_RBUF].call = res[NETDATA_KEY_CALLS_TCP_CLEANUP_RBUF];
@@ -2930,6 +2922,8 @@ static void socket_collector(ebpf_module_t *em)
int counter = update_every - 1;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -2939,7 +2933,7 @@ static void socket_collector(ebpf_module_t *em)
netdata_apps_integration_flags_t socket_apps_enabled = em->apps_charts;
if (socket_global_enabled) {
read_listen_table();
- read_hash_global_tables(maps_per_core);
+ ebpf_socket_read_hash_global_tables(stats, maps_per_core);
}
pthread_mutex_lock(&collect_data_mutex);
diff --git a/collectors/ebpf.plugin/ebpf_swap.c b/collectors/ebpf.plugin/ebpf_swap.c
index 9a1640a35..359fe2308 100644
--- a/collectors/ebpf.plugin/ebpf_swap.c
+++ b/collectors/ebpf.plugin/ebpf_swap.c
@@ -322,13 +322,13 @@ void ebpf_obsolete_swap_apps_charts(struct ebpf_module *em)
*/
static void ebpf_obsolete_swap_global(ebpf_module_t *em)
{
- ebpf_write_chart_obsolete(NETDATA_EBPF_SYSTEM_GROUP,
+ ebpf_write_chart_obsolete(NETDATA_EBPF_MEMORY_GROUP,
NETDATA_MEM_SWAP_CHART,
"Calls to access swap memory",
EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
NETDATA_EBPF_CHART_TYPE_LINE,
NULL,
- NETDATA_CHART_PRIO_SYSTEM_SWAP_CALLS,
+ NETDATA_CHART_PRIO_MEM_SWAP_CALLS,
em->update_every);
}
@@ -507,7 +507,7 @@ static void read_swap_apps_table(int maps_per_core)
*/
static void swap_send_global()
{
- write_io_chart(NETDATA_MEM_SWAP_CHART, NETDATA_EBPF_SYSTEM_GROUP,
+ write_io_chart(NETDATA_MEM_SWAP_CHART, NETDATA_EBPF_MEMORY_GROUP,
swap_publish_aggregated[NETDATA_KEY_SWAP_WRITEPAGE_CALL].dimension,
(long long) swap_hash_values[NETDATA_KEY_SWAP_WRITEPAGE_CALL],
swap_publish_aggregated[NETDATA_KEY_SWAP_READPAGE_CALL].dimension,
@@ -519,26 +519,24 @@ static void swap_send_global()
*
* Read the table with number of calls to all functions
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_swap_read_global_table(int maps_per_core)
-{
- netdata_idx_t *stored = swap_values;
- netdata_idx_t *val = swap_hash_values;
- int fd = swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd;
-
- uint32_t i, end = NETDATA_SWAP_END;
- for (i = NETDATA_KEY_SWAP_READPAGE_CALL; i < end; i++) {
- if (!bpf_map_lookup_elem(fd, &i, stored)) {
- int j;
- int last = (maps_per_core) ? ebpf_nprocs : 1;
- netdata_idx_t total = 0;
- for (j = 0; j < last; j++)
- total += stored[j];
-
- val[i] = total;
- }
- }
+static void ebpf_swap_read_global_table(netdata_idx_t *stats, int maps_per_core)
+{
+ ebpf_read_global_table_stats(swap_hash_values,
+ swap_values,
+ swap_maps[NETDATA_SWAP_GLOBAL_TABLE].map_fd,
+ maps_per_core,
+ NETDATA_KEY_SWAP_READPAGE_CALL,
+ NETDATA_SWAP_END);
+
+ ebpf_read_global_table_stats(stats,
+ swap_values,
+ swap_maps[NETDATA_SWAP_CONTROLLER].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
}
/**
@@ -804,6 +802,8 @@ static void swap_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -811,7 +811,7 @@ static void swap_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_swap_read_global_table(maps_per_core);
+ ebpf_swap_read_global_table(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
read_swap_apps_table(maps_per_core);
@@ -914,12 +914,12 @@ static void ebpf_swap_allocate_global_vectors(int apps)
*/
static void ebpf_create_swap_charts(int update_every)
{
- ebpf_create_chart(NETDATA_EBPF_SYSTEM_GROUP, NETDATA_MEM_SWAP_CHART,
+ ebpf_create_chart(NETDATA_EBPF_MEMORY_GROUP, NETDATA_MEM_SWAP_CHART,
"Calls to access swap memory",
EBPF_COMMON_DIMENSION_CALL, NETDATA_SYSTEM_SWAP_SUBMENU,
NULL,
NETDATA_EBPF_CHART_TYPE_LINE,
- NETDATA_CHART_PRIO_SYSTEM_SWAP_CALLS,
+ NETDATA_CHART_PRIO_MEM_SWAP_CALLS,
ebpf_create_global_dimension,
swap_publish_aggregated, NETDATA_SWAP_END,
update_every, NETDATA_EBPF_MODULE_NAME_SWAP);
diff --git a/collectors/ebpf.plugin/ebpf_vfs.c b/collectors/ebpf.plugin/ebpf_vfs.c
index 5747a2408..e566e169d 100644
--- a/collectors/ebpf.plugin/ebpf_vfs.c
+++ b/collectors/ebpf.plugin/ebpf_vfs.c
@@ -964,30 +964,25 @@ static void ebpf_vfs_send_data(ebpf_module_t *em)
/**
* Read the hash table and store data to allocated vectors.
*
+ * @param stats vector used to read data from control table.
* @param maps_per_core do I need to read all cores?
*/
-static void ebpf_vfs_read_global_table(int maps_per_core)
+static void ebpf_vfs_read_global_table(netdata_idx_t *stats, int maps_per_core)
{
- uint64_t idx;
netdata_idx_t res[NETDATA_VFS_COUNTER];
-
- netdata_idx_t *val = vfs_hash_values;
- size_t length = sizeof(netdata_idx_t);
- if (maps_per_core)
- length *= ebpf_nprocs;
-
- int fd = vfs_maps[NETDATA_VFS_ALL].map_fd;
- for (idx = 0; idx < NETDATA_VFS_COUNTER; idx++) {
- uint64_t total = 0;
- if (!bpf_map_lookup_elem(fd, &idx, val)) {
- int i;
- int end = (maps_per_core) ? ebpf_nprocs : 1;
- for (i = 0; i < end; i++)
- total += val[i];
- }
- res[idx] = total;
- memset(val, 0, length);
- }
+ ebpf_read_global_table_stats(res,
+ vfs_hash_values,
+ vfs_maps[NETDATA_VFS_ALL].map_fd,
+ maps_per_core,
+ NETDATA_KEY_CALLS_VFS_WRITE,
+ NETDATA_VFS_COUNTER);
+
+ ebpf_read_global_table_stats(stats,
+ vfs_hash_values,
+ vfs_maps[NETDATA_VFS_CTRL].map_fd,
+ maps_per_core,
+ NETDATA_CONTROLLER_PID_TABLE_ADD,
+ NETDATA_CONTROLLER_END);
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_UNLINK].ncall = res[NETDATA_KEY_CALLS_VFS_UNLINK];
vfs_publish_aggregated[NETDATA_KEY_PUBLISH_VFS_READ].ncall = res[NETDATA_KEY_CALLS_VFS_READ] +
@@ -1963,6 +1958,8 @@ static void vfs_collector(ebpf_module_t *em)
int maps_per_core = em->maps_per_core;
uint32_t running_time = 0;
uint32_t lifetime = em->lifetime;
+ netdata_idx_t *stats = em->hash_table_stats;
+ memset(stats, 0, sizeof(em->hash_table_stats));
while (!ebpf_exit_plugin && running_time < lifetime) {
(void)heartbeat_next(&hb, USEC_PER_SEC);
if (ebpf_exit_plugin || ++counter != update_every)
@@ -1970,7 +1967,7 @@ static void vfs_collector(ebpf_module_t *em)
counter = 0;
netdata_apps_integration_flags_t apps = em->apps_charts;
- ebpf_vfs_read_global_table(maps_per_core);
+ ebpf_vfs_read_global_table(stats, maps_per_core);
pthread_mutex_lock(&collect_data_mutex);
if (apps)
ebpf_vfs_read_apps(maps_per_core);
diff --git a/collectors/ebpf.plugin/metadata.yaml b/collectors/ebpf.plugin/metadata.yaml
new file mode 100644
index 000000000..232326778
--- /dev/null
+++ b/collectors/ebpf.plugin/metadata.yaml
@@ -0,0 +1,3308 @@
+plugin_name: ebpf.plugin
+modules:
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: filedescriptor
+ monitored_instance:
+ name: eBPF Filedescriptor
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - file
+ - eBPF
+ - fd
+ - open
+ - close
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor calls for functions responsible to open or close a file descriptor and possible errors."
+ method_description: "Attach tracing (kprobe and trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netdata sets necessary permissions during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "Depending of kernel version and frequency that files are open and close, this thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
with different names.
+
Now follow these steps:
1. Copy the configuration file to /usr/src/linux/.config.
2. Select the necessary options: make oldconfig
3. Compile your kernel image: make bzImage
4. Compile your modules: make modules
5. Copy your new kernel image to the boot loader directory
6. Install the new modules: make modules_install
7. Generate an initial ramdisk image (`initrd`) if necessary.
8. Update your boot loader
+ configuration:
+ file:
+ name: "ebpf.d/fd.conf"
+ description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: cgroup
+ description: "These Metrics show grouped information per cgroup/service."
+ labels: []
+ metrics:
+ - name: cgroup.fd_open
+ description: Number of open files
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: cgroup.fd_open_error
+ description: Fails to open files
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: cgroup.fd_closed
+ description: Files closed
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: close
+ - name: cgroup.fd_close_error
+ description: Fails to close files
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: close
+ - name: services.file_open
+ description: Number of open files
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.file_open_error
+ description: Fails to open files
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.file_closed
+ description: Files closed
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.file_close_error
+ description: Fails to close files
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: global
+ description: "These metrics show total number of calls to functions inside kernel."
+ labels: []
+ metrics:
+ - name: filesystem.file_descriptor
+ description: Open and close calls
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: close
+ - name: filesystem.file_error
+ description: Open fails
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: close
+ - name: apps
+ description: "These Metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.file_open
+ description: Number of open files
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.file_open_error
+ description: Fails to open files
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.file_closed
+ description: Files closed
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.file_close_error
+ description: Fails to close files
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: processes
+ monitored_instance:
+ name: eBPF Processes
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - thread
+ - fork
+ - process
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor calls for function creating tasks (threads and processes) inside Linux kernel."
+ method_description: "Attach tracing (kprobe or tracepoint, and trampoline) to internal kernel functions."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
with different names.
+
Now follow these steps:
1. Copy the configuration file to /usr/src/linux/.config.
2. Select the necessary options: make oldconfig
3. Compile your kernel image: make bzImage
4. Compile your modules: make modules
5. Copy your new kernel image to the boot loader directory
6. Install the new modules: make modules_install
7. Generate an initial ramdisk image (`initrd`) if necessary.
8. Update your boot loader
+ - title: Debug Filesystem
+ description: |
This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+ configuration:
+ file:
+ name: "ebpf.d/process.conf"
+ description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code). This plugin will always try to attach a tracepoint, so option here will impact only function used to monitor task (thread and process) creation."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin loads its hash maps. When enabled (`yes`), the plugin loads one hash table per core instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics show total number of calls to functions inside kernel."
+ labels: []
+ metrics:
+ - name: system.process_thread
+ description: Start process
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: process
+ - name: system.process_status
+ description: Process not closed
+ unit: "difference"
+ chart_type: line
+ dimensions:
+ - name: process
+ - name: zombie
+ - name: system.exit
+ description: Exit process
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: process
+ - name: system.task_error
+ description: Fails to create process
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: task
+ - name: apps
+ description: "These Metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.process_create
+ description: Process started
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.thread_create
+ description: Threads started
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.task_exit
+ description: Tasks starts exit process
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.task_close
+ description: Tasks closed
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.task_error
+ description: Errors to create process or threads
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: cgroup
+ description: "These Metrics show grouped information per cgroup/service."
+ labels: []
+ metrics:
+ - name: cgroup.process_create
+ description: Process started
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: process
+ - name: cgroup.thread_create
+ description: Threads started
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: thread
+ - name: cgroup.task_exit
+ description: Tasks starts exit process
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: exit
+ - name: cgroup.task_close
+ description: Tasks closed
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: process
+ - name: cgroup.task_error
+ description: Errors to create process or threads
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: process
+ - name: services.process_create
+ description: Process started
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.thread_create
+ description: Threads started
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.task_close
+ description: Tasks starts exit process
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.task_exit
+ description: Tasks closed
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.task_error
+ description: Errors to create process or threads
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: disk
+ monitored_instance:
+ name: eBPF Disk
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - hard Disk
+ - eBPF
+ - latency
+ - partition
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Measure latency for I/O events on disk."
+ method_description: "Attach tracepoints to internal kernel functions."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
with different names.
+
Now follow these steps:
1. Copy the configuration file to /usr/src/linux/.config.
2. Select the necessary options: make oldconfig
3. Compile your kernel image: make bzImage
4. Compile your modules: make modules
5. Copy your new kernel image to the boot loader directory
6. Install the new modules: make modules_install
7. Generate an initial ramdisk image (`initrd`) if necessary.
8. Update your boot loader
+ - title: Debug Filesystem
+ description: |
This thread needs to attach tracepoints to monitor kernel events. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+ configuration:
+ file:
+ name: "ebpf.d/disk.conf"
+ description: "Overwrite default configuration reducing number of I/O events."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`).
+ default_value: entry
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: disk
+ description: "These metrics measure latency for I/O events on every hard disk present on host."
+ labels: []
+ metrics:
+ - name: disk.latency_io
+ description: Disk latency
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: latency
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: hardirq
+ monitored_instance:
+ name: eBPF Hardirq
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - HardIRQ
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor latency for each HardIRQ available."
+ method_description: "Attach tracepoints to internal kernel functions."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
with different names.
+
Now follow these steps:
1. Copy the configuration file to /usr/src/linux/.config.
2. Select the necessary options: make oldconfig
3. Compile your kernel image: make bzImage
4. Compile your modules: make modules
5. Copy your new kernel image to the boot loader directory
6. Install the new modules: make modules_install
7. Generate an initial ramdisk image (`initrd`) if necessary.
8. Update your boot loader
+ - title: Debug Filesystem
+ description: |
This thread needs to attach tracepoints to monitor kernel events. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+ configuration:
+ file:
+ name: "ebpf.d/hardirq.conf"
+ description: "Overwrite default configuration reducing number of I/O events."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin monitors only the function call (`entry`) or also monitors the return (`return`).
+ default_value: entry
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics show latest timestamp for each hardIRQ available on host."
+ labels: []
+ metrics:
+ - name: system.hardirq_latency
+ description: Hard IRQ latency
+ unit: "milliseconds"
+ chart_type: stacked
+ dimensions:
+ - name: hardirq names
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: cachestat
+ monitored_instance:
+ name: eBPF Cachestat
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - Page cache
+ - Hit ratio
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor Linux page cache events giving for users a general vision about how his kernel is manipulating files."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution; the latter is preferred. Kernel compilation follows a well-defined pattern, but distributions can deliver their configuration files
with different names.
+
+ Now follow steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image for boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if it is necessary.
+ 8. Update your boot loader
+ configuration:
+ file:
+ name: "ebpf.d/cachestat.conf"
+ description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
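+
+ A sketch of an `ebpf.d/cachestat.conf` that enables the apps integration and shrinks the PID table (all values are illustrative):
+
+ ```text
+ [global]
+     apps = yes
+     cgroups = no
+     update every = 5
+     pid table size = 16384
+ ```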
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics show total number of calls to functions inside kernel."
+ labels: []
+ metrics:
+ - name: mem.cachestat_ratio
+ description: Hit ratio
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: ratio
+ - name: mem.cachestat_dirties
+ description: Number of dirty pages
+ unit: "page/s"
+ chart_type: line
+ dimensions:
+ - name: dirty
+ - name: mem.cachestat_hits
+ description: Number of accessed files
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hit
+ - name: mem.cachestat_misses
+ description: Files out of page cache
+ unit: "misses/s"
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: apps
+ description: "These Metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.cachestat_ratio
+ description: Hit ratio
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.cachestat_dirties
+ description: Number of dirty pages
+ unit: "page/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.cachestat_hits
+ description: Number of accessed files
+ unit: "hits/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.cachestat_misses
+ description: Files out of page cache
+ unit: "misses/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: cgroup
+ description: ""
+ labels: []
+ metrics:
+ - name: cgroup.cachestat_ratio
+ description: Hit ratio
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: ratio
+ - name: cgroup.cachestat_dirties
+ description: Number of dirty pages
+ unit: "page/s"
+ chart_type: line
+ dimensions:
+ - name: dirty
+ - name: cgroup.cachestat_hits
+ description: Number of accessed files
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: hit
+ - name: cgroup.cachestat_misses
+ description: Files out of page cache
+ unit: "misses/s"
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: services.cachestat_ratio
+ description: Hit ratio
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.cachestat_dirties
+ description: Number of dirty pages
+ unit: "page/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.cachestat_hits
+ description: Number of accessed files
+ unit: "hits/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.cachestat_misses
+ description: Files out of page cache
+ unit: "misses/s"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: sync
+ monitored_instance:
+ name: eBPF Sync
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - syscall
+ - eBPF
+ - hard disk
+ - memory
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor syscall responsible to move data from memory to storage device."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ - title: Debug Filesystem
+ description: |
+ This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug`).
+ configuration:
+ file:
+ name: "ebpf.d/sync.conf"
+ description: "Overwrite default configuration and allows user to select charts visible on dashboard."
+ options:
+ description: |
+ This configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.
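+
+ A sketch showing both sections; here `msync` monitoring is disabled while the remaining syscalls stay enabled (values are illustrative):
+
+ ```text
+ [global]
+     update every = 5
+
+ [syscalls]
+     sync = yes
+     msync = no
+     fsync = yes
+     fdatasync = yes
+     syncfs = yes
+     sync_file_range = yes
+ ```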
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ - name: sync
+ description: Enable or disable monitoring for syscall `sync`
+ default_value: yes
+ required: false
+ - name: msync
+ description: Enable or disable monitoring for syscall `msync`
+ default_value: yes
+ required: false
+ - name: fsync
+ description: Enable or disable monitoring for syscall `fsync`
+ default_value: yes
+ required: false
+ - name: fdatasync
+ description: Enable or disable monitoring for syscall `fdatasync`
+ default_value: yes
+ required: false
+ - name: syncfs
+ description: Enable or disable monitoring for syscall `syncfs`
+ default_value: yes
+ required: false
+ - name: sync_file_range
+ description: Enable or disable monitoring for syscall `sync_file_range`
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts:
+ - name: sync_freq
+ link: https://github.com/netdata/netdata/blob/master/health/health.d/synchronization.conf
+ metric: mem.sync
+ info:
+ number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the
+ underlying filesystems.
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics show total number of calls to functions inside kernel."
+ labels: []
+ metrics:
+ - name: mem.file_sync
+ description: Monitor calls for <code>fsync(2)</code> and <code>fdatasync(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: fsync
+ - name: fdatasync
+ - name: mem.memory_map
+ description: Monitor calls for <code>msync(2)</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: msync
+ - name: mem.sync
+ description: Monitor calls for <code>sync(2)</code> and <code>syncfs(2)</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: sync
+ - name: syncfs
+ - name: mem.file_segment
+ description: Monitor calls for <code>sync_file_range(2)</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: sync_file_range
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: mdflush
+ monitored_instance:
+ name: eBPF MDflush
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - MD
+ - RAID
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor when flush events happen between disks."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that `md_flush_request` is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ configuration:
+ file:
+ name: "ebpf.d/mdflush.conf"
+ description: "Overwrite default configuration reducing I/O events."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
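+
+ A minimal sketch of an `ebpf.d/mdflush.conf` that lowers the collection frequency (illustrative values):
+
+ ```text
+ [global]
+     update every = 10
+     ebpf load mode = entry
+ ```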
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "Number of times md_flush_request was called since last time."
+ labels: []
+ metrics:
+ - name: mdstat.mdstat_flush
+ description: MD flushes
+ unit: "flushes"
+ chart_type: stacked
+ dimensions:
+ - name: disk
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: swap
+ monitored_instance:
+ name: eBPF SWAP
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - SWAP
+ - memory
+ - eBPF
+ - Hard Disk
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitors when swap has I/O events and applications executing events."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ configuration:
+ file:
+ name: "ebpf.d/swap.conf"
+ description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
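+
+ A sketch of an `ebpf.d/swap.conf` that enables the cgroups integration (illustrative values):
+
+ ```text
+ [global]
+     apps = no
+     cgroups = yes
+     maps per core = yes
+ ```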
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: cgroup
+ description: "These Metrics show grouped information per cgroup/service."
+ labels: []
+ metrics:
+ - name: cgroup.swap_read
+ description: Calls to function <code>swap_readpage</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: cgroup.swap_write
+ description: Calls to function <code>swap_writepage</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: services.swap_read
+ description: Calls to <code>swap_readpage</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.swap_write
+ description: Calls to function <code>swap_writepage</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: apps
+ description: "These Metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.swap_read_call
+ description: Calls to function <code>swap_readpage</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.swap_write_call
+ description: Calls to function <code>swap_writepage</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: global
+ description: "These metrics show total number of calls to functions inside kernel."
+ labels: []
+ metrics:
+ - name: mem.swapcalls
+ description: Calls to access swap memory
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: read
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: oomkill
+ monitored_instance:
+ name: eBPF OOMkill
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - application
+ - memory
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor applications that reach out of memory."
+ method_description: "Attach tracepoint to internal kernel functions."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ - title: Debug Filesystem
+ description: |
+ This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+ configuration:
+ file:
+ name: "ebpf.d/oomkill.conf"
+ description: "Overwrite default configuration reducing number of I/O events."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
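+
+ A minimal sketch of an `ebpf.d/oomkill.conf` that extends the thread lifetime (illustrative values):
+
+ ```text
+ [global]
+     update every = 5
+     lifetime = 600
+ ```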
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: cgroup
+ description: "These metrics show cgroup/service that reached OOM."
+ labels: []
+ metrics:
+ - name: cgroup.oomkills
+ description: OOM kills. This chart is provided by eBPF plugin.
+ unit: "kills"
+ chart_type: line
+ dimensions:
+ - name: cgroup name
+ - name: services.oomkills
+ description: OOM kills. This chart is provided by eBPF plugin.
+ unit: "kills"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - name: apps
+ description: "These metrics show cgroup/service that reached OOM."
+ labels: []
+ metrics:
+ - name: apps.oomkills
+ description: OOM kills
+ unit: "kills"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: socket
+ monitored_instance:
+ name: eBPF Socket
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - TCP
+ - UDP
+ - bandwidth
+ - server
+ - connection
+ - socket
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor bandwidth consumption per application for protocols TCP and UDP."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ configuration:
+ file:
+ name: "ebpf.d/network.conf"
+ description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
+ options:
+ description: |
+ All options are defined inside section `[global]`. Options inside `network connections` are ignored for now.
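+
+ A sketch that sizes the connection tables down for a small host (illustrative values; the `network connections` section is omitted because it is currently ignored):
+
+ ```text
+ [global]
+     bandwidth table size = 8192
+     ipv4 connection table size = 8192
+     ipv6 connection table size = 8192
+     udp connection table size = 2048
+ ```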
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: bandwidth table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 16384
+ required: false
+ - name: ipv4 connection table size
+ description: Number of elements stored inside hash tables used to monitor IPV4 connections.
+ default_value: 16384
+ required: false
+ - name: ipv6 connection table size
+ description: Number of elements stored inside hash tables used to monitor IPV6 connections.
+ default_value: 16384
+ required: false
+ - name: udp connection table size
+ description: Number of temporary elements stored inside hash tables used to monitor UDP connections.
+ default_value: 4096
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics show total number of calls to functions inside kernel."
+ labels: []
+ metrics:
+ - name: ip.inbound_conn
+ description: Inbound connections.
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: connection_tcp
+ - name: ip.tcp_outbound_conn
+ description: TCP outbound connections.
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: ip.tcp_functions
+ description: Calls to internal functions
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: send
+ - name: closed
+ - name: ip.total_tcp_bandwidth
+ description: TCP bandwidth
+ unit: "kilobits/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: send
+ - name: ip.tcp_error
+ description: TCP errors
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: send
+ - name: ip.tcp_retransmit
+ description: Packets retransmitted
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: retransmited
+ - name: ip.udp_functions
+ description: UDP calls
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: send
+ - name: ip.total_udp_bandwidth
+ description: UDP bandwidth
+ unit: "kilobits/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: send
+ - name: ip.udp_error
+ description: UDP errors
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: send
+ - name: apps
+ description: "These metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.outbound_conn_v4
+ description: Calls to tcp_v4_connection
+ unit: "connections/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.outbound_conn_v6
+ description: Calls to tcp_v6_connection
+ unit: "connections/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.total_bandwidth_sent
+ description: Bytes sent
+ unit: "kilobits/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.total_bandwidth_recv
+ description: Bytes received
+ unit: "kilobits/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.bandwidth_tcp_send
+ description: Calls for tcp_sendmsg
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.bandwidth_tcp_recv
+ description: Calls for tcp_cleanup_rbuf
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.bandwidth_tcp_retransmit
+ description: Calls for tcp_retransmit
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.bandwidth_udp_send
+ description: Calls for udp_sendmsg
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.bandwidth_udp_recv
+ description: Calls for udp_recvmsg
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: services.net_conn_ipv4
+ description: Calls to tcp_v4_connection
+ unit: "connections/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: cgroup
+ description: ""
+ labels: []
+ metrics:
+ - name: cgroup.net_conn_ipv4
+ description: Calls to tcp_v4_connection
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: connected_v4
+ - name: cgroup.net_conn_ipv6
+ description: Calls to tcp_v6_connection
+ unit: "connections/s"
+ chart_type: line
+ dimensions:
+ - name: connected_v6
+ - name: cgroup.net_bytes_recv
+ description: Bytes received
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: cgroup.net_bytes_sent
+ description: Bytes sent
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: cgroup.net_tcp_recv
+ description: Calls to tcp_cleanup_rbuf.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: cgroup.net_tcp_send
+ description: Calls to tcp_sendmsg.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: cgroup.net_retransmit
+ description: Calls to tcp_retransmit.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: retransmitted
+ - name: cgroup.net_udp_send
+ description: Calls to udp_sendmsg
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: sent
+ - name: cgroup.net_udp_recv
+ description: Calls to udp_recvmsg
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: received
+ - name: services.net_conn_ipv6
+ description: Calls to tcp_v6_connection
+ unit: "connections/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.net_bytes_recv
+ description: Bytes received
+ unit: "kilobits/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.net_bytes_sent
+ description: Bytes sent
+ unit: "kilobits/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.net_tcp_recv
+ description: Calls to tcp_cleanup_rbuf.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.net_tcp_send
+ description: Calls to tcp_sendmsg.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.net_tcp_retransmit
+ description: Calls to tcp_retransmit
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.net_udp_send
+ description: Calls to udp_sendmsg
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.net_udp_recv
+ description: Calls to udp_recvmsg
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: dcstat
+ monitored_instance:
+ name: eBPF DCstat
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - Directory Cache
+ - File system
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor directory cache events per application given an overall vision about files on memory or storage device."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ configuration:
+ file:
+ name: "ebpf.d/dcstat.conf"
+ description: "Overwrite default configuration helping to reduce memory usage. You can also select charts visible on dashboard."
+ options:
+ description: |
+ All options are defined inside section `[global]`.
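+
+ A sketch of an `ebpf.d/dcstat.conf` that also monitors function returns (illustrative values):
+
+ ```text
+ [global]
+     ebpf load mode = return
+     pid table size = 32768
+ ```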
+ folding:
+ title: "Config option"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: apps
+ description: "These Metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.dc_ratio
+ description: Percentage of files inside directory cache
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.dc_reference
+ description: Count file access
+ unit: "files"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.dc_not_cache
+ description: Files not present inside directory cache
+ unit: "files"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.dc_not_found
+ description: Files not found
+ unit: "files"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: filesystem
+ description: "These metrics show total number of calls to functions inside kernel."
+ labels: []
+ metrics:
+ - name: filesystem.dc_reference
+ description: Variables used to calculate hit ratio.
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: reference
+ - name: slow
+ - name: miss
+ - name: filesystem.dc_hit_ratio
+ description: Percentage of files inside directory cache
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: ratio
+ - name: cgroup
+ description: ""
+ labels: []
+ metrics:
+ - name: cgroup.dc_ratio
+ description: Percentage of files inside directory cache
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: ratio
+ - name: cgroup.dc_reference
+ description: Count file access
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: reference
+ - name: cgroup.dc_not_cache
+ description: Files not present inside directory cache
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: slow
+ - name: cgroup.dc_not_found
+ description: Files not found
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: miss
+ - name: services.dc_ratio
+ description: Percentage of files inside directory cache
+ unit: "%"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.dc_reference
+ description: Count file access
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.dc_not_cache
+ description: Files not present inside directory cache
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.dc_not_found
+ description: Files not found
+ unit: "files"
+ chart_type: line
+ dimensions:
+ - name: a dimension per systemd service
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: filesystem
+ monitored_instance:
+ name: eBPF Filesystem
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - Filesystem
+ - ext4
+ - btrfs
+ - nfs
+ - xfs
+ - zfs
+ - eBPF
+ - latency
+ - I/O
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor latency for main actions on filesystem like I/O events."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ configuration:
+ file:
+ name: "ebpf.d/filesystem.conf"
+ description: "Overwrite default configuration and allows user to select charts visible on dashboard."
+ options:
+ description: |
+ This configuration file has two different sections. The `[global]` section overwrites the default options, while `[filesystem]` allows the user to select which filesystems to monitor.
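+
+ A sketch that keeps only ext4 latency monitoring enabled (illustrative values):
+
+ ```text
+ [global]
+     update every = 5
+
+ [filesystem]
+     btrfsdist = no
+     ext4dist = yes
+     nfsdist = no
+     xfsdist = no
+     zfsdist = no
+ ```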
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ - name: btrfsdist
+ description: Enable or disable latency monitoring for functions associated with btrfs filesystem.
+ default_value: yes
+ required: false
+ - name: ext4dist
+ description: Enable or disable latency monitoring for functions associated with ext4 filesystem.
+ default_value: yes
+ required: false
+ - name: nfsdist
+ description: Enable or disable latency monitoring for functions associated with nfs filesystem.
+ default_value: yes
+ required: false
+ - name: xfsdist
+ description: Enable or disable latency monitoring for functions associated with xfs filesystem.
+ default_value: yes
+ required: false
+ - name: zfsdist
+ description: Enable or disable latency monitoring for functions associated with zfs filesystem.
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: filesystem
+ description: "Latency charts associate with filesystem actions."
+ labels: []
+ metrics:
+ - name: filesystem.read_latency
+ description: ext4 latency for each read request.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: latency period
+ - name: filesystem.open_latency
+ description: ext4 latency for each open request.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: latency period
+ - name: filesystem.sync_latency
+ description: ext4 latency for each sync request.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: latency period
+ - name: filesystem
+ description: ""
+ labels: []
+ metrics:
+ - name: filesystem.write_latency
+ description: ext4 latency for each write request.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: latency period
+ - name: global
+ description: ""
+ labels: []
+ metrics:
+ - name: filesystem.attributte_latency
+ description: nfs latency for each attribute request.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: latency period
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: shm
+ monitored_instance:
+ name: eBPF SHM
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - syscall
+ - shared memory
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor syscall responsible to manipulate shared memory."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When you do not have the options set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution, the latter being preferred. The kernel compilation has a well defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: make oldconfig
+ 3. Compile your kernel image: make bzImage
+ 4. Compile your modules: make modules
+ 5. Copy your new kernel image to the boot loader directory
+ 6. Install the new modules: make modules_install
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader
+ - title: Debug Filesystem
+ description: |
+ This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+ configuration:
+ file:
+ name: "ebpf.d/shm.conf"
+ description: "Overwrite default configuration and allows user to select charts visible on dashboard."
+ options:
+ description: |
+ This configuration file has two different sections. The `[global]` section overwrites all default options, while `[syscalls]` allows the user to select which syscalls to monitor.
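+
+ A sketch that disables `shmctl` monitoring while keeping the other syscalls (illustrative values):
+
+ ```text
+ [global]
+     update every = 5
+
+ [syscalls]
+     shmget = yes
+     shmat = yes
+     shmdt = yes
+     shmctl = no
+ ```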
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin will load its hash maps. When enabled (`yes`) the plugin will load one hash table per core, instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set default lifetime for thread when enabled by cloud.
+ default_value: 300
+ required: false
+ - name: shmget
+ description: Enable or disable monitoring for syscall `shmget`
+ default_value: yes
+ required: false
+ - name: shmat
+ description: Enable or disable monitoring for syscall `shmat`
+ default_value: yes
+ required: false
+ - name: shmdt
+ description: Enable or disable monitoring for syscall `shmdt`
+ default_value: yes
+ required: false
+ - name: shmctl
+ description: Enable or disable monitoring for syscall `shmctl`
+ default_value: yes
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: cgroup
+ description: "These Metrics show grouped information per cgroup/service."
+ labels: []
+ metrics:
+ - name: cgroup.shmget
+ description: Calls to syscall <code>shmget(2)</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: get
+ - name: cgroup.shmat
+ description: Calls to syscall <code>shmat(2)</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: at
+ - name: cgroup.shmdt
+ description: Calls to syscall <code>shmdt(2)</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: dt
+ - name: cgroup.shmctl
+ description: Calls to syscall <code>shmctl(2)</code>.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: ctl
+ - name: services.shmget
+ description: Calls to syscall <code>shmget(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.shmat
+ description: Calls to syscall <code>shmat(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.shmdt
+ description: Calls to syscall <code>shmdt(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.shmctl
+ description: Calls to syscall <code>shmctl(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: apps
+ description: "These Metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.shmget_call
+ description: Calls to syscall <code>shmget(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.shmat_call
+ description: Calls to syscall <code>shmat(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.shmdt_call
+ description: Calls to syscall <code>shmdt(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.shmctl_call
+ description: Calls to syscall <code>shmctl(2)</code>.
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: global
+ description: "These Metrics show number of calls for specified syscall."
+ labels: []
+ metrics:
+ - name: system.shared_memory_calls
+ description: Calls to shared memory system calls
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: get
+ - name: at
+ - name: dt
+ - name: ctl
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: softirq
+ monitored_instance:
+ name: eBPF SoftIRQ
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - SoftIRQ
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor latency for each SoftIRQ available."
+ method_description: "Attach kprobe to internal kernel functions."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files
+ with different names.
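+
+ A quick way to verify the flags is a check such as the following (a sketch; the config file location varies across distributions):
+
+ ```
+ zgrep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /proc/config.gz 2>/dev/null || \
+ grep -E 'CONFIG_(KPROBES|BPF|BPF_SYSCALL|BPF_JIT)=' /boot/config-"$(uname -r)"
+ ```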
+
+ Now follow these steps (summarized as a shell session below):
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: `make oldconfig`.
+ 3. Compile your kernel image: `make bzImage`.
+ 4. Compile your modules: `make modules`.
+ 5. Copy your new kernel image to the boot loader directory.
+ 6. Install the new modules: `make modules_install`.
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader.
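+
+ The same steps as an illustrative shell session (paths, image names, and the boot loader update differ per distribution and architecture):
+
+ ```
+ cp /boot/config-"$(uname -r)" /usr/src/linux/.config
+ cd /usr/src/linux
+ make oldconfig
+ make bzImage
+ make modules
+ make modules_install
+ cp arch/x86/boot/bzImage /boot/vmlinuz-custom   # adjust for your boot loader
+ # generate an initrd and update your boot loader according to your distribution
+ ```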
+ - title: Debug Filesystem
+ description: |
+ This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
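+
+ To make the mount persistent across reboots, you can add an entry such as the following to `/etc/fstab` (a sketch):
+
+ ```
+ debugfs  /sys/kernel/debug  debugfs  defaults  0  0
+ ```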
+ configuration:
+ file:
+ name: "ebpf.d/softirq.conf"
+ description: "Overwrite default configuration reducing number of I/O events."
+ options:
+ description: |
+ All options are defined inside the `[global]` section.
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: lifetime
+ description: Set the default lifetime for the thread when it is enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "These metrics show latest timestamp for each softIRQ available on host."
+ labels: []
+ metrics:
+ - name: system.softirq_latency
+ description: Soft IRQ latency
+ unit: "milliseconds"
+ chart_type: stacked
+ dimensions:
+ - name: soft IRQs
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: mount
+ monitored_instance:
+ name: eBPF Mount
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - mount
+ - umount
+ - device
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor calls for mount and umount syscall."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT, CONFIG_HAVE_SYSCALL_TRACEPOINTS), files inside debugfs, and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: `make oldconfig`.
+ 3. Compile your kernel image: `make bzImage`.
+ 4. Compile your modules: `make modules`.
+ 5. Copy your new kernel image to the boot loader directory.
+ 6. Install the new modules: `make modules_install`.
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader.
+ - title: Debug Filesystem
+ description: |
+ This thread needs to attach a tracepoint to monitor when a process schedules an exit event. To allow this specific feature, it is necessary to mount `debugfs` (`mount -t debugfs none /sys/kernel/debug/`).
+ configuration:
+ file:
+ name: "ebpf.d/mount.conf"
+ description: "Overwrite default configuration."
+ options:
+ description: |
+ All options are defined inside the `[global]` section.
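+
+ For example, an `ebpf.d/mount.conf` forcing the legacy loader instead of CO-RE could look like this sketch (illustrative values, not a recommendation):
+
+ ```
+ [global]
+     update every = 5
+     ebpf type format = legacy
+ ```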
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: lifetime
+ description: Set the default lifetime for the thread when it is enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "Calls for syscalls mount an umount."
+ labels: []
+ metrics:
+ - name: mount_points.call
+ description: Calls to mount and umount syscalls
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: mount
+ - name: umount
+ - name: mount_points.error
+ description: Errors to mount and umount file systems
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: mount
+ - name: umount
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: vfs
+ monitored_instance:
+ name: eBPF VFS
+ link: "https://kernel.org/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list:
+ - plugin_name: apps.plugin
+ module_name: apps
+ - plugin_name: cgroups.plugin
+ module_name: cgroups
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - virtual
+ - filesystem
+ - eBPF
+ - I/O
+ - files
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor I/O events on Linux Virtual Filesystem."
+ method_description: "Attach tracing (kprobe, trampoline) to internal kernel functions according options used to compile kernel."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: "The plugin needs setuid because it loads data inside kernel. Netada sets necessary permission during installation time."
+ default_behavior:
+ auto_detection:
+ description: "The plugin checks kernel compilation flags (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) and presence of BTF files to decide which eBPF program will be attached."
+ limits:
+ description: ""
+ performance_impact:
+ description: "This thread will add overhead every time that an internal kernel function monitored by this thread is called. The estimated additional period of time is between 90-200ms per call on kernels that do not have BTF technology."
+ setup:
+ prerequisites:
+ list:
+ - title: Compile kernel
+ description: |
+ Check if your kernel was compiled with the necessary options (CONFIG_KPROBES, CONFIG_BPF, CONFIG_BPF_SYSCALL, CONFIG_BPF_JIT) in `/proc/config.gz` or inside the /boot/config file. Some of the cited names can differ according to the preferences of Linux distributions.
+ When these options are not set, it is necessary to get the kernel source code from https://kernel.org or a kernel package from your distribution (the latter is preferred). The kernel compilation has a well-defined pattern, but distributions can deliver their configuration files
+ with different names.
+
+ Now follow these steps:
+ 1. Copy the configuration file to /usr/src/linux/.config.
+ 2. Select the necessary options: `make oldconfig`.
+ 3. Compile your kernel image: `make bzImage`.
+ 4. Compile your modules: `make modules`.
+ 5. Copy your new kernel image to the boot loader directory.
+ 6. Install the new modules: `make modules_install`.
+ 7. Generate an initial ramdisk image (`initrd`) if necessary.
+ 8. Update your boot loader.
+ configuration:
+ file:
+ name: "ebpf.d/vfs.conf"
+ description: "Overwrite default configuration helping to reduce memory usage."
+ options:
+ description: |
+ All options are defined inside the `[global]` section.
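+
+ For example, an `ebpf.d/vfs.conf` tuned to reduce memory usage could look like this sketch (illustrative values):
+
+ ```
+ [global]
+     update every = 10
+     pid table size = 16384
+     maps per core = no
+ ```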
+ folding:
+ title: "Config options"
+ enabled: true
+ list:
+ - name: update every
+ description: Data collection frequency.
+ default_value: 5
+ required: false
+ - name: ebpf load mode
+ description: Define whether the plugin will monitor only the function call (`entry`) or also monitor the function return (`return`).
+ default_value: entry
+ required: false
+ - name: apps
+ description: Enable or disable integration with apps.plugin
+ default_value: no
+ required: false
+ - name: cgroups
+ description: Enable or disable integration with cgroup.plugin
+ default_value: no
+ required: false
+ - name: pid table size
+ description: Number of elements stored inside hash tables used to monitor calls per PID.
+ default_value: 32768
+ required: false
+ - name: ebpf type format
+ description: "Define the file type to load an eBPF program. Three options are available: `legacy` (Attach only `kprobe`), `co-re` (Plugin tries to use `trampoline` when available), and `auto` (plugin check OS configuration before to load)."
+ default_value: auto
+ required: false
+ - name: ebpf co-re tracing
+ description: "Select the attach method used by plugin when `co-re` is defined in previous option. Two options are available: `trampoline` (Option with lowest overhead), and `probe` (the same of legacy code)."
+ default_value: trampoline
+ required: false
+ - name: maps per core
+ description: Define how the plugin will load its hash maps. When enabled (`yes`), the plugin will load one hash table per core, instead of keeping the information centralized.
+ default_value: yes
+ required: false
+ - name: lifetime
+ description: Set the default lifetime for the thread when it is enabled by cloud.
+ default_value: 300
+ required: false
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: cgroup
+ description: "These Metrics show grouped information per cgroup/service."
+ labels: []
+ metrics:
+ - name: cgroup.vfs_unlink
+ description: Files deleted
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: delete
+ - name: cgroup.vfs_write
+ description: Write to disk
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: cgroup.vfs_write_error
+ description: Fails to write
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: cgroup.vfs_read
+ description: Read from disk
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: cgroup.vfs_read_error
+ description: Fails to read
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: cgroup.vfs_write_bytes
+ description: Bytes written on disk
+ unit: "bytes/s"
+ chart_type: line
+ dimensions:
+ - name: write
+ - name: cgroup.vfs_read_bytes
+ description: Bytes read from disk
+ unit: "bytes/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: cgroup.vfs_fsync
+ description: Calls for <code>vfs_fsync</code>
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: fsync
+ - name: cgroup.vfs_fsync_error
+ description: Sync error
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: fsync
+ - name: cgroup.vfs_open
+ description: Calls for <code>vfs_open</code>
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: cgroup.vfs_open_error
+ description: Open error
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: cgroup.vfs_create
+ description: Calls for <code>vfs_create</code>
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: create
+ - name: cgroup.vfs_create_error
+ description: Create error
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: create
+ - name: services.vfs_unlink
+ description: Files deleted
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_write
+ description: Write to disk
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_write_error
+ description: Fails to write
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_read
+ description: Read from disk
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_read_error
+ description: Fails to read
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_write_bytes
+ description: Bytes written on disk
+ unit: "bytes/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_read_bytes
+ description: Bytes read from disk
+ unit: "bytes/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_fsync
+ description: Calls to <code>vfs_fsync</code>
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_fsync_error
+ description: Sync error
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_open
+ description: Calls to <code>vfs_open</code>
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_open_error
+ description: Open error
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_create
+ description: Calls to <code>vfs_create</code>
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: services.vfs_create_error
+ description: Create error
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per systemd service
+ - name: global
+ description: "These Metrics show grouped information per cgroup/service."
+ labels: []
+ metrics:
+ - name: filesystem.vfs_deleted_objects
+ description: Remove files
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: delete
+ - name: filesystem.vfs_io
+ description: Calls to IO
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: filesystem.vfs_io_bytes
+ description: Bytes written and read
+ unit: "bytes/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: filesystem.vfs_io_error
+ description: Fails to write or read
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: read
+ - name: write
+ - name: filesystem.vfs_fsync
+ description: Calls for <code>vfs_fsync</code>
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: fsync
+ - name: filesystem.vfs_fsync_error
+ description: Fails to synchronize
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: fsync
+ - name: filesystem.vfs_open
+ description: Calls for <code>vfs_open</code>
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: filesystem.vfs_open_error
+ description: Fails to open a file
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: open
+ - name: filesystem.vfs_create
+ description: Calls for <code>vfs_create</code>
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: create
+ - name: filesystem.vfs_create_error
+ description: Fails to create a file.
+ unit: "calls/s"
+ chart_type: line
+ dimensions:
+ - name: create
+ - name: apps
+ description: "These Metrics show grouped information per apps group."
+ labels: []
+ metrics:
+ - name: apps.file_deleted
+ description: Files deleted
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_write_call
+ description: Write to disk
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_write_error
+ description: Fails to write
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_read_call
+ description: Read from disk
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_read_error
+ description: Fails to read
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_write_bytes
+ description: Bytes written on disk
+ unit: "bytes/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_read_bytes
+ description: Bytes read on disk
+ unit: "bytes/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_fsync
+ description: Calls for <code>vfs_fsync</code>
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_fsync_error
+ description: Sync error
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_open
+ description: Calls for <code>vfs_open</code>
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_open_error
+ description: Open error
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_create
+ description: Calls for <code>vfs_create</code>
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - name: apps.vfs_create_error
+ description: Create error
+ unit: "calls/s"
+ chart_type: stacked
+ dimensions:
+ - name: a dimension per app group
+ - meta:
+ plugin_name: ebpf.plugin
+ module_name: process
+ monitored_instance:
+ name: eBPF Process
+ link: "https://github.com/netdata/netdata/"
+ categories:
+ - data-collection.ebpf
+ icon_filename: "ebpf.jpg"
+ related_resources:
+ integrations:
+ list: []
+ info_provided_to_referring_integrations:
+ description: ""
+ keywords:
+ - Memory
+ - plugin
+ - eBPF
+ most_popular: false
+ overview:
+ data_collection:
+ metrics_description: "Monitor internal memory usage."
+ method_description: "Uses netdata internal statistic to monitor memory management by plugin."
+ supported_platforms:
+ include:
+ - Linux
+ exclude: []
+ multi_instance: true
+ additional_permissions:
+ description: ""
+ default_behavior:
+ auto_detection:
+ description: ""
+ limits:
+ description: ""
+ performance_impact:
+ description: ""
+ setup:
+ prerequisites:
+ list:
+ - title: Netdata flags.
+ description: "To have these charts you need to compile netdata with flag `NETDATA_DEV_MODE`."
+ configuration:
+ file:
+ name: ""
+ description: ""
+ options:
+ description: ""
+ folding:
+ title: ""
+ enabled: true
+ list: []
+ examples:
+ folding:
+ enabled: true
+ title: ""
+ list: []
+ troubleshooting:
+ problems:
+ list: []
+ alerts: []
+ metrics:
+ folding:
+ title: Metrics
+ enabled: false
+ description: ""
+ availability: []
+ scopes:
+ - name: global
+ description: "How plugin is allocating memory."
+ labels: []
+ metrics:
+ - name: netdata.ebpf_aral_stat_size
+ description: Bytes allocated for ARAL.
+ unit: "bytes"
+ chart_type: stacked
+ dimensions:
+ - name: memory
+ - name: netdata.ebpf_aral_stat_alloc
+ description: Calls to allocate memory.
+ unit: "calls"
+ chart_type: stacked
+ dimensions:
+ - name: aral
+ - name: netdata.ebpf_threads
+ description: Threads info
+ unit: "threads"
+ chart_type: line
+ dimensions:
+ - name: total
+ - name: running
+ - name: netdata.ebpf_load_methods
+ description: Load info
+ unit: "methods"
+ chart_type: line
+ dimensions:
+ - name: legacy
+ - name: co-re
+ - name: netdata.ebpf_kernel_memory
+ description: Memory allocated for hash tables.
+ unit: "bytes"
+ chart_type: line
+ dimensions:
+ - name: memory_locked
+ - name: netdata.ebpf_hash_tables_count
+ description: Number of hash tables loaded
+ unit: "hash tables"
+ chart_type: line
+ dimensions:
+ - name: hash_table
+ - name: netdata.ebpf_hash_tables_insert_pid_elements
+ description: Number of times an element was inserted in a hash table.
+ unit: "rows"
+ chart_type: line
+ dimensions:
+ - name: thread
+ - name: netdata.ebpf_hash_tables_remove_pid_elements
+ description: Number of times an element was removed from a hash table.
+ unit: "rows"
+ chart_type: line
+ dimensions:
+ - name: thread
diff --git a/collectors/ebpf.plugin/metrics.csv b/collectors/ebpf.plugin/metrics.csv
deleted file mode 100644
index 5714c9767..000000000
--- a/collectors/ebpf.plugin/metrics.csv
+++ /dev/null
@@ -1,197 +0,0 @@
-metric,scope,dimensions,unit,description,chart_type,labels,plugin,module
-cgroup.fd_open,cgroup,open,calls/s,Number of open files,line,,ebpf.plugin,filedescriptor
-cgroup.fd_open_error,cgroup,open,calls/s,Fails to open files,line,,ebpf.plugin,filedescriptor
-cgroup.fd_closed,cgroup,close,calls/s,Files closed,line,,ebpf.plugin,filedescriptor
-cgroup.fd_close_error,cgroup,close,calls/s,Fails to close files,line,,ebpf.plugin,filedescriptor
-services.file_open,,a dimension per systemd service,calls/s,Number of open files,stacked,,ebpf.plugin,filedescriptor
-services.file_open_error,,a dimension per systemd service,calls/s,Fails to open files,stacked,,ebpf.plugin,filedescriptor
-services.file_closed,,a dimension per systemd service,calls/s,Files closed,stacked,,ebpf.plugin,filedescriptor
-services.file_close_error,,a dimension per systemd service,calls/s,Fails to close files,stacked,,ebpf.plugin,filedescriptor
-apps.file_open,,a dimension per app group,calls/s,Number of open files,stacked,,ebpf.plugin,filedescriptor
-apps.file_open_error,,a dimension per app group,calls/s,Fails to open files,stacked,,ebpf.plugin,filedescriptor
-apps.file_closed,,a dimension per app group,calls/s,Files closed,stacked,,ebpf.plugin,filedescriptor
-apps.file_close_error,,a dimension per app group,calls/s,Fails to close files,stacked,,ebpf.plugin,filedescriptor
-filesystem.file_descriptor,,"open, close",calls/s,Open and close calls,line,,ebpf.plugin,filedescriptor
-filesystem.file_error,,"open, close",calls/s,Open fails,line,,ebpf.plugin,filedescriptor
-system.process_thread,,process,calls/s,Start process,line,,ebpf.plugin,processes
-system.process_status,,"process, zombie",difference,Process not closed,line,,ebpf.plugin,processes
-system.exit,,process,calls/s,Exit process,line,,ebpf.plugin,processes
-system.task_error,,task,calls/s,Fails to create process,line,,ebpf.plugin,processes
-apps.process_create,,a dimension per app group,calls/s,Process started,stacked,,ebpf.plugin,processes
-apps.thread_create,,a dimension per app group,calls/s,Threads started,stacked,,ebpf.plugin,processes
-apps.task_exit,,a dimension per app group,calls/s,Tasks starts exit process,stacked,,ebpf.plugin,processes
-apps.task_close,,a dimension per app group,calls/s,Tasks closed,stacked,,ebpf.plugin,processes
-apps.task_error,,a dimension per app group,calls/s,Errors to create process or threads,stacked,,ebpf.plugin,processes
-cgroup.process_create,cgroup,process,calls/s,Process started,line,,ebpf.plugin,processes
-cgroup.thread_create,cgroup,thread,calls/s,Threads started,line,,ebpf.plugin,processes
-cgroup.task_exit,cgroup,exit,calls/s,Tasks starts exit process,line,,ebpf.plugin,processes
-cgroup.task_close,cgroup,process,calls/s,Tasks closed,line,,ebpf.plugin,processes
-cgroup.task_error,cgroup,process,calls/s,Errors to create process or threads,line,,ebpf.plugin,processes
-services.process_create,cgroup,a dimension per systemd service,calls/s,Process started,stacked,,ebpf.plugin,processes
-services.thread_create,cgroup,a dimension per systemd service,calls/s,Threads started,stacked,,ebpf.plugin,processes
-services.task_close,cgroup,a dimension per systemd service,calls/s,Tasks starts exit process,stacked,,ebpf.plugin,processes
-services.task_exit,cgroup,a dimension per systemd service,calls/s,Tasks closed,stacked,,ebpf.plugin,processes
-services.task_error,cgroup,a dimension per systemd service,calls/s,Errors to create process or threads,stacked,,ebpf.plugin,processes
-disk.latency_io,disk,latency,calls/s,Disk latency,stacked,,ebpf.plugin,disk
-system.hardirq_latency,,hardirq names,milisecondds,Hardware IRQ latency,stacked,,ebpf.plugin,hardirq
-apps.cachestat_ratio,,a dimension per app group,%,Hit ratio,line,,ebpf.plugin,cachestat
-apps.cachestat_dirties,,a dimension per app group,page/s,Number of dirty pages,stacked,,ebpf.plugin,cachestat
-apps.cachestat_hits,,a dimension per app group,hits/s,Number of accessed files,stacked,,ebpf.plugin,cachestat
-apps.cachestat_misses,,a dimension per app group,misses/s,Files out of page cache,stacked,,ebpf.plugin,cachestat
-services.cachestat_ratio,,a dimension per systemd service,%,Hit ratio,line,,ebpf.plugin,cachestat
-services.cachestat_dirties,,a dimension per systemd service,page/s,Number of dirty pages,line,,ebpf.plugin,cachestat
-services.cachestat_hits,,a dimension per systemd service,hits/s,Number of accessed files,line,,ebpf.plugin,cachestat
-services.cachestat_misses,,a dimension per systemd service,misses/s,Files out of page cache,line,,ebpf.plugin,cachestat
-cgroup.cachestat_ratio,cgroup,ratio,%,Hit ratio,line,,ebpf.plugin,cachestat
-cgroup.cachestat_dirties,cgroup,dirty,page/s,Number of dirty pages,line,,ebpf.plugin,cachestat
-cgroup.cachestat_hits,cgroup,hit,hits/s,Number of accessed files,line,,ebpf.plugin,cachestat
-cgroup.cachestat_misses,cgroup,miss,misses/s,Files out of page cache,line,,ebpf.plugin,cachestat
-mem.file_sync,,"fsync, fdatasync",calls/s,Monitor calls for <code>fsync(2)</code> and <code>fdatasync(2)</code>.,stacked,,ebpf.plugin,sync
-mem.meory_map,,msync,calls/s,Monitor calls for <code>msync(2)</code>.,line,,ebpf.plugin,sync
-mem.sync,,"sync, syncfs",calls/s,Monitor calls for <code>sync(2)</code> and <code>syncfs(2)</code>.,line,,ebpf.plugin,sync
-mem.file_segment,,sync_file_range,calls/s,Monitor calls for <code>sync_file_range(2)</code>.,line,,ebpf.plugin,sync
-mem.cachestat_ratio,,ratio,%,Hit ratio,line,,ebpf.plugin,cachestat
-mem.cachestat_dirties,,dirty,page/s,Number of dirty pages,line,,ebpf.plugin,cachestat
-mem.cachestat_hits,,hit,hits/s,Number of accessed files,line,,ebpf.plugin,cachestat
-mem.cachestat_misses,,miss,misses/s,Files out of page cache,line,,ebpf.plugin,cachestat
-mdstat.mdstat_flush,,disk,flushes,MD flushes,stacked,,ebpf.plugin,mdflush
-cgroup.swap_read,cgroup,read,calls/s,Calls to function <code>swap_readpage</code>.,line,,ebpf.plugin,swap
-cgroup.swap_write,cgroup,write,calls/s,Calls to function <code>swap_writepage</code>.,line,,ebpf.plugin,swap
-services.swap_read,,a dimension per systemd service,calls/s,Calls to <code>swap_readpage</code>.,stacked,,ebpf.plugin,swap
-services.swap_write,,a dimension per systemd service,calls/s,Calls to function <code>swap_writepage</code>.,stacked,,ebpf.plugin,swap
-apps.swap_read_call,,a dimension per app group,calls/s,Calls to function <code>swap_readpage</code>.,stacked,,ebpf.plugin,swap
-apps.swap_write_call,,a dimension per app group,calls/s,Calls to function <code>swap_writepage</code>.,stacked,,ebpf.plugin,swap
-system.swapcalls,,"write, read",calls/s,Calls to access swap memory,line,,ebpf.plugin,swap
-cgroup.oomkills,cgroup,cgroup name,kills,OOM kills. This chart is provided by eBPF plugin.,line,,ebpf.plugin,oomkill
-services.oomkills,,a dimension per systemd service,kills,OOM kills. This chart is provided by eBPF plugin.,line,,ebpf.plugin,oomkill
-apps.oomkills,,a dimension per app group,kills,OOM kills,stacked,,ebpf.plugin,oomkill
-ip.inbound_conn,,connection_tcp,connections/s,Inbound connections.,line,,ebpf.plugin,socket
-ip.tcp_outbound_conn,,received,connections/s,TCP outbound connections.,line,,ebpf.plugin,socket
-ip.tcp_functions,,"received, send, closed",calls/s,Calls to internal functions,line,,ebpf.plugin,socket
-ip.total_tcp_bandwidth,,"received, send",kilobits/s,TCP bandwidth,line,,ebpf.plugin,socket
-ip.tcp_error,,"received, send",calls/s,TCP errors,line,,ebpf.plugin,socket
-ip.tcp_retransmit,,retransmited,calls/s,Packages retransmitted,line,,ebpf.plugin,socket
-ip.udp_functions,,"received, send",calls/s,UDP calls,line,,ebpf.plugin,socket
-ip.total_udp_bandwidth,,"received, send",kilobits/s,UDP bandwidth,line,,ebpf.plugin,socket
-ip.udp_error,,"received, send",calls/s,UDP errors,line,,ebpf.plugin,socket
-apps.outbound_conn_v4,,a dimension per app group,connections/s,Calls to tcp_v4_connection,stacked,,ebpf.plugin,socket
-apps.outbound_conn_v6,,a dimension per app group,connections/s,Calls to tcp_v6_connection,stacked,,ebpf.plugin,socket
-apps.total_bandwidth_sent,,a dimension per app group,kilobits/s,Bytes sent,stacked,,ebpf.plugin,socket
-apps.total_bandwidth_recv,,a dimension per app group,kilobits/s,bytes received,stacked,,ebpf.plugin,socket
-apps.bandwidth_tcp_send,,a dimension per app group,calls/s,Calls for tcp_sendmsg,stacked,,ebpf.plugin,socket
-apps.bandwidth_tcp_recv,,a dimension per app group,calls/s,Calls for tcp_cleanup_rbuf,stacked,,ebpf.plugin,socket
-apps.bandwidth_tcp_retransmit,,a dimension per app group,calls/s,Calls for tcp_retransmit,stacked,,ebpf.plugin,socket
-apps.bandwidth_udp_send,,a dimension per app group,calls/s,Calls for udp_sendmsg,stacked,,ebpf.plugin,socket
-apps.bandwidth_udp_recv,,a dimension per app group,calls/s,Calls for udp_recvmsg,stacked,,ebpf.plugin,socket
-cgroup.net_conn_ipv4,cgroup,connected_v4,connections/s,Calls to tcp_v4_connection,line,,ebpf.plugin,socket
-cgroup.net_conn_ipv6,cgroup,connected_v6,connections/s,Calls to tcp_v6_connection,line,,ebpf.plugin,socket
-cgroup.net_bytes_recv,cgroup,received,calls/s,Bytes received,line,,ebpf.plugin,socket
-cgroup.net_bytes_sent,cgroup,sent,calls/s,Bytes sent,line,,ebpf.plugin,socket
-cgroup.net_tcp_recv,cgroup,received,calls/s,Calls to tcp_cleanup_rbuf.,line,,ebpf.plugin,socket
-cgroup.net_tcp_send,cgroup,sent,calls/s,Calls to tcp_sendmsg.,line,,ebpf.plugin,socket
-cgroup.net_retransmit,cgroup,retransmitted,calls/s,Calls to tcp_retransmit.,line,,ebpf.plugin,socket
-cgroup.net_udp_send,cgroup,sent,calls/s,Calls to udp_sendmsg,line,,ebpf.plugin,socket
-cgroup.net_udp_recv,cgroup,received,calls/s,Calls to udp_recvmsg,line,,ebpf.plugin,socket
-services.net_conn_ipv4,,a dimension per systemd service,connections/s,Calls to tcp_v4_connection,stacked,,ebpf.plugin,socket
-services.net_conn_ipv6,,a dimension per systemd service,connections/s,Calls to tcp_v6_connection,stacked,,ebpf.plugin,socket
-services.net_bytes_recv,,a dimension per systemd service,kilobits/s,Bytes received,stacked,,ebpf.plugin,socket
-services.net_bytes_sent,,a dimension per systemd service,kilobits/s,Bytes sent,stacked,,ebpf.plugin,socket
-services.net_tcp_recv,,a dimension per systemd service,calls/s,Calls to tcp_cleanup_rbuf.,stacked,,ebpf.plugin,socket
-services.net_tcp_send,,a dimension per systemd service,calls/s,Calls to tcp_sendmsg.,stacked,,ebpf.plugin,socket
-services.net_tcp_retransmit,,a dimension per systemd service,calls/s,Calls to tcp_retransmit,stacked,,ebpf.plugin,socket
-services.net_udp_send,,a dimension per systemd service,calls/s,Calls to udp_sendmsg,stacked,,ebpf.plugin,socket
-services.net_udp_recv,,a dimension per systemd service,calls/s,Calls to udp_recvmsg,stacked,,ebpf.plugin,socket
-apps.dc_ratio,,a dimension per app group,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
-apps.dc_reference,,a dimension per app group,files,Count file access,stacked,,ebpf.plugin,dcstat
-apps.dc_not_cache,,a dimension per app group,files,Files not present inside directory cache,stacked,,ebpf.plugin,dcstat
-apps.dc_not_found,,a dimension per app group,files,Files not found,stacked,,ebpf.plugin,dcstat
-cgroup.dc_ratio,cgroup,ratio,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
-cgroup.dc_reference,cgroup,reference,files,Count file access,line,,ebpf.plugin,dcstat
-cgroup.dc_not_cache,cgroup,slow,files,Files not present inside directory cache,line,,ebpf.plugin,dcstat
-cgroup.dc_not_found,cgroup,miss,files,Files not found,line,,ebpf.plugin,dcstat
-services.dc_ratio,,a dimension per systemd service,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
-services.dc_reference,,a dimension per systemd service,files,Count file access,line,,ebpf.plugin,dcstat
-services.dc_not_cache,,a dimension per systemd service,files,Files not present inside directory cache,line,,ebpf.plugin,dcstat
-services.dc_not_found,,a dimension per systemd service,files,Files not found,line,,ebpf.plugin,dcstat
-filesystem.dc_hit_ratio,,ratio,%,Percentage of files inside directory cache,line,,ebpf.plugin,dcstat
-filesystem.dc_reference,filesystem,"reference, slow, miss",files,Variables used to calculate hit ratio.,line,,ebpf.plugin,dcstat
-filesystem.read_latency,filesystem,latency period,calls/s,ext4 latency for each read request.,stacked,,ebpf.plugin,filesystem
-filesystem.write_latency,iilesystem,latency period,calls/s,ext4 latency for each write request.,stacked,,ebpf.plugin,filesystem
-filesystem.open_latency,filesystem,latency period,calls/s,ext4 latency for each open request.,stacked,,ebpf.plugin,filesystem
-filesystem.sync_latency,filesystem,latency period,calls/s,ext4 latency for each sync request.,stacked,,ebpf.plugin,filesystem
-filesystem.attributte_latency,,latency period,calls/s,nfs latency for each attribute request.,stacked,,ebpf.plugin,filesystem
-cgroup.shmget,cgroup,get,calls/s,Calls to syscall <code>shmget(2)</code>.,line,,ebpf.plugin,shm
-cgroup.shmat,cgroup,at,calls/s,Calls to syscall <code>shmat(2)</code>.,line,,ebpf.plugin,shm
-cgroup.shmdt,cgroup,dt,calls/s,Calls to syscall <code>shmdt(2)</code>.,line,,ebpf.plugin,shm
-cgroup.shmctl,cgroup,ctl,calls/s,Calls to syscall <code>shmctl(2)</code>.,line,,ebpf.plugin,shm
-services.shmget,,a dimension per systemd service,calls/s,Calls to syscall <code>shmget(2)</code>.,stacked,,ebpf.plugin,shm
-services.shmat,,a dimension per systemd service,calls/s,Calls to syscall <code>shmat(2)</code>.,stacked,,ebpf.plugin,shm
-services.shmdt,,a dimension per systemd service,calls/s,Calls to syscall <code>shmdt(2)</code>.,stacked,,ebpf.plugin,shm
-services.shmctl,,a dimension per systemd service,calls/s,Calls to syscall <code>shmctl(2)</code>.,stacked,,ebpf.plugin,shm
-apps.shmget_call,,a dimension per app group,calls/s,Calls to syscall <code>shmget(2)</code>.,stacked,,ebpf.plugin,shm
-apps.shmat_call,,a dimension per app group,calls/s,Calls to syscall <code>shmat(2)</code>.,stacked,,ebpf.plugin,shm
-apps.shmdt_call,,a dimension per app group,calls/s,Calls to syscall <code>shmdt(2)</code>.,stacked,,ebpf.plugin,shm
-apps.shmctl_call,,a dimension per app group,calls/s,Calls to syscall <code>shmctl(2)</code>.,stacked,,ebpf.plugin,shm
-system.shared_memory_calls,,"get, at, dt, ctl",calls/s,Calls to shared memory system calls,line,,ebpf.plugin,shm
-system.softirq_latency,,soft IRQs,miliseconds,Software IRQ latency,stacked,,ebpf.plugin,softirq
-mount_points.call,,"mount, umount",calls/s,Calls to mount and umount syscalls,line,,ebpf.plugin,mount
-mount_points.error,,"mount, umount",calls/s,Errors to mount and umount file systems,line,,ebpf.plugin,mount
-cgroup.vfs_unlink,cgroup,delete,calls/s,Files deleted,line,,ebpf.plugin,vfs
-cgroup.vfs_write,cgroup,write,calls/s,Write to disk,line,,ebpf.plugin,vfs
-cgroup.vfs_write_error,cgroup,write,calls/s,Fails to write,line,,ebpf.plugin,vfs
-cgroup.vfs_read,cgroup,read,calls/s,Read from disk,line,,ebpf.plugin,vfs
-cgroup.vfs_read_error,cgroup,read,calls/s,Fails to read,line,,ebpf.plugin,vfs
-cgroup.vfs_write_bytes,cgroup,write,bytes/s,Bytes written on disk,line,,ebpf.plugin,vfs
-cgroup.vfs_read_bytes,cgroup,read,bytes/s,Bytes read from disk,line,,ebpf.plugin,vfs
-cgroup.vfs_fsync,cgroup,fsync,calls/s,Calls for <code>vfs_fsync</code>,line,,ebpf.plugin,vfs
-cgroup.vfs_fsync_error,cgroup,fsync,calls/s,Sync error,line,,ebpf.plugin,vfs
-cgroup.vfs_open,cgroup,open,calls/s,Calls for <code>vfs_open</code>,line,,ebpf.plugin,vfs
-cgroup.vfs_open_error,cgroup,open,calls/s,Open error,line,,ebpf.plugin,vfs
-cgroup.vfs_create,cgroup,create,calls/s,Calls for <code>vfs_create</code>,line,,ebpf.plugin,vfs
-cgroup.vfs_create_error,cgroup,create,calls/s,Create error,line,,ebpf.plugin,vfs
-services.vfs_unlink,,a dimension per systemd service,calls/s,Files deleted,stacked,,ebpf.plugin,vfs
-services.vfs_write,,a dimension per systemd service,calls/s,Write to disk,stacked,,ebpf.plugin,vfs
-services.vfs_write_error,,a dimension per systemd service,calls/s,Fails to write,stacked,,ebpf.plugin,vfs
-services.vfs_read,,a dimension per systemd service,calls/s,Read from disk,stacked,,ebpf.plugin,vfs
-services.vfs_read_error,,a dimension per systemd service,calls/s,Fails to read,stacked,,ebpf.plugin,vfs
-services.vfs_write_bytes,,a dimension per systemd service,bytes/s,Bytes written on disk,stacked,,ebpf.plugin,vfs
-services.vfs_read_bytes,,a dimension per systemd service,bytes/s,Bytes read from disk,stacked,,ebpf.plugin,vfs
-services.vfs_fsync,,a dimension per systemd service,calls/s,Calls to <code>vfs_fsync</code>,stacked,,ebpf.plugin,vfs
-services.vfs_fsync_error,,a dimension per systemd service,calls/s,Sync error,stacked,,ebpf.plugin,vfs
-services.vfs_open,,a dimension per systemd service,calls/s,Calls to <code>vfs_open</code>,stacked,,ebpf.plugin,vfs
-services.vfs_open_error,,a dimension per systemd service,calls/s,Open error,stacked,,ebpf.plugin,vfs
-services.vfs_create,,a dimension per systemd service,calls/s,Calls to <code>vfs_create</code>,stacked,,ebpf.plugin,vfs
-services.vfs_create_error,,a dimension per systemd service,calls/s,Create error,stacked,,ebpf.plugin,vfs
-filesystem.vfs_deleted_objects,,delete,calls/s,Remove files,line,,ebpf.plugin,vfs
-filesystem.vfs_io,,"read, write",calls/s,Calls to IO,line,,ebpf.plugin,vfs
-filesystem.vfs_io_bytes,,"read, write",bytes/s,Bytes written and read,line,,ebpf.plugin,vfs
-filesystem.vfs_io_error,,"read, write",calls/s,Fails to write or read,line,,ebpf.plugin,vfs
-filesystem.vfs_fsync,,fsync,calls/s,Calls for <code>vfs_fsync</code>,line,,ebpf.plugin,vfs
-filesystem.vfs_fsync_error,,fsync,calls/s,Fails to synchronize,line,,ebpf.plugin,vfs
-filesystem.vfs_open,,open,calls/s,Calls for <code>vfs_open</code>,line,,ebpf.plugin,vfs
-filesystem.vfs_open_error,,open,calls/s,Fails to open a file,line,,ebpf.plugin,vfs
-filesystem.vfs_create,,create,calls/s,Calls for <code>vfs_create</code>,line,,ebpf.plugin,vfs
-filesystem.vfs_create_error,,create,calls/s,Fails to create a file.,line,,ebpf.plugin,vfs
-apps.file_deleted,,a dimension per app group,calls/s,Files deleted,stacked,,ebpf.plugin,vfs
-apps.vfs_write_call,,a dimension per app group,calls/s,Write to disk,stacked,,ebpf.plugin,vfs
-apps.vfs_write_error,,a dimension per app group,calls/s,Fails to write,stacked,,ebpf.plugin,vfs
-apps.vfs_read_call,,a dimension per app group,calls/s,Read from disk,stacked,,ebpf.plugin,vfs
-apps.vfs_read_error,,a dimension per app group,calls/s,Fails to read,stacked,,ebpf.plugin,vfs
-apps.vfs_write_bytes,,a dimension per app group,bytes/s,Bytes written on disk,stacked,,ebpf.plugin,vfs
-apps.vfs_read_bytes,,a dimension per app group,bytes/s,Bytes read on disk,stacked,,ebpf.plugin,vfs
-apps.vfs_fsync,,a dimension per app group,calls/s,Calls for <code>vfs_fsync</code>,stacked,,ebpf.plugin,vfs
-apps.vfs_fsync_error,,a dimension per app group,calls/s,Sync error,stacked,,ebpf.plugin,vfs
-apps.vfs_open,,a dimension per app group,calls/s,Calls for <code>vfs_open</code>,stacked,,ebpf.plugin,vfs
-apps.vfs_open_error,,a dimension per app group,calls/s,Open error,stacked,,ebpf.plugin,vfs
-apps.vfs_create,,a dimension per app group,calls/s,Calls for <code>vfs_create</code>,stacked,,ebpf.plugin,vfs
-apps.vfs_create_error,,a dimension per app group,calls/s,Create error,stacked,,ebpf.plugin,vfs
-netdata.ebpf_aral_stat_size,,memory,bytes,Bytes allocated for ARAL.,stacked,,ebpf.plugin,process
-netdata.ebpf_aral_stat_alloc,,aral,calls,Calls to allocate memory.,stacked,,ebpf.plugin,process
-netdata.ebpf_threads,,"total, running",threads,Threads info,line,,ebpf.plugin,process
-netdata.ebpf_load_methods,,"legacy, co-re",methods,Load info,line,,ebpf.plugin,process
-netdata.ebpf_kernel_memory,,memory_locked,bytes,Memory allocated for hash tables.,line,,ebpf.plugin,process
-netdata.ebpf_hash_tables_count,,hash_table,hash tables,Number of hash tables loaded,line,,ebpf.plugin,process
-netdata.ebpf_aral_stat_size,,memory,bytes,Bytes allocated for ARAL,stacked,,ebpf.plugin,process
-netdata.ebpf_aral_stat_alloc,,aral,calls,Calls to allocate memory,stacked,,ebpf.plugin,process
-netdata.ebpf_aral_stat_size,,memory,bytes,Bytes allocated for ARAL.,stacked,,ebpf.plugin,process
-netdata.ebpf_aral_stat_alloc,,aral,calls,Calls to allocate memory,stacked,,ebpf.plugin,process
diff --git a/collectors/ebpf.plugin/multi_metadata.yaml b/collectors/ebpf.plugin/multi_metadata.yaml
deleted file mode 100644
index 9a31a4037..000000000
--- a/collectors/ebpf.plugin/multi_metadata.yaml
+++ /dev/null
@@ -1,2360 +0,0 @@
-name: ebpf.plugin
-modules:
- - meta:
- plugin_name: ebpf.plugin
- module_name: filedescriptor
- monitored_instance:
- name: ebpf filedescriptor
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.fd_open
- description: Number of open files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.fd_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.fd_closed
- description: Files closed
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: close
- - name: cgroup.fd_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: close
- - name: global
- description: ""
- labels: []
- metrics:
- - name: services.file_open
- description: Number of open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_closed
- description: Files closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.file_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: apps.file_open
- description: Number of open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.file_open_error
- description: Fails to open files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.file_closed
- description: Files closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.file_close_error
- description: Fails to close files
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: filesystem.file_descriptor
- description: Open and close calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: close
- - name: filesystem.file_error
- description: Open fails
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: close
- - meta:
- plugin_name: ebpf.plugin
- module_name: processes
- monitored_instance:
- name: ebpf processes
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.process_thread
- description: Start process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: system.process_status
- description: Process not closed
- unit: "difference"
- chart_type: line
- dimensions:
- - name: process
- - name: zombie
- - name: system.exit
- description: Exit process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: system.task_error
- description: Fails to create process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: task
- - name: apps.process_create
- description: Process started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.task_exit
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.task_close
- description: Tasks closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.process_create
- description: Process started
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: cgroup.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: thread
- - name: cgroup.task_exit
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: exit
- - name: cgroup.task_close
- description: Tasks closed
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: cgroup.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: process
- - name: services.process_create
- description: Process started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.thread_create
- description: Threads started
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_close
- description: Tasks starts exit process
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_exit
- description: Tasks closed
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.task_error
- description: Errors to create process or threads
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - meta:
- plugin_name: ebpf.plugin
- module_name: disk
- monitored_instance:
- name: ebpf disk
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: disk
- description: ""
- labels: []
- metrics:
- - name: disk.latency_io
- description: Disk latency
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency
- - meta:
- plugin_name: ebpf.plugin
- module_name: hardirq
- monitored_instance:
- name: ebpf hardirq
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.hardirq_latency
- description: Hardware IRQ latency
- unit: "milisecondds"
- chart_type: stacked
- dimensions:
- - name: hardirq names
- - meta:
- plugin_name: ebpf.plugin
- module_name: cachestat
- monitored_instance:
- name: ebpf cachestat
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: apps.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per app group
- - name: apps.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: services.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: mem.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: mem.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: dirty
- - name: mem.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hit
- - name: mem.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: miss
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.cachestat_ratio
- description: Hit ratio
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup.cachestat_dirties
- description: Number of dirty pages
- unit: "page/s"
- chart_type: line
- dimensions:
- - name: dirty
- - name: cgroup.cachestat_hits
- description: Number of accessed files
- unit: "hits/s"
- chart_type: line
- dimensions:
- - name: hit
- - name: cgroup.cachestat_misses
- description: Files out of page cache
- unit: "misses/s"
- chart_type: line
- dimensions:
- - name: miss
- - meta:
- plugin_name: ebpf.plugin
- module_name: sync
- monitored_instance:
- name: ebpf sync
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts:
- - name: sync_freq
- link: https://github.com/netdata/netdata/blob/master/health/health.d/synchronization.conf
- metric: mem.sync
- info: number of sync() system calls. Every call causes all pending modifications to filesystem metadata and cached file data to be written to the underlying filesystems.
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: mem.file_sync
- description: Monitor calls for <code>fsync(2)</code> and <code>fdatasync(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: fsync
- - name: fdatasync
- - name: mem.memory_map
- description: Monitor calls for <code>msync(2)</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: msync
- - name: mem.sync
- description: Monitor calls for <code>sync(2)</code> and <code>syncfs(2)</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sync
- - name: syncfs
- - name: mem.file_segment
- description: Monitor calls for <code>sync_file_range(2)</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sync_file_range
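- # Illustrative only (assumes a local agent listening on the default port 19999):
- # each metric entry above corresponds to a chart served by the agent, so a chart
- # such as mem.sync can be read back through the standard data API, e.g.:
- #   curl 'http://localhost:19999/api/v1/data?chart=mem.sync&after=-60'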
- - meta:
- plugin_name: ebpf.plugin
- module_name: mdflush
- monitored_instance:
- name: ebpf mdflush
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: mdstat.mdstat_flush
- description: MD flushes
- unit: "flushes"
- chart_type: stacked
- dimensions:
- - name: disk
- - meta:
- plugin_name: ebpf.plugin
- module_name: swap
- monitored_instance:
- name: ebpf swap
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.swap_read
- description: Calls to function <code>swap_readpage</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.swap_write
- description: Calls to function <code>swap_writepage</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: global
- description: ""
- labels: []
- metrics:
- - name: services.swap_read
- description: Calls to function <code>swap_readpage</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.swap_write
- description: Calls to function <code>swap_writepage</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: apps.swap_read_call
- description: Calls to function <code>swap_readpage</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.swap_write_call
- description: Calls to function <code>swap_writepage</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: system.swapcalls
- description: Calls to access swap memory
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: read
- - meta:
- plugin_name: ebpf.plugin
- module_name: oomkill
- monitored_instance:
- name: ebpf oomkill
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.oomkills
- description: OOM kills. This chart is provided by the eBPF plugin.
- unit: "kills"
- chart_type: line
- dimensions:
- - name: cgroup name
- - name: global
- description: ""
- labels: []
- metrics:
- - name: services.oomkills
- description: OOM kills. This chart is provided by the eBPF plugin.
- unit: "kills"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: apps.oomkills
- description: OOM kills
- unit: "kills"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
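- # A minimal health-alert sketch for the chart above (hypothetical alarm name and
- # thresholds; standard health.d syntax):
- #    alarm: app_oomkills
- #       on: apps.oomkills
- #   lookup: sum -10m unaligned
- #    units: kills
- #    every: 1m
- #     warn: $this > 0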
- - meta:
- plugin_name: ebpf.plugin
- module_name: socket
- monitored_instance:
- name: ebpf socket
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: ip.inbound_conn
- description: Inbound connections.
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connection_tcp
- - name: ip.tcp_outbound_conn
- description: TCP outbound connections.
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: received
- - name: ip.tcp_functions
- description: Calls to internal functions
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: closed
- - name: ip.total_tcp_bandwidth
- description: TCP bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.tcp_error
- description: TCP errors
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.tcp_retransmit
- description: Packets retransmitted
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: retransmited
- - name: ip.udp_functions
- description: UDP calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.total_udp_bandwidth
- description: UDP bandwidth
- unit: "kilobits/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: ip.udp_error
- description: UDP errors
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: send
- - name: apps.outbound_conn_v4
- description: Calls to tcp_v4_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.outbound_conn_v6
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.total_bandwidth_sent
- description: Bytes sent
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.total_bandwidth_recv
- description: Bytes received
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.bandwidth_tcp_send
- description: Calls for tcp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.bandwidth_tcp_recv
- description: Calls for tcp_cleanup_rbuf
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.bandwidth_tcp_retransmit
- description: Calls for tcp_retransmit
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.bandwidth_udp_send
- description: Calls for udp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.bandwidth_udp_recv
- description: Calls for udp_recvmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: services.net_conn_ipv4
- description: Calls to tcp_v4_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_conn_ipv6
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_bytes_recv
- description: Bytes received
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_bytes_sent
- description: Bytes sent
- unit: "kilobits/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_recv
- description: Calls to tcp_cleanup_rbuf.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_send
- description: Calls to tcp_sendmsg.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_tcp_retransmit
- description: Calls to tcp_retransmit
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_udp_send
- description: Calls to udp_sendmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.net_udp_recv
- description: Calls to udp_recvmsg
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.net_conn_ipv4
- description: Calls to tcp_v4_connection
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connected_v4
- - name: cgroup.net_conn_ipv6
- description: Calls to tcp_v6_connection
- unit: "connections/s"
- chart_type: line
- dimensions:
- - name: connected_v6
- - name: cgroup.net_bytes_recv
- description: Bytes received
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: cgroup.net_bytes_sent
- description: Bytes sent
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_tcp_recv
- description: Calls to tcp_cleanup_rbuf.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - name: cgroup.net_tcp_send
- description: Calls to tcp_sendmsg.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_retransmit
- description: Calls to tcp_retransmit.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: retransmitted
- - name: cgroup.net_udp_send
- description: Calls to udp_sendmsg
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: sent
- - name: cgroup.net_udp_recv
- description: Calls to udp_recvmsg
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: received
- - meta:
- plugin_name: ebpf.plugin
- module_name: dcstat
- monitored_instance:
- name: ebpf dcstat
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: apps.dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per app group
- - name: apps.dc_reference
- description: Count file access
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.dc_not_found
- description: Files not found
- unit: "files"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: services.dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_reference
- description: Count file access
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: services.dc_not_found
- description: Files not found
- unit: "files"
- chart_type: line
- dimensions:
- - name: a dimension per systemd service
- - name: filesystem.dc_hit_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.dc_ratio
- description: Percentage of files inside directory cache
- unit: "%"
- chart_type: line
- dimensions:
- - name: ratio
- - name: cgroup.dc_reference
- description: Count file access
- unit: "files"
- chart_type: line
- dimensions:
- - name: reference
- - name: cgroup.dc_not_cache
- description: Files not present inside directory cache
- unit: "files"
- chart_type: line
- dimensions:
- - name: slow
- - name: cgroup.dc_not_found
- description: Files not found
- unit: "files"
- chart_type: line
- dimensions:
- - name: miss
- - name: filesystem
- description: ""
- labels: []
- metrics:
- - name: filesystem.dc_reference
- description: Variables used to calculate hit ratio.
- unit: "files"
- chart_type: line
- dimensions:
- - name: reference
- - name: slow
- - name: miss
- - meta:
- plugin_name: ebpf.plugin
- module_name: filesystem
- monitored_instance:
- name: ebpf filesystem
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: filesystem
- description: ""
- labels: []
- metrics:
- - name: filesystem.read_latency
- description: ext4 latency for each read request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: filesystem.open_latency
- description: ext4 latency for each open request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: filesystem.sync_latency
- description: ext4 latency for each sync request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: filesystem.write_latency
- description: ext4 latency for each write request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - name: global
- description: ""
- labels: []
- metrics:
- - name: filesystem.attributte_latency
- description: NFS latency for each attribute request.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: latency period
- - meta:
- plugin_name: ebpf.plugin
- module_name: shm
- monitored_instance:
- name: ebpf shm
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.shmget
- description: Calls to syscall <code>shmget(2)</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: get
- - name: cgroup.shmat
- description: Calls to syscall <code>shmat(2)</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: at
- - name: cgroup.shmdt
- description: Calls to syscall <code>shmdt(2)</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: dt
- - name: cgroup.shmctl
- description: Calls to syscall <code>shmctl(2)</code>.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: ctl
- - name: global
- description: ""
- labels: []
- metrics:
- - name: services.shmget
- description: Calls to syscall <code>shmget(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmat
- description: Calls to syscall <code>shmat(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmdt
- description: Calls to syscall <code>shmdt(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.shmctl
- description: Calls to syscall <code>shmctl(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: apps.shmget_call
- description: Calls to syscall <code>shmget(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.shmat_call
- description: Calls to syscall <code>shmat(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.shmdt_call
- description: Calls to syscall <code>shmdt(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.shmctl_call
- description: Calls to syscall <code>shmctl(2)</code>.
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: system.shared_memory_calls
- description: Calls to shared memory system calls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: get
- - name: at
- - name: dt
- - name: ctl
- - meta:
- plugin_name: ebpf.plugin
- module_name: softirq
- monitored_instance:
- name: ebpf softirq
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: system.softirq_latency
- description: Software IRQ latency
- unit: "miliseconds"
- chart_type: stacked
- dimensions:
- - name: soft IRQs
- - meta:
- plugin_name: ebpf.plugin
- module_name: mount
- monitored_instance:
- name: ebpf mount
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: mount_points.call
- description: Calls to mount and umount syscalls
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: mount
- - name: umount
- - name: mount_points.error
- description: Errors to mount and umount file systems
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: mount
- - name: umount
- - meta:
- plugin_name: ebpf.plugin
- module_name: vfs
- monitored_instance:
- name: ebpf vfs
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: cgroup
- description: ""
- labels: []
- metrics:
- - name: cgroup.vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: delete
- - name: cgroup.vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: write
- - name: cgroup.vfs_read_bytes
- description: Bytes read from disk
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: read
- - name: cgroup.vfs_fsync
- description: Calls for <code>vfs_fsync</code>
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: cgroup.vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: cgroup.vfs_open
- description: Calls for <code>vfs_open</code>
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: cgroup.vfs_create
- description: Calls for <code>vfs_create</code>
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: cgroup.vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: global
- description: ""
- labels: []
- metrics:
- - name: services.vfs_unlink
- description: Files deleted
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write
- description: Write to disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read
- description: Read from disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_read_bytes
- description: Bytes read from disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_fsync
- description: Calls to <code>vfs_fsync</code>
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_open
- description: Calls to <code>vfs_open</code>
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_create
- description: Calls to <code>vfs_create</code>
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: services.vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per systemd service
- - name: filesystem.vfs_deleted_objects
- description: Remove files
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: delete
- - name: filesystem.vfs_io
- description: Calls to IO
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_io_bytes
- description: Bytes written and read
- unit: "bytes/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_io_error
- description: Fails to write or read
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: read
- - name: write
- - name: filesystem.vfs_fsync
- description: Calls for <code>vfs_fsync</code>
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: filesystem.vfs_fsync_error
- description: Fails to synchronize
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: fsync
- - name: filesystem.vfs_open
- description: Calls for <code>vfs_open</code>
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: filesystem.vfs_open_error
- description: Fails to open a file
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: open
- - name: filesystem.vfs_create
- description: Calls for <code>vfs_create</code>
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: filesystem.vfs_create_error
- description: Fails to create a file.
- unit: "calls/s"
- chart_type: line
- dimensions:
- - name: create
- - name: apps.file_deleted
- description: Files deleted
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_write_call
- description: Write to disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_write_error
- description: Fails to write
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_read_call
- description: Read from disk
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_read_error
- description: Fails to read
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_write_bytes
- description: Bytes written on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_read_bytes
- description: Bytes read on disk
- unit: "bytes/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_fsync
- description: Calls for <code>vfs_fsync</code>
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_fsync_error
- description: Sync error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_open
- description: Calls for <code>vfs_open</code>
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_open_error
- description: Open error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_create
- description: Calls for <code>vfs_create</code>
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - name: apps.vfs_create_error
- description: Create error
- unit: "calls/s"
- chart_type: stacked
- dimensions:
- - name: a dimension per app group
- - meta:
- plugin_name: ebpf.plugin
- module_name: process
- monitored_instance:
- name: ebpf process
- link: ''
- categories: []
- icon_filename: ''
- related_resources:
- integrations:
- list: []
- info_provided_to_referring_integrations:
- description: ''
- keywords: []
- most_popular: false
- overview:
- data_collection:
- metrics_description: ''
- method_description: ''
- supported_platforms:
- include: []
- exclude: []
- multi-instance: true
- additional_permissions:
- description: ''
- default_behavior:
- auto_detection:
- description: ''
- limits:
- description: ''
- performance_impact:
- description: ''
- setup:
- prerequisites:
- list: []
- configuration:
- file:
- name: ''
- description: ''
- options:
- description: ''
- folding:
- title: ''
- enabled: true
- list: []
- examples:
- folding:
- enabled: true
- title: ''
- list: []
- troubleshooting:
- problems:
- list: []
- alerts: []
- metrics:
- folding:
- title: Metrics
- enabled: false
- description: ""
- availability: []
- scopes:
- - name: global
- description: ""
- labels: []
- metrics:
- - name: netdata.ebpf_aral_stat_size
- description: Bytes allocated for ARAL.
- unit: "bytes"
- chart_type: stacked
- dimensions:
- - name: memory
- - name: netdata.ebpf_aral_stat_alloc
- description: Calls to allocate memory.
- unit: "calls"
- chart_type: stacked
- dimensions:
- - name: aral
- - name: netdata.ebpf_threads
- description: Threads info
- unit: "threads"
- chart_type: line
- dimensions:
- - name: total
- - name: running
- - name: netdata.ebpf_load_methods
- description: Load info
- unit: "methods"
- chart_type: line
- dimensions:
- - name: legacy
- - name: co-re
- - name: netdata.ebpf_kernel_memory
- description: Memory allocated for hash tables.
- unit: "bytes"
- chart_type: line
- dimensions:
- - name: memory_locked
- - name: netdata.ebpf_hash_tables_count
- description: Number of hash tables loaded
- unit: "hash tables"
- chart_type: line
- dimensions:
- - name: hash_table